0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
0004  *
0005  * extracted from shdma.c
0006  *
0007  * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
0008  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
0009  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
0010  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
0011  */
0012 
0013 #include <linux/delay.h>
0014 #include <linux/shdma-base.h>
0015 #include <linux/dmaengine.h>
0016 #include <linux/init.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/module.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/slab.h>
0021 #include <linux/spinlock.h>
0022 
0023 #include "../dmaengine.h"
0024 
0025 /* DMA descriptor control */
0026 enum shdma_desc_status {
0027     DESC_IDLE,
0028     DESC_PREPARED,
0029     DESC_SUBMITTED,
0030     DESC_COMPLETED, /* completed, have to call callback */
0031     DESC_WAITING,   /* callback called, waiting for ack / re-submit */
0032 };
0033 
0034 #define NR_DESCS_PER_CHANNEL 32
0035 
0036 #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
0037 #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
0038 
0039 /*
0040  * For slave DMA we assume that there is a finite number of DMA slaves in the
0041  * system, and that each such slave can only use a finite number of channels.
0042  * We use slave channel IDs to make sure that no such slave channel ID is
0043  * allocated more than once.
0044  */
0045 static unsigned int slave_num = 256;
0046 module_param(slave_num, uint, 0444);
0047 
0048 /* A bitmask with slave_num bits */
0049 static unsigned long *shdma_slave_used;
0050 
0051 /* Called under spin_lock_irq(&schan->chan_lock) */
0052 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
0053 {
0054     struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
0055     const struct shdma_ops *ops = sdev->ops;
0056     struct shdma_desc *sdesc;
0057 
0058     /* DMA work check */
0059     if (ops->channel_busy(schan))
0060         return;
0061 
0062     /* Find the first not transferred descriptor */
0063     list_for_each_entry(sdesc, &schan->ld_queue, node)
0064         if (sdesc->mark == DESC_SUBMITTED) {
0065             ops->start_xfer(schan, sdesc);
0066             break;
0067         }
0068 }
0069 
0070 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
0071 {
0072     struct shdma_desc *chunk, *c, *desc =
0073         container_of(tx, struct shdma_desc, async_tx);
0074     struct shdma_chan *schan = to_shdma_chan(tx->chan);
0075     dma_async_tx_callback callback = tx->callback;
0076     dma_cookie_t cookie;
0077     bool power_up;
0078 
0079     spin_lock_irq(&schan->chan_lock);
0080 
0081     power_up = list_empty(&schan->ld_queue);
0082 
0083     cookie = dma_cookie_assign(tx);
0084 
0085     /* Mark all chunks of this descriptor as submitted, move to the queue */
0086     list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
0087         /*
0088          * All chunks are on the global ld_free, so, we have to find
0089          * the end of the chain ourselves
0090          */
0091         if (chunk != desc && (chunk->mark == DESC_IDLE ||
0092                       chunk->async_tx.cookie > 0 ||
0093                       chunk->async_tx.cookie == -EBUSY ||
0094                       &chunk->node == &schan->ld_free))
0095             break;
0096         chunk->mark = DESC_SUBMITTED;
0097         if (chunk->chunks == 1) {
0098             chunk->async_tx.callback = callback;
0099             chunk->async_tx.callback_param = tx->callback_param;
0100         } else {
0101             /* Callback goes to the last chunk */
0102             chunk->async_tx.callback = NULL;
0103         }
0104         chunk->cookie = cookie;
0105         list_move_tail(&chunk->node, &schan->ld_queue);
0106 
0107         dev_dbg(schan->dev, "submit #%d@%p on %d\n",
0108             tx->cookie, &chunk->async_tx, schan->id);
0109     }
0110 
0111     if (power_up) {
0112         int ret;
0113         schan->pm_state = SHDMA_PM_BUSY;
0114 
0115         ret = pm_runtime_get(schan->dev);
0116 
0117         spin_unlock_irq(&schan->chan_lock);
0118         if (ret < 0)
0119             dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
0120 
0121         pm_runtime_barrier(schan->dev);
0122 
0123         spin_lock_irq(&schan->chan_lock);
0124 
0125         /* Have we been reset while waiting? */
0126         if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
0127             struct shdma_dev *sdev =
0128                 to_shdma_dev(schan->dma_chan.device);
0129             const struct shdma_ops *ops = sdev->ops;
0130             dev_dbg(schan->dev, "Bring up channel %d\n",
0131                 schan->id);
0132             /*
0133              * TODO: .xfer_setup() might fail on some platforms.
0134              * Make it int then, on error remove chunks from the
0135              * queue again
0136              */
0137             ops->setup_xfer(schan, schan->slave_id);
0138 
0139             if (schan->pm_state == SHDMA_PM_PENDING)
0140                 shdma_chan_xfer_ld_queue(schan);
0141             schan->pm_state = SHDMA_PM_ESTABLISHED;
0142         }
0143     } else {
0144         /*
0145          * Tell .device_issue_pending() not to run the queue, interrupts
0146          * will do it anyway
0147          */
0148         schan->pm_state = SHDMA_PM_PENDING;
0149     }
0150 
0151     spin_unlock_irq(&schan->chan_lock);
0152 
0153     return cookie;
0154 }
0155 
0156 /* Called with schan->chan_lock held */
0157 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
0158 {
0159     struct shdma_desc *sdesc;
0160 
0161     list_for_each_entry(sdesc, &schan->ld_free, node)
0162         if (sdesc->mark != DESC_PREPARED) {
0163             BUG_ON(sdesc->mark != DESC_IDLE);
0164             list_del(&sdesc->node);
0165             return sdesc;
0166         }
0167 
0168     return NULL;
0169 }
0170 
0171 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
0172 {
0173     struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
0174     const struct shdma_ops *ops = sdev->ops;
0175     int ret, match;
0176 
0177     if (schan->dev->of_node) {
0178         match = schan->hw_req;
0179         ret = ops->set_slave(schan, match, slave_addr, true);
0180         if (ret < 0)
0181             return ret;
0182     } else {
0183         match = schan->real_slave_id;
0184     }
0185 
0186     if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
0187         return -EINVAL;
0188 
0189     if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
0190         return -EBUSY;
0191 
0192     ret = ops->set_slave(schan, match, slave_addr, false);
0193     if (ret < 0) {
0194         clear_bit(schan->real_slave_id, shdma_slave_used);
0195         return ret;
0196     }
0197 
0198     schan->slave_id = schan->real_slave_id;
0199 
0200     return 0;
0201 }
0202 
0203 static int shdma_alloc_chan_resources(struct dma_chan *chan)
0204 {
0205     struct shdma_chan *schan = to_shdma_chan(chan);
0206     struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
0207     const struct shdma_ops *ops = sdev->ops;
0208     struct shdma_desc *desc;
0209     struct shdma_slave *slave = chan->private;
0210     int ret, i;
0211 
0212     /*
0213      * This relies on the guarantee from dmaengine that alloc_chan_resources
0214      * never runs concurrently with itself or free_chan_resources.
0215      */
0216     if (slave) {
0217         /* Legacy mode: .private is set in filter */
0218         schan->real_slave_id = slave->slave_id;
0219         ret = shdma_setup_slave(schan, 0);
0220         if (ret < 0)
0221             goto esetslave;
0222     } else {
0223         /* Normal mode: real_slave_id was set by filter */
0224         schan->slave_id = -EINVAL;
0225     }
0226 
0227     schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
0228                   sdev->desc_size, GFP_KERNEL);
0229     if (!schan->desc) {
0230         ret = -ENOMEM;
0231         goto edescalloc;
0232     }
0233     schan->desc_num = NR_DESCS_PER_CHANNEL;
0234 
0235     for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
0236         desc = ops->embedded_desc(schan->desc, i);
0237         dma_async_tx_descriptor_init(&desc->async_tx,
0238                          &schan->dma_chan);
0239         desc->async_tx.tx_submit = shdma_tx_submit;
0240         desc->mark = DESC_IDLE;
0241 
0242         list_add(&desc->node, &schan->ld_free);
0243     }
0244 
0245     return NR_DESCS_PER_CHANNEL;
0246 
0247 edescalloc:
0248     if (slave)
0249 esetslave:
0250         clear_bit(slave->slave_id, shdma_slave_used);
0251     chan->private = NULL;
0252     return ret;
0253 }
0254 
0255 /*
0256  * This is the standard shdma filter function, to be used as a replacement for
0257  * the "old" method that uses the .private pointer. You always have to pass a
0258  * valid slave ID as the argument. Old drivers that pass ERR_PTR(-EINVAL) as a
0259  * filter parameter and set the slave up in dma_slave_config need to be updated,
0260  * so that the slave_id field can be removed from dma_slave_config. If this
0261  * filter is used, the slave driver, after calling dma_request_channel(), will
0262  * also have to call dmaengine_slave_config() with .direction, and either
0263  * .src_addr or .dst_addr set (see the usage sketch after shdma_chan_filter()).
0264  *
0265  * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
0266  * capability! If this becomes a requirement, hardware glue drivers using these
0267  * services would have to provide their own filters, which would first check the
0268  * device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
0269  * this, and only then, in case of a match, call this common filter.
0270  * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
0271  * In that case the MID-RID value is used for slave channel filtering and is
0272  * passed to this function in the "arg" parameter.
0273  */
0274 bool shdma_chan_filter(struct dma_chan *chan, void *arg)
0275 {
0276     struct shdma_chan *schan;
0277     struct shdma_dev *sdev;
0278     int slave_id = (long)arg;
0279     int ret;
0280 
0281     /* Only support channels handled by this driver. */
0282     if (chan->device->device_alloc_chan_resources !=
0283         shdma_alloc_chan_resources)
0284         return false;
0285 
0286     schan = to_shdma_chan(chan);
0287     sdev = to_shdma_dev(chan->device);
0288 
0289     /*
0290      * For DT, the schan->slave_id field is generated by the
0291      * set_slave function from the slave ID that is passed in
0292      * from xlate. For the non-DT case, the slave ID is
0293      * directly passed into the filter function by the driver
0294      */
0295     if (schan->dev->of_node) {
0296         ret = sdev->ops->set_slave(schan, slave_id, 0, true);
0297         if (ret < 0)
0298             return false;
0299 
0300         schan->real_slave_id = schan->slave_id;
0301         return true;
0302     }
0303 
0304     if (slave_id < 0) {
0305         /* No slave requested - arbitrary channel */
0306         dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
0307         return true;
0308     }
0309 
0310     if (slave_id >= slave_num)
0311         return false;
0312 
0313     ret = sdev->ops->set_slave(schan, slave_id, 0, true);
0314     if (ret < 0)
0315         return false;
0316 
0317     schan->real_slave_id = slave_id;
0318 
0319     return true;
0320 }
0321 EXPORT_SYMBOL(shdma_chan_filter);
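
/*
 * Usage sketch (hypothetical client code, not part of this library): how a
 * slave driver could request a channel through shdma_chan_filter() and then
 * configure it. SLAVE_ID_TX and fifo_tx_phys are made-up placeholders; only
 * the dmaengine calls and shdma_chan_filter() itself are real.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_tx_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)SLAVE_ID_TX);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */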
0322 
0323 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
0324 {
0325     struct shdma_desc *desc, *_desc;
0326     /* Is the "exposed" head of a chain acked? */
0327     bool head_acked = false;
0328     dma_cookie_t cookie = 0;
0329     dma_async_tx_callback callback = NULL;
0330     struct dmaengine_desc_callback cb;
0331     unsigned long flags;
0332     LIST_HEAD(cyclic_list);
0333 
0334     memset(&cb, 0, sizeof(cb));
0335     spin_lock_irqsave(&schan->chan_lock, flags);
0336     list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
0337         struct dma_async_tx_descriptor *tx = &desc->async_tx;
0338 
0339         BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
0340         BUG_ON(desc->mark != DESC_SUBMITTED &&
0341                desc->mark != DESC_COMPLETED &&
0342                desc->mark != DESC_WAITING);
0343 
0344         /*
0345          * queue is ordered, and we use this loop to (1) clean up all
0346          * completed descriptors, and to (2) update descriptor flags of
0347          * any chunks in a (partially) completed chain
0348          */
0349         if (!all && desc->mark == DESC_SUBMITTED &&
0350             desc->cookie != cookie)
0351             break;
0352 
0353         if (tx->cookie > 0)
0354             cookie = tx->cookie;
0355 
0356         if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
0357             if (schan->dma_chan.completed_cookie != desc->cookie - 1)
0358                 dev_dbg(schan->dev,
0359                     "Completing cookie %d, expected %d\n",
0360                     desc->cookie,
0361                     schan->dma_chan.completed_cookie + 1);
0362             schan->dma_chan.completed_cookie = desc->cookie;
0363         }
0364 
0365         /* Call callback on the last chunk */
0366         if (desc->mark == DESC_COMPLETED && tx->callback) {
0367             desc->mark = DESC_WAITING;
0368             dmaengine_desc_get_callback(tx, &cb);
0369             callback = tx->callback;
0370             dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
0371                 tx->cookie, tx, schan->id);
0372             BUG_ON(desc->chunks != 1);
0373             break;
0374         }
0375 
0376         if (tx->cookie > 0 || tx->cookie == -EBUSY) {
0377             if (desc->mark == DESC_COMPLETED) {
0378                 BUG_ON(tx->cookie < 0);
0379                 desc->mark = DESC_WAITING;
0380             }
0381             head_acked = async_tx_test_ack(tx);
0382         } else {
0383             switch (desc->mark) {
0384             case DESC_COMPLETED:
0385                 desc->mark = DESC_WAITING;
0386                 fallthrough;
0387             case DESC_WAITING:
0388                 if (head_acked)
0389                     async_tx_ack(&desc->async_tx);
0390             }
0391         }
0392 
0393         dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
0394             tx, tx->cookie);
0395 
0396         if (((desc->mark == DESC_COMPLETED ||
0397               desc->mark == DESC_WAITING) &&
0398              async_tx_test_ack(&desc->async_tx)) || all) {
0399 
0400             if (all || !desc->cyclic) {
0401                 /* Remove from ld_queue list */
0402                 desc->mark = DESC_IDLE;
0403                 list_move(&desc->node, &schan->ld_free);
0404             } else {
0405                 /* reuse as cyclic */
0406                 desc->mark = DESC_SUBMITTED;
0407                 list_move_tail(&desc->node, &cyclic_list);
0408             }
0409 
0410             if (list_empty(&schan->ld_queue)) {
0411                 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
0412                 pm_runtime_put(schan->dev);
0413                 schan->pm_state = SHDMA_PM_ESTABLISHED;
0414             } else if (schan->pm_state == SHDMA_PM_PENDING) {
0415                 shdma_chan_xfer_ld_queue(schan);
0416             }
0417         }
0418     }
0419 
0420     if (all && !callback)
0421         /*
0422          * Terminating and the loop completed normally: forgive
0423          * uncompleted cookies
0424          */
0425         schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
0426 
0427     list_splice_tail(&cyclic_list, &schan->ld_queue);
0428 
0429     spin_unlock_irqrestore(&schan->chan_lock, flags);
0430 
0431     dmaengine_desc_callback_invoke(&cb, NULL);
0432 
0433     return callback;
0434 }
0435 
0436 /*
0437  * shdma_chan_ld_cleanup - Clean up link descriptors
0438  *
0439  * Clean up the ld_queue of DMA channel.
0440  */
0441 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
0442 {
0443     while (__ld_cleanup(schan, all))
0444         ;
0445 }
0446 
0447 /*
0448  * shdma_free_chan_resources - Free all resources of the channel.
0449  */
0450 static void shdma_free_chan_resources(struct dma_chan *chan)
0451 {
0452     struct shdma_chan *schan = to_shdma_chan(chan);
0453     struct shdma_dev *sdev = to_shdma_dev(chan->device);
0454     const struct shdma_ops *ops = sdev->ops;
0455     LIST_HEAD(list);
0456 
0457     /* Protect against ISR */
0458     spin_lock_irq(&schan->chan_lock);
0459     ops->halt_channel(schan);
0460     spin_unlock_irq(&schan->chan_lock);
0461 
0462     /* Now no new interrupts will occur */
0463 
0464     /* Prepared and not submitted descriptors can still be on the queue */
0465     if (!list_empty(&schan->ld_queue))
0466         shdma_chan_ld_cleanup(schan, true);
0467 
0468     if (schan->slave_id >= 0) {
0469         /* The caller is holding dma_list_mutex */
0470         clear_bit(schan->slave_id, shdma_slave_used);
0471         chan->private = NULL;
0472     }
0473 
0474     schan->real_slave_id = 0;
0475 
0476     spin_lock_irq(&schan->chan_lock);
0477 
0478     list_splice_init(&schan->ld_free, &list);
0479     schan->desc_num = 0;
0480 
0481     spin_unlock_irq(&schan->chan_lock);
0482 
0483     kfree(schan->desc);
0484 }
0485 
0486 /**
0487  * shdma_add_desc - get, set up and return one transfer descriptor
0488  * @schan:  DMA channel
0489  * @flags:  DMA transfer flags
0490  * @dst:    destination DMA address, incremented when direction equals
0491  *      DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
0492  * @src:    source DMA address, incremented when direction equals
0493  *      DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
0494  * @len:    DMA transfer length
0495  * @first:  if NULL, set to the current descriptor and cookie set to -EBUSY
0496  * @direction:  needed for slave DMA to decide which address to keep constant,
0497  *      equals DMA_MEM_TO_MEM for MEMCPY
0498  * Returns the new descriptor or NULL in case of an error
0499  * Locks: called with schan->chan_lock held
0500  */
0501 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
0502     unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
0503     struct shdma_desc **first, enum dma_transfer_direction direction)
0504 {
0505     struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
0506     const struct shdma_ops *ops = sdev->ops;
0507     struct shdma_desc *new;
0508     size_t copy_size = *len;
0509 
0510     if (!copy_size)
0511         return NULL;
0512 
0513     /* Allocate the link descriptor from the free list */
0514     new = shdma_get_desc(schan);
0515     if (!new) {
0516         dev_err(schan->dev, "No free link descriptor available\n");
0517         return NULL;
0518     }
0519 
0520     ops->desc_setup(schan, new, *src, *dst, &copy_size);
0521 
0522     if (!*first) {
0523         /* First desc */
0524         new->async_tx.cookie = -EBUSY;
0525         *first = new;
0526     } else {
0527         /* Other desc - invisible to the user */
0528         new->async_tx.cookie = -EINVAL;
0529     }
0530 
0531     dev_dbg(schan->dev,
0532         "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
0533         copy_size, *len, src, dst, &new->async_tx,
0534         new->async_tx.cookie);
0535 
0536     new->mark = DESC_PREPARED;
0537     new->async_tx.flags = flags;
0538     new->direction = direction;
0539     new->partial = 0;
0540 
0541     *len -= copy_size;
0542     if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
0543         *src += copy_size;
0544     if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
0545         *dst += copy_size;
0546 
0547     return new;
0548 }
0549 
0550 /*
0551  * shdma_prep_sg - prepare transfer descriptors from an SG list
0552  *
0553  * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
0554  * converted to scatter-gather to guarantee consistent locking and correct
0555  * list manipulation. For slave DMA, direction carries the usual meaning, and,
0556  * logically, the SG list is RAM and the addr variable contains the slave
0557  * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
0558  * DMA_MEM_TO_MEM and the single SG list element points at the source buffer.
0559  */
0560 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
0561     struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
0562     enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
0563 {
0564     struct scatterlist *sg;
0565     struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
0566     LIST_HEAD(tx_list);
0567     int chunks = 0;
0568     unsigned long irq_flags;
0569     int i;
0570 
0571     for_each_sg(sgl, sg, sg_len, i)
0572         chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
0573 
0574     /* Have to lock the whole loop to protect against concurrent release */
0575     spin_lock_irqsave(&schan->chan_lock, irq_flags);
0576 
0577     /*
0578      * Chaining:
0579      * the first descriptor is what the user deals with in all API calls; its
0580      *  cookie is initially set to -EBUSY and at tx_submit to a positive
0581      *  number
0582      * if more than one chunk is needed, further chunks have cookie = -EINVAL
0583      * the last chunk, if not equal to the first, has cookie = -ENOSPC
0584      * all chunks are linked onto the tx_list head with their .node heads
0585      *  only during this function, then they are immediately spliced
0586      *  back onto the free list in the form of a chain
0587      */
0588     for_each_sg(sgl, sg, sg_len, i) {
0589         dma_addr_t sg_addr = sg_dma_address(sg);
0590         size_t len = sg_dma_len(sg);
0591 
0592         if (!len)
0593             goto err_get_desc;
0594 
0595         do {
0596             dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
0597                 i, sg, len, &sg_addr);
0598 
0599             if (direction == DMA_DEV_TO_MEM)
0600                 new = shdma_add_desc(schan, flags,
0601                         &sg_addr, addr, &len, &first,
0602                         direction);
0603             else
0604                 new = shdma_add_desc(schan, flags,
0605                         addr, &sg_addr, &len, &first,
0606                         direction);
0607             if (!new)
0608                 goto err_get_desc;
0609 
0610             new->cyclic = cyclic;
0611             if (cyclic)
0612                 new->chunks = 1;
0613             else
0614                 new->chunks = chunks--;
0615             list_add_tail(&new->node, &tx_list);
0616         } while (len);
0617     }
0618 
0619     if (new != first)
0620         new->async_tx.cookie = -ENOSPC;
0621 
0622     /* Put them back on the free list, so they don't get lost */
0623     list_splice_tail(&tx_list, &schan->ld_free);
0624 
0625     spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
0626 
0627     return &first->async_tx;
0628 
0629 err_get_desc:
0630     list_for_each_entry(new, &tx_list, node)
0631         new->mark = DESC_IDLE;
0632     list_splice(&tx_list, &schan->ld_free);
0633 
0634     spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
0635 
0636     return NULL;
0637 }
0638 
0639 static struct dma_async_tx_descriptor *shdma_prep_memcpy(
0640     struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
0641     size_t len, unsigned long flags)
0642 {
0643     struct shdma_chan *schan = to_shdma_chan(chan);
0644     struct scatterlist sg;
0645 
0646     if (!chan || !len)
0647         return NULL;
0648 
0649     BUG_ON(!schan->desc_num);
0650 
0651     sg_init_table(&sg, 1);
0652     sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
0653             offset_in_page(dma_src));
0654     sg_dma_address(&sg) = dma_src;
0655     sg_dma_len(&sg) = len;
0656 
0657     return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
0658                  flags, false);
0659 }
0660 
0661 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
0662     struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
0663     enum dma_transfer_direction direction, unsigned long flags, void *context)
0664 {
0665     struct shdma_chan *schan = to_shdma_chan(chan);
0666     struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
0667     const struct shdma_ops *ops = sdev->ops;
0668     int slave_id = schan->slave_id;
0669     dma_addr_t slave_addr;
0670 
0671     if (!chan)
0672         return NULL;
0673 
0674     BUG_ON(!schan->desc_num);
0675 
0676     /* Someone calling slave DMA on a generic channel? */
0677     if (slave_id < 0 || !sg_len) {
0678         dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
0679              __func__, sg_len, slave_id);
0680         return NULL;
0681     }
0682 
0683     slave_addr = ops->slave_addr(schan);
0684 
0685     return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
0686                  direction, flags, false);
0687 }
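
/*
 * Client-side sketch (hypothetical, assuming "chan" was obtained with
 * shdma_chan_filter() and configured as above): map a buffer and submit a
 * mem-to-dev slave SG transfer. "buf" and "len" are placeholders.
 *
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE) != 1)
 *		return -EIO;
 *	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */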
0688 
0689 #define SHDMA_MAX_SG_LEN 32
0690 
0691 static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
0692     struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
0693     size_t period_len, enum dma_transfer_direction direction,
0694     unsigned long flags)
0695 {
0696     struct shdma_chan *schan = to_shdma_chan(chan);
0697     struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
0698     struct dma_async_tx_descriptor *desc;
0699     const struct shdma_ops *ops = sdev->ops;
0700     unsigned int sg_len = buf_len / period_len;
0701     int slave_id = schan->slave_id;
0702     dma_addr_t slave_addr;
0703     struct scatterlist *sgl;
0704     int i;
0705 
0706     if (!chan)
0707         return NULL;
0708 
0709     BUG_ON(!schan->desc_num);
0710 
0711     if (sg_len > SHDMA_MAX_SG_LEN) {
0712         dev_err(schan->dev, "sg length %d exceeds limit %d\n",
0713                 sg_len, SHDMA_MAX_SG_LEN);
0714         return NULL;
0715     }
0716 
0717     /* Someone calling slave DMA on a generic channel? */
0718     if (slave_id < 0 || (buf_len < period_len)) {
0719         dev_warn(schan->dev,
0720             "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
0721             __func__, buf_len, period_len, slave_id);
0722         return NULL;
0723     }
0724 
0725     slave_addr = ops->slave_addr(schan);
0726 
0727     /*
0728      * Allocate the sg list dynamically as it would consume too much stack
0729      * space.
0730      */
0731     sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
0732     if (!sgl)
0733         return NULL;
0734 
0735     sg_init_table(sgl, sg_len);
0736 
0737     for (i = 0; i < sg_len; i++) {
0738         dma_addr_t src = buf_addr + (period_len * i);
0739 
0740         sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
0741                 offset_in_page(src));
0742         sg_dma_address(&sgl[i]) = src;
0743         sg_dma_len(&sgl[i]) = period_len;
0744     }
0745 
0746     desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
0747                  direction, flags, true);
0748 
0749     kfree(sgl);
0750     return desc;
0751 }
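
/*
 * Client-side sketch (hypothetical, e.g. an audio-style driver feeding a FIFO):
 * submit a cyclic mem-to-dev transfer and get a callback per period. "buf_dma",
 * "buf_len", "period_len", "my_ctx" and period_elapsed() are placeholders.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = period_elapsed;
 *		desc->callback_param = my_ctx;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */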
0752 
0753 static int shdma_terminate_all(struct dma_chan *chan)
0754 {
0755     struct shdma_chan *schan = to_shdma_chan(chan);
0756     struct shdma_dev *sdev = to_shdma_dev(chan->device);
0757     const struct shdma_ops *ops = sdev->ops;
0758     unsigned long flags;
0759 
0760     spin_lock_irqsave(&schan->chan_lock, flags);
0761     ops->halt_channel(schan);
0762 
0763     if (ops->get_partial && !list_empty(&schan->ld_queue)) {
0764         /* Record partial transfer */
0765         struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
0766                                struct shdma_desc, node);
0767         desc->partial = ops->get_partial(schan, desc);
0768     }
0769 
0770     spin_unlock_irqrestore(&schan->chan_lock, flags);
0771 
0772     shdma_chan_ld_cleanup(schan, true);
0773 
0774     return 0;
0775 }
0776 
0777 static int shdma_config(struct dma_chan *chan,
0778             struct dma_slave_config *config)
0779 {
0780     struct shdma_chan *schan = to_shdma_chan(chan);
0781 
0782     /*
0783      * The transfer direction and the matching slave address are taken from
0784      * the config; slave drivers should set .direction and .src_addr or .dst_addr.
0785      */
0786     if (!config)
0787         return -EINVAL;
0788 
0789     /*
0790      * We could lock this, but you shouldn't be configuring the
0791      * channel while using it...
0792      */
0793     return shdma_setup_slave(schan,
0794                  config->direction == DMA_DEV_TO_MEM ?
0795                  config->src_addr : config->dst_addr);
0796 }
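
/*
 * Sketch (hypothetical client): for DMA_DEV_TO_MEM only .src_addr is consumed
 * by shdma_config() above, for DMA_MEM_TO_DEV only .dst_addr. "fifo_rx_phys"
 * is a placeholder.
 *
 *	struct dma_slave_config rx_cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_rx_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &rx_cfg);
 */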
0797 
0798 static void shdma_issue_pending(struct dma_chan *chan)
0799 {
0800     struct shdma_chan *schan = to_shdma_chan(chan);
0801 
0802     spin_lock_irq(&schan->chan_lock);
0803     if (schan->pm_state == SHDMA_PM_ESTABLISHED)
0804         shdma_chan_xfer_ld_queue(schan);
0805     else
0806         schan->pm_state = SHDMA_PM_PENDING;
0807     spin_unlock_irq(&schan->chan_lock);
0808 }
0809 
0810 static enum dma_status shdma_tx_status(struct dma_chan *chan,
0811                     dma_cookie_t cookie,
0812                     struct dma_tx_state *txstate)
0813 {
0814     struct shdma_chan *schan = to_shdma_chan(chan);
0815     enum dma_status status;
0816     unsigned long flags;
0817 
0818     shdma_chan_ld_cleanup(schan, false);
0819 
0820     spin_lock_irqsave(&schan->chan_lock, flags);
0821 
0822     status = dma_cookie_status(chan, cookie, txstate);
0823 
0824     /*
0825      * If we don't find the cookie on the queue, it has been aborted and we
0826      * have to report an error
0827      */
0828     if (status != DMA_COMPLETE) {
0829         struct shdma_desc *sdesc;
0830         status = DMA_ERROR;
0831         list_for_each_entry(sdesc, &schan->ld_queue, node)
0832             if (sdesc->cookie == cookie) {
0833                 status = DMA_IN_PROGRESS;
0834                 break;
0835             }
0836     }
0837 
0838     spin_unlock_irqrestore(&schan->chan_lock, flags);
0839 
0840     return status;
0841 }
0842 
0843 /* Called from error IRQ or NMI */
0844 bool shdma_reset(struct shdma_dev *sdev)
0845 {
0846     const struct shdma_ops *ops = sdev->ops;
0847     struct shdma_chan *schan;
0848     unsigned int handled = 0;
0849     int i;
0850 
0851     /* Reset all channels */
0852     shdma_for_each_chan(schan, sdev, i) {
0853         struct shdma_desc *sdesc;
0854         LIST_HEAD(dl);
0855 
0856         if (!schan)
0857             continue;
0858 
0859         spin_lock(&schan->chan_lock);
0860 
0861         /* Stop the channel */
0862         ops->halt_channel(schan);
0863 
0864         list_splice_init(&schan->ld_queue, &dl);
0865 
0866         if (!list_empty(&dl)) {
0867             dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
0868             pm_runtime_put(schan->dev);
0869         }
0870         schan->pm_state = SHDMA_PM_ESTABLISHED;
0871 
0872         spin_unlock(&schan->chan_lock);
0873 
0874         /* Complete all descriptors */
0875         list_for_each_entry(sdesc, &dl, node) {
0876             struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
0877 
0878             sdesc->mark = DESC_IDLE;
0879             dmaengine_desc_get_callback_invoke(tx, NULL);
0880         }
0881 
0882         spin_lock(&schan->chan_lock);
0883         list_splice(&dl, &schan->ld_free);
0884         spin_unlock(&schan->chan_lock);
0885 
0886         handled++;
0887     }
0888 
0889     return !!handled;
0890 }
0891 EXPORT_SYMBOL(shdma_reset);
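
/*
 * Sketch of how a glue driver's error interrupt handler might use
 * shdma_reset(); the handler name and the controller-specific error clearing
 * are assumptions, only shdma_reset() is provided by this library.
 *
 *	static irqreturn_t mydmac_err_irq(int irq, void *data)
 *	{
 *		struct shdma_dev *sdev = data;
 *
 *		mydmac_clear_error(sdev);	(hypothetical register access)
 *		return shdma_reset(sdev) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */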
0892 
0893 static irqreturn_t chan_irq(int irq, void *dev)
0894 {
0895     struct shdma_chan *schan = dev;
0896     const struct shdma_ops *ops =
0897         to_shdma_dev(schan->dma_chan.device)->ops;
0898     irqreturn_t ret;
0899 
0900     spin_lock(&schan->chan_lock);
0901 
0902     ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
0903 
0904     spin_unlock(&schan->chan_lock);
0905 
0906     return ret;
0907 }
0908 
0909 static irqreturn_t chan_irqt(int irq, void *dev)
0910 {
0911     struct shdma_chan *schan = dev;
0912     const struct shdma_ops *ops =
0913         to_shdma_dev(schan->dma_chan.device)->ops;
0914     struct shdma_desc *sdesc;
0915 
0916     spin_lock_irq(&schan->chan_lock);
0917     list_for_each_entry(sdesc, &schan->ld_queue, node) {
0918         if (sdesc->mark == DESC_SUBMITTED &&
0919             ops->desc_completed(schan, sdesc)) {
0920             dev_dbg(schan->dev, "done #%d@%p\n",
0921                 sdesc->async_tx.cookie, &sdesc->async_tx);
0922             sdesc->mark = DESC_COMPLETED;
0923             break;
0924         }
0925     }
0926     /* Next desc */
0927     shdma_chan_xfer_ld_queue(schan);
0928     spin_unlock_irq(&schan->chan_lock);
0929 
0930     shdma_chan_ld_cleanup(schan, false);
0931 
0932     return IRQ_HANDLED;
0933 }
0934 
0935 int shdma_request_irq(struct shdma_chan *schan, int irq,
0936                unsigned long flags, const char *name)
0937 {
0938     int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
0939                         chan_irqt, flags, name, schan);
0940 
0941     schan->irq = ret < 0 ? ret : irq;
0942 
0943     return ret;
0944 }
0945 EXPORT_SYMBOL(shdma_request_irq);
0946 
0947 void shdma_chan_probe(struct shdma_dev *sdev,
0948                struct shdma_chan *schan, int id)
0949 {
0950     schan->pm_state = SHDMA_PM_ESTABLISHED;
0951 
0952     /* reference struct dma_device */
0953     schan->dma_chan.device = &sdev->dma_dev;
0954     dma_cookie_init(&schan->dma_chan);
0955 
0956     schan->dev = sdev->dma_dev.dev;
0957     schan->id = id;
0958 
0959     if (!schan->max_xfer_len)
0960         schan->max_xfer_len = PAGE_SIZE;
0961 
0962     spin_lock_init(&schan->chan_lock);
0963 
0964     /* Init descriptor management lists */
0965     INIT_LIST_HEAD(&schan->ld_queue);
0966     INIT_LIST_HEAD(&schan->ld_free);
0967 
0968     /* Add the channel to DMA device channel list */
0969     list_add_tail(&schan->dma_chan.device_node,
0970             &sdev->dma_dev.channels);
0971     sdev->schan[id] = schan;
0972 }
0973 EXPORT_SYMBOL(shdma_chan_probe);
0974 
0975 void shdma_chan_remove(struct shdma_chan *schan)
0976 {
0977     list_del(&schan->dma_chan.device_node);
0978 }
0979 EXPORT_SYMBOL(shdma_chan_remove);
0980 
0981 int shdma_init(struct device *dev, struct shdma_dev *sdev,
0982             int chan_num)
0983 {
0984     struct dma_device *dma_dev = &sdev->dma_dev;
0985 
0986     /*
0987      * Require all callbacks for now; they can trivially be made optional
0988      * later, as required
0989      */
0990     if (!sdev->ops ||
0991         !sdev->desc_size ||
0992         !sdev->ops->embedded_desc ||
0993         !sdev->ops->start_xfer ||
0994         !sdev->ops->setup_xfer ||
0995         !sdev->ops->set_slave ||
0996         !sdev->ops->desc_setup ||
0997         !sdev->ops->slave_addr ||
0998         !sdev->ops->channel_busy ||
0999         !sdev->ops->halt_channel ||
1000         !sdev->ops->desc_completed)
1001         return -EINVAL;
1002 
1003     sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
1004     if (!sdev->schan)
1005         return -ENOMEM;
1006 
1007     INIT_LIST_HEAD(&dma_dev->channels);
1008 
1009     /* Common and MEMCPY operations */
1010     dma_dev->device_alloc_chan_resources
1011         = shdma_alloc_chan_resources;
1012     dma_dev->device_free_chan_resources = shdma_free_chan_resources;
1013     dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
1014     dma_dev->device_tx_status = shdma_tx_status;
1015     dma_dev->device_issue_pending = shdma_issue_pending;
1016 
1017     /* Fields compulsory for DMA_SLAVE */
1018     dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
1019     dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
1020     dma_dev->device_config = shdma_config;
1021     dma_dev->device_terminate_all = shdma_terminate_all;
1022 
1023     dma_dev->dev = dev;
1024 
1025     return 0;
1026 }
1027 EXPORT_SYMBOL(shdma_init);
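
/*
 * Probe-time sketch for a hypothetical glue driver built on this library. The
 * mydmac_* callbacks, MYDMAC_NR_CHANNELS, struct mydmac_desc and the
 * per-channel data (mychan[], irqs[]) are assumptions; only the shdma_* and
 * dmaengine calls are real. Error unwinding is omitted for brevity.
 *
 *	static const struct shdma_ops mydmac_ops = {
 *		.embedded_desc	= mydmac_embedded_desc,
 *		.start_xfer	= mydmac_start_xfer,
 *		.setup_xfer	= mydmac_setup_xfer,
 *		.set_slave	= mydmac_set_slave,
 *		.desc_setup	= mydmac_desc_setup,
 *		.slave_addr	= mydmac_slave_addr,
 *		.channel_busy	= mydmac_channel_busy,
 *		.halt_channel	= mydmac_halt_channel,
 *		.desc_completed	= mydmac_desc_completed,
 *		.chan_irq	= mydmac_chan_irq,
 *	};
 *
 *	sdev->ops = &mydmac_ops;
 *	sdev->desc_size = sizeof(struct mydmac_desc);
 *	err = shdma_init(&pdev->dev, sdev, MYDMAC_NR_CHANNELS);
 *	if (err < 0)
 *		return err;
 *
 *	for (i = 0; i < MYDMAC_NR_CHANNELS; i++) {
 *		shdma_chan_probe(sdev, &mychan[i].shdma_chan, i);
 *		err = shdma_request_irq(&mychan[i].shdma_chan, irqs[i],
 *					IRQF_SHARED, dev_name(&pdev->dev));
 *		if (err < 0)
 *			return err;
 *	}
 *
 *	return dma_async_device_register(&sdev->dma_dev);
 */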
1028 
1029 void shdma_cleanup(struct shdma_dev *sdev)
1030 {
1031     kfree(sdev->schan);
1032 }
1033 EXPORT_SYMBOL(shdma_cleanup);
1034 
1035 static int __init shdma_enter(void)
1036 {
1037     shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
1038     if (!shdma_slave_used)
1039         return -ENOMEM;
1040     return 0;
1041 }
1042 module_init(shdma_enter);
1043 
1044 static void __exit shdma_exit(void)
1045 {
1046     bitmap_free(shdma_slave_used);
1047 }
1048 module_exit(shdma_exit);
1049 
1050 MODULE_LICENSE("GPL v2");
1051 MODULE_DESCRIPTION("SH-DMA driver base library");
1052 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");