// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which
 *   also fits the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 */
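
/*
 * Illustrative client usage (a sketch, not part of this driver): consumers
 * drive this engine through the generic dmaengine API. Assuming a channel
 * with the MEMCPY capability has already been requested, and with error
 * handling omitted, a single copy might look like:
 *
 *   tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *   cookie = dmaengine_submit(tx);
 *   dma_async_issue_pending(chan);
 *   ... poll with dma_async_is_tx_complete() or wait for the callback ...
 *   dma_release_channel(chan);
 */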

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)                 \
    dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)                 \
    dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */
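
/*
 * Note: the FSL_DMA_IN/FSL_DMA_OUT macros (from fsldma.h) are expected to
 * select the access width and byte order appropriate for the controller,
 * so the helpers below stay endian-agnostic.
 */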

static void set_sr(struct fsldma_chan *chan, u32 val)
{
    FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
    return FSL_DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
    FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
    return FSL_DMA_IN(chan, &chan->regs->mr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
    FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
    return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
    FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
    return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */
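
/*
 * As the helpers below show, the 85xx parts carry snoop attributes in the
 * upper 32 bits of the 64-bit source/destination address fields, while the
 * 83xx parts enable snooping via FSL_DMA_SNEN in the next-link address.
 */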

static void set_desc_cnt(struct fsldma_chan *chan,
                struct fsl_dma_ld_hw *hw, u32 count)
{
    hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
             struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
        ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
    hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
             struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
        ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
    hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
              struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
        ? FSL_DMA_SNEN : 0;
    hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
        ? FSL_DMA_SNEN : 0;

    desc->hw.next_ln_addr = CPU_TO_DMA(chan,
        DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
            | snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
    /* Reset the channel */
    set_mr(chan, 0);

    switch (chan->feature & FSL_DMA_IP_MASK) {
    case FSL_DMA_IP_85XX:
        /* Configure the channel for the following modes:
         * EIE - Error interrupt enable
         * EOLNIE - End of links interrupt enable
         * BWC - Bandwidth sharing among channels
         */
        set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
            | FSL_DMA_MR_EOLNIE);
        break;
    case FSL_DMA_IP_83XX:
        /* Configure the channel for the following modes:
         * EOTIE - End-of-transfer interrupt enable
         * PRC_RM - PCI read multiple
         */
        set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
        break;
    }
}

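/*
 * The channel counts as idle when the status register shows it is either
 * not busy (SR[CB] clear) or has halted (SR[CH] set).
 */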
static int dma_is_idle(struct fsldma_chan *chan)
{
    u32 sr = get_sr(chan);
    return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
    u32 mode;

    mode = get_mr(chan);

    if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
        set_bcr(chan, 0);
        mode |= FSL_DMA_MR_EMP_EN;
    } else {
        mode &= ~FSL_DMA_MR_EMP_EN;
    }

    if (chan->feature & FSL_DMA_CHAN_START_EXT) {
        mode |= FSL_DMA_MR_EMS_EN;
    } else {
        mode &= ~FSL_DMA_MR_EMS_EN;
        mode |= FSL_DMA_MR_CS;
    }

    set_mr(chan, mode);
}

static void dma_halt(struct fsldma_chan *chan)
{
    u32 mode;
    int i;

    /* read the mode register */
    mode = get_mr(chan);

    /*
     * The 85xx controller supports channel abort, which will stop
     * the current transfer. On 83xx, this bit is the transfer error
     * mask bit, which should not be changed.
     */
    if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
        mode |= FSL_DMA_MR_CA;
        set_mr(chan, mode);

        mode &= ~FSL_DMA_MR_CA;
    }

    /* stop the DMA controller */
    mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
    set_mr(chan, mode);

    /* wait for the DMA controller to become idle */
    for (i = 0; i < 100; i++) {
        if (dma_is_idle(chan))
            return;

        udelay(10);
    }

    if (!dma_is_idle(chan))
        chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When the DMA
 * transfers data from the source address (SA) with a loop size of 4,
 * it reads from SA, SA + 1, SA + 2, SA + 3, then loops back to SA,
 * SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
    u32 mode;

    mode = get_mr(chan);

    switch (size) {
    case 0:
        mode &= ~FSL_DMA_MR_SAHE;
        break;
    case 1:
    case 2:
    case 4:
    case 8:
        mode &= ~FSL_DMA_MR_SAHTS_MASK;
        mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
        break;
    }

    set_mr(chan, mode);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * transfers data to the destination address (TA) with a loop size of 4,
 * it writes to TA, TA + 1, TA + 2, TA + 3, then loops back to TA,
 * TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
    u32 mode;

    mode = get_mr(chan);

    switch (size) {
    case 0:
        mode &= ~FSL_DMA_MR_DAHE;
        break;
    case 1:
    case 2:
    case 4:
    case 8:
        mode &= ~FSL_DMA_MR_DAHTS_MASK;
        mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
        break;
    }

    set_mr(chan, mode);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is the number of bytes allowed to transfer before
 * the channel is paused; a new assertion of DREQ# then resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
    u32 mode;

    BUG_ON(size > 1024);

    mode = get_mr(chan);
    mode &= ~FSL_DMA_MR_BWC_MASK;
    mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;

    set_mr(chan, mode);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
    if (enable)
        chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
    else
        chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
    if (enable)
        chan->feature |= FSL_DMA_CHAN_START_EXT;
    else
        chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

int fsl_dma_external_start(struct dma_chan *dchan, int enable)
{
    struct fsldma_chan *chan;

    if (!dchan)
        return -EINVAL;

    chan = to_fsl_chan(dchan);

    fsl_chan_toggle_ext_start(chan, enable);
    return 0;
}
EXPORT_SYMBOL_GPL(fsl_dma_external_start);

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
    struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

    if (list_empty(&chan->ld_pending))
        goto out_splice;

    /*
     * Add the hardware descriptor to the chain of hardware descriptors
     * that already exists in memory.
     *
     * This will un-set the EOL bit of the existing transaction, and the
     * last link in this transaction will become the EOL descriptor.
     */
    set_desc_next(chan, &tail->hw, desc->async_tx.phys);

    /*
     * Add the software descriptor and all children to the list
     * of pending transactions
     */
out_splice:
    list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
    struct fsldma_chan *chan = to_fsl_chan(tx->chan);
    struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
    struct fsl_desc_sw *child;
    dma_cookie_t cookie = -EINVAL;

    spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
    if (unlikely(chan->pm_state != RUNNING)) {
        chan_dbg(chan, "cannot submit due to suspend\n");
        spin_unlock_bh(&chan->desc_lock);
        return -1;
    }
#endif

    /*
     * assign cookies to all of the software descriptors
     * that make up this transaction
     */
    list_for_each_entry(child, &desc->tx_list, node) {
        cookie = dma_cookie_assign(&child->async_tx);
    }

    /* put this transaction onto the tail of the pending queue */
    append_ld_queue(chan, desc);

    spin_unlock_bh(&chan->desc_lock);

    return cookie;
}

/**
 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 * @desc: descriptor to be freed
 */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
        struct fsl_desc_sw *desc)
{
    list_del(&desc->node);
    chan_dbg(chan, "LD %p free\n", desc);
    dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
    struct fsl_desc_sw *desc;
    dma_addr_t pdesc;

    desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
    if (!desc) {
        chan_dbg(chan, "out of memory for link descriptor\n");
        return NULL;
    }

    INIT_LIST_HEAD(&desc->tx_list);
    dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
    desc->async_tx.tx_submit = fsl_dma_tx_submit;
    desc->async_tx.phys = pdesc;

    chan_dbg(chan, "LD %p allocated\n", desc);

    return desc;
}

/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
    struct fsl_desc_sw *desc, *_desc;

    /* Run the callback for each descriptor, in order */
    list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
        if (async_tx_test_ack(&desc->async_tx))
            fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 * @cookie: Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
        struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
    struct dma_async_tx_descriptor *txd = &desc->async_tx;
    dma_cookie_t ret = cookie;

    BUG_ON(txd->cookie < 0);

    if (txd->cookie > 0) {
        ret = txd->cookie;

        dma_descriptor_unmap(txd);
        /* Run the link descriptor callback function */
        dmaengine_desc_get_callback_invoke(txd, NULL);
    }

    /* Run any dependencies */
    dma_run_dependencies(txd);

    return ret;
}

/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if it has been acked by the async_tx API,
 * otherwise move it to the ld_completed queue.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
        struct fsl_desc_sw *desc)
{
    /* Remove from the list of transactions */
    list_del(&desc->node);

    /*
     * the client is allowed to attach dependent operations
     * until 'ack' is set
     */
    if (!async_tx_test_ack(&desc->async_tx)) {
        /*
         * Move this descriptor to the list of descriptors which is
         * completed, but still awaiting the 'ack' bit to be set.
         */
        list_add_tail(&desc->node, &chan->ld_completed);
        return;
    }

    dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
    struct fsl_desc_sw *desc;

    /*
     * If the list of pending descriptors is empty, then we
     * don't need to do any work at all
     */
    if (list_empty(&chan->ld_pending)) {
        chan_dbg(chan, "no pending LDs\n");
        return;
    }

    /*
     * The DMA controller is not idle, which means that the interrupt
     * handler will start any queued transactions when it runs after
     * this transaction finishes
     */
    if (!chan->idle) {
        chan_dbg(chan, "DMA controller still busy\n");
        return;
    }

    /*
     * If there are some link descriptors which have not been
     * transferred, we need to start the controller
     */

    /*
     * Move all elements from the queue of pending transactions
     * onto the list of running transactions
     */
    chan_dbg(chan, "idle, starting controller\n");
    desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
    list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

    /*
     * The 85xx DMA controller doesn't clear the channel start bit
     * automatically at the end of a transfer. Therefore we must clear
     * it in software before starting the transfer.
     */
    if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
        u32 mode;

        mode = get_mr(chan);
        mode &= ~FSL_DMA_MR_CS;
        set_mr(chan, mode);
    }

    /*
     * Program the descriptor's address into the DMA controller,
     * then start the DMA transaction
     */
    set_cdar(chan, desc->async_tx.phys);
    get_cdar(chan);

    dma_start(chan);
    chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - clean up link descriptors which have
 * completed, moving them to ld_completed until the 'ack' flag is set
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if the 'ack' flag is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
    struct fsl_desc_sw *desc, *_desc;
    dma_cookie_t cookie = 0;
    dma_addr_t curr_phys = get_cdar(chan);
    int seen_current = 0;

    fsldma_clean_completed_descriptor(chan);

    /* Run the callback for each descriptor, in order */
    list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
        /*
         * do not advance past the current descriptor loaded into the
         * hardware channel, subsequent descriptors are either in
         * process or have not been submitted
         */
        if (seen_current)
            break;

        /*
         * stop the search if we reach the current descriptor and the
         * channel is busy
         */
        if (desc->async_tx.phys == curr_phys) {
            seen_current = 1;
            if (!dma_is_idle(chan))
                break;
        }

        cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

        fsldma_clean_running_descriptor(chan, desc);
    }

    /*
     * Start any pending transactions automatically
     *
     * In the ideal case, we keep the DMA controller busy while we go
     * ahead and free the descriptors below.
     */
    fsl_chan_xfer_ld_queue(chan);

    if (cookie > 0)
        chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);

    /* Has this channel already been allocated? */
    if (chan->desc_pool)
        return 1;

    /*
     * The descriptor must be 32-byte aligned to meet the
     * FSL DMA specification requirement.
     */
    chan->desc_pool = dma_pool_create(chan->name, chan->dev,
                      sizeof(struct fsl_desc_sw),
                      __alignof__(struct fsl_desc_sw), 0);
    if (!chan->desc_pool) {
        chan_err(chan, "unable to allocate descriptor pool\n");
        return -ENOMEM;
    }

    /* there is at least one descriptor free to be allocated */
    return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
                  struct list_head *list)
{
    struct fsl_desc_sw *desc, *_desc;

    list_for_each_entry_safe(desc, _desc, list, node)
        fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
                      struct list_head *list)
{
    struct fsl_desc_sw *desc, *_desc;

    list_for_each_entry_safe_reverse(desc, _desc, list, node)
        fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);

    chan_dbg(chan, "free all channel resources\n");
    spin_lock_bh(&chan->desc_lock);
    fsldma_cleanup_descriptors(chan);
    fsldma_free_desc_list(chan, &chan->ld_pending);
    fsldma_free_desc_list(chan, &chan->ld_running);
    fsldma_free_desc_list(chan, &chan->ld_completed);
    spin_unlock_bh(&chan->desc_lock);

    dma_pool_destroy(chan->desc_pool);
    chan->desc_pool = NULL;
}

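/*
 * Build a chain of hardware link descriptors for a memcpy; each descriptor
 * covers at most FSL_DMA_BCR_MAX_CNT bytes. The intermediate descriptors are
 * pre-acked in the loop below, since only the last one is handed back to the
 * client.
 */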
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
    dma_addr_t dma_dst, dma_addr_t dma_src,
    size_t len, unsigned long flags)
{
    struct fsldma_chan *chan;
    struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
    size_t copy;

    if (!dchan)
        return NULL;

    if (!len)
        return NULL;

    chan = to_fsl_chan(dchan);

    do {

        /* Allocate the link descriptor from DMA pool */
        new = fsl_dma_alloc_descriptor(chan);
        if (!new) {
            chan_err(chan, "%s\n", msg_ld_oom);
            goto fail;
        }

        copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

        set_desc_cnt(chan, &new->hw, copy);
        set_desc_src(chan, &new->hw, dma_src);
        set_desc_dst(chan, &new->hw, dma_dst);

        if (!first)
            first = new;
        else
            set_desc_next(chan, &prev->hw, new->async_tx.phys);

        new->async_tx.cookie = 0;
        async_tx_ack(&new->async_tx);

        prev = new;
        len -= copy;
        dma_src += copy;
        dma_dst += copy;

        /* Insert the link descriptor into the LD ring */
        list_add_tail(&new->node, &first->tx_list);
    } while (len);

    new->async_tx.flags = flags; /* client is in control of this ack */
    new->async_tx.cookie = -EBUSY;

    /* Set End-of-link on the last link descriptor of the new list */
    set_ld_eol(chan, new);

    return &first->async_tx;

fail:
    if (!first)
        return NULL;

    fsldma_free_desc_list_reverse(chan, &first->tx_list);
    return NULL;
}


static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
    struct fsldma_chan *chan;

    if (!dchan)
        return -EINVAL;

    chan = to_fsl_chan(dchan);

    spin_lock_bh(&chan->desc_lock);

    /* Halt the DMA engine */
    dma_halt(chan);

    /* Remove and free all of the descriptors in the LD queue */
    fsldma_free_desc_list(chan, &chan->ld_pending);
    fsldma_free_desc_list(chan, &chan->ld_running);
    fsldma_free_desc_list(chan, &chan->ld_completed);
    chan->idle = true;

    spin_unlock_bh(&chan->desc_lock);
    return 0;
}

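/*
 * Translate a dma_slave_config into the controller's request count: the
 * burst size in bytes is the address width multiplied by the maximum burst
 * length for the configured direction.
 */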
static int fsl_dma_device_config(struct dma_chan *dchan,
                 struct dma_slave_config *config)
{
    struct fsldma_chan *chan;
    int size;

    if (!dchan)
        return -EINVAL;

    chan = to_fsl_chan(dchan);

    /* make sure the channel supports setting burst size */
    if (!chan->set_request_count)
        return -ENXIO;

    /* we set the controller burst size depending on direction */
    if (config->direction == DMA_MEM_TO_DEV)
        size = config->dst_addr_width * config->dst_maxburst;
    else
        size = config->src_addr_width * config->src_maxburst;

    chan->set_request_count(chan, size);
    return 0;
}


/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);

    spin_lock_bh(&chan->desc_lock);
    fsl_chan_xfer_ld_queue(chan);
    spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
                    dma_cookie_t cookie,
                    struct dma_tx_state *txstate)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);
    enum dma_status ret;

    ret = dma_cookie_status(dchan, cookie, txstate);
    if (ret == DMA_COMPLETE)
        return ret;

    spin_lock_bh(&chan->desc_lock);
    fsldma_cleanup_descriptors(chan);
    spin_unlock_bh(&chan->desc_lock);

    return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
    struct fsldma_chan *chan = data;
    u32 stat;

    /* save and clear the status register */
    stat = get_sr(chan);
    set_sr(chan, stat);
    chan_dbg(chan, "irq: stat = 0x%x\n", stat);

    /* check that this was really our device */
    stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
    if (!stat)
        return IRQ_NONE;

    if (stat & FSL_DMA_SR_TE)
        chan_err(chan, "Transfer Error!\n");

    /*
     * Programming Error
     * The DMA_INTERRUPT async_tx is a NULL transfer, which will
     * trigger a PE interrupt.
     */
    if (stat & FSL_DMA_SR_PE) {
        chan_dbg(chan, "irq: Programming Error INT\n");
        stat &= ~FSL_DMA_SR_PE;
        if (get_bcr(chan) != 0)
            chan_err(chan, "Programming Error!\n");
    }

    /*
     * On the MPC8349, the EOCDI event requires updating the cookie
     * and starting the next transfer if one exists.
     */
    if (stat & FSL_DMA_SR_EOCDI) {
        chan_dbg(chan, "irq: End-of-Chain link INT\n");
        stat &= ~FSL_DMA_SR_EOCDI;
    }

    /*
     * If the current transfer is the end-of-transfer, we should
     * clear the Channel Start bit to prepare for the next transfer.
     */
    if (stat & FSL_DMA_SR_EOLNI) {
        chan_dbg(chan, "irq: End-of-link INT\n");
        stat &= ~FSL_DMA_SR_EOLNI;
    }

    /* check that the DMA controller is really idle */
    if (!dma_is_idle(chan))
        chan_err(chan, "irq: controller not idle!\n");

    /* check that we handled all of the bits */
    if (stat)
        chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

    /*
     * Schedule the tasklet to handle all cleanup of the current
     * transaction. It will start a new transaction if there is
     * one pending.
     */
    tasklet_schedule(&chan->tasklet);
    chan_dbg(chan, "irq: Exit\n");
    return IRQ_HANDLED;
}

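/*
 * Deferred work scheduled by the channel interrupt: mark the hardware idle
 * and reap any descriptors that have completed.
 */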
static void dma_do_tasklet(struct tasklet_struct *t)
{
    struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);

    chan_dbg(chan, "tasklet entry\n");

    spin_lock(&chan->desc_lock);

    /* the hardware is now idle and ready for more */
    chan->idle = true;

    /* Run all cleanup for descriptors which have been completed */
    fsldma_cleanup_descriptors(chan);

    spin_unlock(&chan->desc_lock);

    chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
    struct fsldma_device *fdev = data;
    struct fsldma_chan *chan;
    unsigned int handled = 0;
    u32 gsr, mask;
    int i;

    gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
                           : in_le32(fdev->regs);
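    /*
     * Each channel owns one byte of the 32-bit global status word,
     * with channel 0 reporting in the most significant byte.
     */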
    mask = 0xff000000;
    dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        if (gsr & mask) {
            dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
            fsldma_chan_irq(irq, chan);
            handled++;
        }

        gsr &= ~mask;
        mask >>= 8;
    }

    return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
    struct fsldma_chan *chan;
    int i;

    if (fdev->irq) {
        dev_dbg(fdev->dev, "free per-controller IRQ\n");
        free_irq(fdev->irq, fdev);
        return;
    }

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (chan && chan->irq) {
            chan_dbg(chan, "free per-channel IRQ\n");
            free_irq(chan->irq, chan);
        }
    }
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
    struct fsldma_chan *chan;
    int ret;
    int i;

    /* if we have a per-controller IRQ, use that */
    if (fdev->irq) {
        dev_dbg(fdev->dev, "request per-controller IRQ\n");
        ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
                  "fsldma-controller", fdev);
        return ret;
    }

    /* no per-controller IRQ, use the per-channel IRQs */
    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        if (!chan->irq) {
            chan_err(chan, "interrupts property missing in device tree\n");
            ret = -ENODEV;
            goto out_unwind;
        }

        chan_dbg(chan, "request per-channel IRQ\n");
        ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
                  "fsldma-chan", chan);
        if (ret) {
            chan_err(chan, "unable to request per-channel IRQ\n");
            goto out_unwind;
        }
    }

    return 0;

out_unwind:
    for (/* none */; i >= 0; i--) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        if (!chan->irq)
            continue;

        free_irq(chan->irq, chan);
    }

    return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
    struct device_node *node, u32 feature, const char *compatible)
{
    struct fsldma_chan *chan;
    struct resource res;
    int err;

    /* alloc channel */
    chan = kzalloc(sizeof(*chan), GFP_KERNEL);
    if (!chan) {
        err = -ENOMEM;
        goto out_return;
    }

    /* ioremap registers for use */
    chan->regs = of_iomap(node, 0);
    if (!chan->regs) {
        dev_err(fdev->dev, "unable to ioremap registers\n");
        err = -ENOMEM;
        goto out_free_chan;
    }

    err = of_address_to_resource(node, 0, &res);
    if (err) {
        dev_err(fdev->dev, "unable to find 'reg' property\n");
        goto out_iounmap_regs;
    }

    chan->feature = feature;
    if (!fdev->feature)
        fdev->feature = chan->feature;

    /*
     * If the DMA device's feature differs from the feature
     * of its channels, report the bug
     */
    WARN_ON(fdev->feature != chan->feature);

    chan->dev = fdev->dev;
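    /*
     * Channel register blocks are 0x80 bytes apart; derive the channel
     * id from the block's offset within the controller's register space.
     */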
    chan->id = (res.start & 0xfff) < 0x300 ?
           ((res.start - 0x100) & 0xfff) >> 7 :
           ((res.start - 0x200) & 0xfff) >> 7;
    if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
        dev_err(fdev->dev, "too many channels for device\n");
        err = -EINVAL;
        goto out_iounmap_regs;
    }

    fdev->chan[chan->id] = chan;
    tasklet_setup(&chan->tasklet, dma_do_tasklet);
    snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

    /* Initialize the channel */
    dma_init(chan);

    /* Clear cdar registers */
    set_cdar(chan, 0);

    switch (chan->feature & FSL_DMA_IP_MASK) {
    case FSL_DMA_IP_85XX:
        chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
        fallthrough;
    case FSL_DMA_IP_83XX:
        chan->toggle_ext_start = fsl_chan_toggle_ext_start;
        chan->set_src_loop_size = fsl_chan_set_src_loop_size;
        chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
        chan->set_request_count = fsl_chan_set_request_count;
    }

    spin_lock_init(&chan->desc_lock);
    INIT_LIST_HEAD(&chan->ld_pending);
    INIT_LIST_HEAD(&chan->ld_running);
    INIT_LIST_HEAD(&chan->ld_completed);
    chan->idle = true;
#ifdef CONFIG_PM
    chan->pm_state = RUNNING;
#endif

    chan->common.device = &fdev->common;
    dma_cookie_init(&chan->common);

    /* find the IRQ line, if it exists in the device tree */
    chan->irq = irq_of_parse_and_map(node, 0);

    /* Add the channel to DMA device channel list */
    list_add_tail(&chan->common.device_node, &fdev->common.channels);

    dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
         chan->irq ? chan->irq : fdev->irq);

    return 0;

out_iounmap_regs:
    iounmap(chan->regs);
out_free_chan:
    kfree(chan);
out_return:
    return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
    irq_dispose_mapping(chan->irq);
    list_del(&chan->common.device_node);
    iounmap(chan->regs);
    kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
    struct fsldma_device *fdev;
    struct device_node *child;
    unsigned int i;
    int err;

    fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
    if (!fdev) {
        err = -ENOMEM;
        goto out_return;
    }

    fdev->dev = &op->dev;
    INIT_LIST_HEAD(&fdev->common.channels);

    /* ioremap the registers for use */
    fdev->regs = of_iomap(op->dev.of_node, 0);
    if (!fdev->regs) {
        dev_err(&op->dev, "unable to ioremap registers\n");
        err = -ENOMEM;
        goto out_free;
    }

    /* map the channel IRQ if it exists, but don't hook up the handler yet */
    fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

    dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
    dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
    fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
    fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
    fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
    fdev->common.device_tx_status = fsl_tx_status;
    fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
    fdev->common.device_config = fsl_dma_device_config;
    fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
    fdev->common.dev = &op->dev;

    fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
    fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
    fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

    dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

    platform_set_drvdata(op, fdev);

    /*
     * We cannot use of_platform_bus_probe() because there is no
     * of_platform_bus_remove(). Instead, we manually instantiate every DMA
     * channel object.
     */
    for_each_child_of_node(op->dev.of_node, child) {
        if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
            fsl_dma_chan_probe(fdev, child,
                FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
                "fsl,eloplus-dma-channel");
        }

        if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
            fsl_dma_chan_probe(fdev, child,
                FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
                "fsl,elo-dma-channel");
        }
    }

    /*
     * Hookup the IRQ handler(s)
     *
     * If we have a per-controller interrupt, we prefer that to the
     * per-channel interrupts to reduce the number of shared interrupt
     * handlers on the same IRQ line
     */
    err = fsldma_request_irqs(fdev);
    if (err) {
        dev_err(fdev->dev, "unable to request IRQs\n");
        goto out_free_fdev;
    }

    dma_async_device_register(&fdev->common);
    return 0;

out_free_fdev:
    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        if (fdev->chan[i])
            fsl_dma_chan_remove(fdev->chan[i]);
    }
    irq_dispose_mapping(fdev->irq);
    iounmap(fdev->regs);
out_free:
    kfree(fdev);
out_return:
    return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
    struct fsldma_device *fdev;
    unsigned int i;

    fdev = platform_get_drvdata(op);
    dma_async_device_unregister(&fdev->common);

    fsldma_free_irqs(fdev);

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        if (fdev->chan[i])
            fsl_dma_chan_remove(fdev->chan[i]);
    }
    irq_dispose_mapping(fdev->irq);

    iounmap(fdev->regs);
    kfree(fdev);

    return 0;
}

#ifdef CONFIG_PM
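/*
 * Late-suspend handler: snapshot each channel's mode register so it can be
 * restored on resume, and refuse to suspend while any channel is still busy.
 */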
static int fsldma_suspend_late(struct device *dev)
{
    struct fsldma_device *fdev = dev_get_drvdata(dev);
    struct fsldma_chan *chan;
    int i;

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        spin_lock_bh(&chan->desc_lock);
        if (unlikely(!chan->idle))
            goto out;
        chan->regs_save.mr = get_mr(chan);
        chan->pm_state = SUSPENDED;
        spin_unlock_bh(&chan->desc_lock);
    }
    return 0;

out:
    for (; i >= 0; i--) {
        chan = fdev->chan[i];
        if (!chan)
            continue;
        chan->pm_state = RUNNING;
        spin_unlock_bh(&chan->desc_lock);
    }
    return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
    struct fsldma_device *fdev = dev_get_drvdata(dev);
    struct fsldma_chan *chan;
    u32 mode;
    int i;

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        spin_lock_bh(&chan->desc_lock);
        mode = chan->regs_save.mr
            & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
        set_mr(chan, mode);
        chan->pm_state = RUNNING;
        spin_unlock_bh(&chan->desc_lock);
    }

    return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
    .suspend_late   = fsldma_suspend_late,
    .resume_early   = fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
    { .compatible = "fsl,elo3-dma", },
    { .compatible = "fsl,eloplus-dma", },
    { .compatible = "fsl,elo-dma", },
    {}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
    .driver = {
        .name = "fsl-elo-dma",
        .of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
        .pm = &fsldma_pm_ops,
#endif
    },
    .probe = fsldma_of_probe,
    .remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
    pr_info("Freescale Elo series DMA driver\n");
    return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
    platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");