Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 #include <linux/delay.h>
0003 #include <linux/dmaengine.h>
0004 #include <linux/dma-mapping.h>
0005 #include <linux/platform_device.h>
0006 #include <linux/module.h>
0007 #include <linux/of.h>
0008 #include <linux/slab.h>
0009 #include <linux/of_dma.h>
0010 #include <linux/of_irq.h>
0011 #include <linux/dmapool.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/of_address.h>
0014 #include <linux/pm_runtime.h>
0015 #include "../dmaengine.h"
0016 
0017 #define DESC_TYPE   27
0018 #define DESC_TYPE_HOST  0x10
0019 #define DESC_TYPE_TEARD 0x13
0020 
0021 #define TD_DESC_IS_RX   (1 << 16)
0022 #define TD_DESC_DMA_NUM 10
0023 
0024 #define DESC_LENGTH_BITS_NUM    21
0025 
0026 #define DESC_TYPE_USB   (5 << 26)
0027 #define DESC_PD_COMPLETE    (1 << 31)
0028 
0029 /* DMA engine */
0030 #define DMA_TDFDQ   4
0031 #define DMA_TXGCR(x)    (0x800 + (x) * 0x20)
0032 #define DMA_RXGCR(x)    (0x808 + (x) * 0x20)
0033 #define RXHPCRA0        4
0034 
0035 #define GCR_CHAN_ENABLE     (1 << 31)
0036 #define GCR_TEARDOWN        (1 << 30)
0037 #define GCR_STARV_RETRY     (1 << 24)
0038 #define GCR_DESC_TYPE_HOST  (1 << 14)
0039 
0040 /* DMA scheduler */
0041 #define DMA_SCHED_CTRL      0
0042 #define DMA_SCHED_CTRL_EN   (1 << 31)
0043 #define DMA_SCHED_WORD(x)   ((x) * 4 + 0x800)
0044 
0045 #define SCHED_ENTRY0_CHAN(x)    ((x) << 0)
0046 #define SCHED_ENTRY0_IS_RX  (1 << 7)
0047 
0048 #define SCHED_ENTRY1_CHAN(x)    ((x) << 8)
0049 #define SCHED_ENTRY1_IS_RX  (1 << 15)
0050 
0051 #define SCHED_ENTRY2_CHAN(x)    ((x) << 16)
0052 #define SCHED_ENTRY2_IS_RX  (1 << 23)
0053 
0054 #define SCHED_ENTRY3_CHAN(x)    ((x) << 24)
0055 #define SCHED_ENTRY3_IS_RX  (1 << 31)
0056 
0057 /* Queue manager */
0058 /* 4 KiB of memory for descriptors, 2 for each endpoint */
0059 #define ALLOC_DECS_NUM      128
0060 #define DESCS_AREAS     1
0061 #define TOTAL_DESCS_NUM     (ALLOC_DECS_NUM * DESCS_AREAS)
0062 #define QMGR_SCRATCH_SIZE   (TOTAL_DESCS_NUM * 4)
0063 
0064 #define QMGR_LRAM0_BASE     0x80
0065 #define QMGR_LRAM_SIZE      0x84
0066 #define QMGR_LRAM1_BASE     0x88
0067 #define QMGR_MEMBASE(x)     (0x1000 + (x) * 0x10)
0068 #define QMGR_MEMCTRL(x)     (0x1004 + (x) * 0x10)
0069 #define QMGR_MEMCTRL_IDX_SH 16
0070 #define QMGR_MEMCTRL_DESC_SH    8
0071 
0072 #define QMGR_PEND(x)    (0x90 + (x) * 4)
0073 
0074 #define QMGR_PENDING_SLOT_Q(x)  (x / 32)
0075 #define QMGR_PENDING_BIT_Q(x)   (x % 32)
0076 
0077 #define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10)
0078 #define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10)
0079 #define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10)
0080 #define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10)
0081 
0082 /* Packet Descriptor */
0083 #define PD2_ZERO_LENGTH     (1 << 19)
0084 
/*
 * struct cppi41_channel - software state of one directional DMA channel.
 *
 * Each hardware port gets two of these, one TX and one RX (see
 * cppi41_add_chans()).  @desc/@desc_phys point at this channel's fixed
 * slot in the shared coherent descriptor area; the td_* fields track
 * progress of the teardown state machine in cppi41_tear_down_chan().
 */
struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;		/* channel global configuration register */
	int is_tx;
	u32 residue;			/* bytes left untransferred, set in IRQ handler */

	unsigned int q_num;		/* hardware submit queue */
	unsigned int q_comp_num;	/* hardware completion queue */
	unsigned int port_num;

	/* teardown bookkeeping, only valid while a teardown is in flight */
	unsigned td_retry;
	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;

	struct list_head node;		/* Node for pending list */
};

/* Hardware host packet descriptor: eight 32-bit words, 32-byte aligned. */
struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

/* Submit/complete hardware queue pair assigned to one channel. */
struct chan_queues {
	u16 submit;
	u16 complete;
};

/* struct cppi41_dd - per-device (controller) driver data. */
struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;		/* queue manager linking RAM */
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;		/* coherent descriptor area */
	dma_addr_t descs_phys;
	u32 first_td_desc;		/* index of the first teardown descriptor */
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
	u16 first_completion_queue;
	u16 qmgr_num_pend;
	u32 n_chans;
	u8 platform;

	struct list_head pending;	/* Pending queued transfers */
	spinlock_t lock;		/* Lock for pending list */

	/* context for suspend/resume */
	unsigned int dma_tdfdq;

	bool is_suspended;
};
0154 
0155 static struct chan_queues am335x_usb_queues_tx[] = {
0156     /* USB0 ENDP 1 */
0157     [ 0] = { .submit = 32, .complete =  93},
0158     [ 1] = { .submit = 34, .complete =  94},
0159     [ 2] = { .submit = 36, .complete =  95},
0160     [ 3] = { .submit = 38, .complete =  96},
0161     [ 4] = { .submit = 40, .complete =  97},
0162     [ 5] = { .submit = 42, .complete =  98},
0163     [ 6] = { .submit = 44, .complete =  99},
0164     [ 7] = { .submit = 46, .complete = 100},
0165     [ 8] = { .submit = 48, .complete = 101},
0166     [ 9] = { .submit = 50, .complete = 102},
0167     [10] = { .submit = 52, .complete = 103},
0168     [11] = { .submit = 54, .complete = 104},
0169     [12] = { .submit = 56, .complete = 105},
0170     [13] = { .submit = 58, .complete = 106},
0171     [14] = { .submit = 60, .complete = 107},
0172 
0173     /* USB1 ENDP1 */
0174     [15] = { .submit = 62, .complete = 125},
0175     [16] = { .submit = 64, .complete = 126},
0176     [17] = { .submit = 66, .complete = 127},
0177     [18] = { .submit = 68, .complete = 128},
0178     [19] = { .submit = 70, .complete = 129},
0179     [20] = { .submit = 72, .complete = 130},
0180     [21] = { .submit = 74, .complete = 131},
0181     [22] = { .submit = 76, .complete = 132},
0182     [23] = { .submit = 78, .complete = 133},
0183     [24] = { .submit = 80, .complete = 134},
0184     [25] = { .submit = 82, .complete = 135},
0185     [26] = { .submit = 84, .complete = 136},
0186     [27] = { .submit = 86, .complete = 137},
0187     [28] = { .submit = 88, .complete = 138},
0188     [29] = { .submit = 90, .complete = 139},
0189 };
0190 
/*
 * am335x RX channels: fixed submit/complete queue assignment per endpoint.
 */
static const struct chan_queues am335x_usb_queues_rx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit =  1, .complete = 109},
	[ 1] = { .submit =  2, .complete = 110},
	[ 2] = { .submit =  3, .complete = 111},
	[ 3] = { .submit =  4, .complete = 112},
	[ 4] = { .submit =  5, .complete = 113},
	[ 5] = { .submit =  6, .complete = 114},
	[ 6] = { .submit =  7, .complete = 115},
	[ 7] = { .submit =  8, .complete = 116},
	[ 8] = { .submit =  9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 ENDP 1 */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};

/* da8xx TX channels: all four share completion queue 24. */
static const struct chan_queues da8xx_usb_queues_tx[] = {
	[0] = { .submit =  16, .complete = 24},
	[1] = { .submit =  18, .complete = 24},
	[2] = { .submit =  20, .complete = 24},
	[3] = { .submit =  22, .complete = 24},
};

/* da8xx RX channels: all four share completion queue 26. */
static const struct chan_queues da8xx_usb_queues_rx[] = {
	[0] = { .submit =  1, .complete = 26},
	[1] = { .submit =  3, .complete = 26},
	[2] = { .submit =  5, .complete = 26},
	[3] = { .submit =  7, .complete = 26},
};

/* SoC-specific configuration, selected via the OF match table. */
struct cppi_glue_infos {
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;		/* teardown descriptor queue pair */
	u16 first_completion_queue;
	u16 qmgr_num_pend;			/* number of QMGR pending registers */
};
0248 
/* Convert a generic dma_chan into its enclosing cppi41_channel. */
static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}
0253 
/*
 * Map a popped descriptor bus address back to the channel that owns it
 * and release that chan_busy[] slot together with the runtime-PM
 * usecount taken in push_desc_queue().  Returns NULL if @desc lies
 * outside our descriptor area (e.g. a teardown descriptor).
 */
static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}

	/* Descriptor slots and chan_busy[] entries are index-parallel. */
	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DECS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return c;
}
0277 
/*
 * Raw register accessors.  __raw_writel()/__raw_readl() are used
 * deliberately (no implicit barrier); callers insert __iowmb()/__iormb()
 * where ordering against descriptor memory matters.
 */
static void cppi_writel(u32 val, void *__iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void *__iomem *mem)
{
	return __raw_readl(mem);
}
0287 
0288 static u32 pd_trans_len(u32 val)
0289 {
0290     return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
0291 }
0292 
/*
 * Pop one descriptor address from hardware queue @queue_num.  Reading
 * register D dequeues; the low 5 bits carry the descriptor size hint
 * and are masked off to recover the 32-byte-aligned address.  Returns
 * 0 when the queue is empty.
 */
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
	desc &= ~0x1f;
	return desc;
}
0301 
/*
 * Interrupt handler: scan the queue manager's pending registers, pop
 * every descriptor waiting on a completion queue and invoke the
 * dmaengine callback of the owning channel.
 */
static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	u16 first_completion_queue = cdd->first_completion_queue;
	u16 qmgr_num_pend = cdd->qmgr_num_pend;
	struct cppi41_channel *c;
	int i;

	/* Only queues >= first_completion_queue signal completions. */
	for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
			i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
			u32 mask;
			/* set corresponding bit for completion Q 93 */
			mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
			/* do not set bits for queues less than Q 93 */
			mask--;
			/* now invert and keep only Q 93+ set */
			val &= ~mask;
		}

		/* Order the register read against descriptor memory reads. */
		if (val)
			__iormb();

		while (val) {
			u32 desc, len;

			/*
			 * This should never trigger, see the comments in
			 * push_desc_queue()
			 */
			WARN_ON(cdd->is_suspended);

			q_num = __fls(val);		/* highest set bit in this slot */
			val &= ~(1 << q_num);
			q_num += 32 * i;		/* bit position -> global queue number */
			desc = cppi41_pop_desc(cdd, q_num);
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
						q_num, desc);
				continue;
			}

			/* Zero-length packets carry no payload bytes. */
			if (c->desc->pd2 & PD2_ZERO_LENGTH)
				len = 0;
			else
				len = pd_trans_len(c->desc->pd0);

			/* residue = requested buffer size - bytes transferred */
			c->residue = pd_trans_len(c->desc->pd6) - len;
			dma_cookie_complete(&c->txd);
			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
		}
	}
	return IRQ_HANDLED;
}
0361 
0362 static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
0363 {
0364     dma_cookie_t cookie;
0365 
0366     cookie = dma_cookie_assign(tx);
0367 
0368     return cookie;
0369 }
0370 
/*
 * dmaengine ->device_alloc_chan_resources: resume the device, set up
 * the channel's cookie/descriptor state and, for RX channels, program
 * the submit queue number into the channel's RXHPCRA0 register.
 */
static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
			__func__, error);
		/* drop the usecount taken even on failed get_sync */
		pm_runtime_put_noidle(cdd->ddev.dev);

		return error;
	}

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return 0;
}
0398 
/*
 * dmaengine ->device_free_chan_resources: nothing is allocated per
 * channel, so just sanity-check that no transfers remain pending and
 * balance the runtime-PM reference.
 */
static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);

		return;
	}

	WARN_ON(!list_empty(&cdd->pending));

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}
0417 
/*
 * dmaengine ->device_tx_status: report cookie state plus the residue
 * recorded by the interrupt handler.
 */
static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);

	dma_set_residue(txstate, c->residue);

	return ret;
}
0430 
/*
 * Enable the channel and push its descriptor onto the hardware submit
 * queue.  Caller holds cdd->lock (see the comment above
 * cppi41_run_queue()).
 */
static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	c->residue = 0;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		/* RX channels also need the completion queue programmed */
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel() so we have to make sure
	 * that the DMA descriptor in coherent memory made to the main memory
	 * before starting the dma engine.
	 */
	__iowmb();

	/*
	 * DMA transfers can take at least 200ms to complete with USB mass
	 * storage connected. To prevent autosuspend timeouts, we must use
	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
	 * outcome of the transfer.
	 */
	pm_runtime_get(cdd->ddev.dev);

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

	/* low 5 bits of the queue D write encode (descriptor size - 24) / 4 */
	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
0474 
/*
 * Caller must hold cdd->lock to prevent push_desc_queue()
 * getting called out of order. We have both cppi41_dma_issue_pending()
 * and cppi41_runtime_resume() call this function.
 */
static void cppi41_run_queue(struct cppi41_dd *cdd)
{
	struct cppi41_channel *c, *_c;

	/* Drain the software pending list into the hardware queues. */
	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
		push_desc_queue(c);
		list_del(&c->node);
	}
}
0489 
/*
 * dmaengine ->device_issue_pending: enqueue the channel and, unless the
 * device is runtime-suspended, push it to the hardware right away.
 * While suspended, the pending list is flushed later by
 * cppi41_runtime_resume() (see comment above cppi41_run_queue()).
 */
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	unsigned long flags;
	int error;

	/* -EINPROGRESS just means an async resume is underway; not an error */
	error = pm_runtime_get(cdd->ddev.dev);
	if ((error != -EINPROGRESS) && error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
			error);

		return;
	}

	spin_lock_irqsave(&cdd->lock, flags);
	list_add_tail(&c->node, &cdd->pending);
	if (!cdd->is_suspended)
		cppi41_run_queue(cdd);
	spin_unlock_irqrestore(&cdd->lock, flags);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}
0515 
0516 static u32 get_host_pd0(u32 length)
0517 {
0518     u32 reg;
0519 
0520     reg = DESC_TYPE_HOST << DESC_TYPE;
0521     reg |= length;
0522 
0523     return reg;
0524 }
0525 
0526 static u32 get_host_pd1(struct cppi41_channel *c)
0527 {
0528     u32 reg;
0529 
0530     reg = 0;
0531 
0532     return reg;
0533 }
0534 
0535 static u32 get_host_pd2(struct cppi41_channel *c)
0536 {
0537     u32 reg;
0538 
0539     reg = DESC_TYPE_USB;
0540     reg |= c->q_comp_num;
0541 
0542     return reg;
0543 }
0544 
0545 static u32 get_host_pd3(u32 length)
0546 {
0547     u32 reg;
0548 
0549     /* PD3 = packet size */
0550     reg = length;
0551 
0552     return reg;
0553 }
0554 
0555 static u32 get_host_pd6(u32 length)
0556 {
0557     u32 reg;
0558 
0559     /* PD6 buffer size */
0560     reg = DESC_PD_COMPLETE;
0561     reg |= length;
0562 
0563     return reg;
0564 }
0565 
0566 static u32 get_host_pd4_or_7(u32 addr)
0567 {
0568     u32 reg;
0569 
0570     reg = addr;
0571 
0572     return reg;
0573 }
0574 
0575 static u32 get_host_pd5(void)
0576 {
0577     u32 reg;
0578 
0579     reg = 0;
0580 
0581     return reg;
0582 }
0583 
/*
 * dmaengine ->device_prep_slave_sg: fill this channel's pre-allocated
 * descriptor(s) from the scatterlist and return the channel's (single,
 * reused) tx descriptor.  Returns NULL on PM failure or while the
 * device is suspended.
 */
static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct dma_async_tx_descriptor *txd = NULL;
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;
	int error;

	error = pm_runtime_get(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);

		return NULL;
	}

	if (cdd->is_suspended)
		goto err_out_not_ready;

	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		/* Populate all eight packet-descriptor words. */
		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	txd = &c->txd;

err_out_not_ready:
	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return txd;
}
0635 
/* Initialise a descriptor as a teardown descriptor (type field only). */
static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
0640 
/*
 * Tear down a channel: queue a teardown descriptor, then poll until both
 * the teardown descriptor and any in-flight transfer descriptor have
 * shown up on the completion queues; finally disable the channel and
 * complete the transfer with DMA_TRANS_ABORTED.  Returns -EAGAIN while
 * the caller needs to retry.
 */
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
	struct dmaengine_result abort_result;
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *td;
	u32 reg;
	u32 desc_phys;
	u32 td_desc_phys;

	/* The shared teardown descriptor lives past the channel descriptors. */
	td = cdd->cd;
	td += cdd->first_td_desc;

	td_desc_phys = cdd->descs_phys;
	td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

	if (!c->td_queued) {
		cppi41_compute_td_desc(td);
		__iowmb();

		reg = (sizeof(struct cppi41_desc) - 24) / 4;
		reg |= td_desc_phys;
		cppi_writel(reg, cdd->qmgr_mem +
				QMGR_QUEUE_D(cdd->td_queue.submit));

		reg = GCR_CHAN_ENABLE;
		if (!c->is_tx) {
			reg |= GCR_STARV_RETRY;
			reg |= GCR_DESC_TYPE_HOST;
			reg |= cdd->td_queue.complete;
		}
		reg |= GCR_TEARDOWN;
		cppi_writel(reg, c->gcr_reg);
		c->td_queued = 1;
		c->td_retry = 500;
	}

	if (!c->td_seen || !c->td_desc_seen) {

		desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
		if (!desc_phys && c->is_tx)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);

		if (desc_phys == c->desc_phys) {
			/* the in-flight transfer descriptor came back */
			c->td_desc_seen = 1;

		} else if (desc_phys == td_desc_phys) {
			u32 pd0;

			__iormb();
			pd0 = td->pd0;
			WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
			WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
			WARN_ON((pd0 & 0x1f) != c->port_num);
			c->td_seen = 1;
		} else if (desc_phys) {
			WARN_ON_ONCE(1);
		}
	}
	c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poke
	 * the TD bit multiple times. After that the hardware releases the
	 * transfer descriptor followed by TD descriptor. Waiting seems not to
	 * cause any difference.
	 * RX seems to be thrown out right away. However once the TearDown
	 * descriptor gets through we are done. If we have seen the transfer
	 * descriptor before the TD we fetch it from enqueue, it has to be
	 * there waiting for us.
	 */
	if (!c->td_seen && c->td_retry) {
		udelay(1);
		return -EAGAIN;
	}
	WARN_ON(!c->td_retry);

	if (!c->td_desc_seen) {
		/* reclaim the transfer descriptor from the submit side */
		desc_phys = cppi41_pop_desc(cdd, c->q_num);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
		WARN_ON(!desc_phys);
	}

	c->td_queued = 0;
	c->td_seen = 0;
	c->td_desc_seen = 0;
	cppi_writel(0, c->gcr_reg);

	/* Invoke the callback to do the necessary clean-up */
	abort_result.result = DMA_TRANS_ABORTED;
	dma_cookie_complete(&c->txd);
	dmaengine_desc_get_callback_invoke(&c->txd, &abort_result);

	return 0;
}
0735 
/*
 * dmaengine ->device_terminate_all: if the channel was never pushed to
 * the hardware just drop it from the pending list; otherwise run the
 * teardown sequence and release its chan_busy[] slot (and the
 * runtime-PM usecount paired with push_desc_queue()).
 */
static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	if (!cdd->chan_busy[desc_num]) {
		struct cppi41_channel *cc, *_ct;

		/*
		 * channels might still be in the pending list if
		 * cppi41_dma_issue_pending() is called after
		 * cppi41_runtime_suspend() is called
		 */
		list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
			if (cc != c)
				continue;
			list_del(&cc->node);
			break;
		}
		return 0;
	}

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return 0;
}
0775 
/*
 * Allocate and register the dmaengine channels: two per hardware port
 * (odd index = TX, even index = RX), each bound to its own descriptor
 * slot.  The slot right after the last channel is reserved for the
 * teardown descriptor (first_td_desc).
 */
static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan, *chans;
	int i;
	u32 n_chans = cdd->n_chans;

	/*
	 * The channels can only be used as TX or as RX. So we add twice
	 * that much dma channels because USB can only do RX or TX.
	 */
	n_chans *= 2;

	chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	for (i = 0; i < n_chans; i++) {
		cchan = &chans[i];

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
}
0814 
/*
 * Reverse of init_descs(): unregister the descriptor memory region(s)
 * from the queue manager and free the coherent descriptor area.
 */
static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int mem_decs;
	int i;

	mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);

	for (i = 0; i < DESCS_AREAS; i++) {

		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		dma_free_coherent(dev, mem_decs, cdd->cd,
				cdd->descs_phys);
	}
}
0831 
/* Stop the DMA scheduler (clears the enable bit and entry count). */
static void disable_sched(struct cppi41_dd *cdd)
{
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}
0836 
0837 static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
0838 {
0839     disable_sched(cdd);
0840 
0841     purge_descs(dev, cdd);
0842 
0843     cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
0844     cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
0845     dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
0846             cdd->scratch_phys);
0847 }
0848 
/*
 * Allocate the coherent descriptor area and register it with the queue
 * manager: base address plus a control word encoding the start index,
 * log2 descriptor size and log2 descriptor count.
 */
static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_decs;
	int i;
	u32 reg;
	u32 idx;

	/* Descriptor size must be a power of two and at least 32 bytes. */
	BUILD_BUG_ON(sizeof(struct cppi41_desc) &
			(sizeof(struct cppi41_desc) - 1));
	BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
	BUILD_BUG_ON(ALLOC_DECS_NUM < 32);

	desc_size = sizeof(struct cppi41_desc);
	mem_decs = ALLOC_DECS_NUM * desc_size;

	idx = 0;
	for (i = 0; i < DESCS_AREAS; i++) {

		/* size/count fields are encoded as log2(x) - 5 */
		reg = idx << QMGR_MEMCTRL_IDX_SH;
		reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
		reg |= ilog2(ALLOC_DECS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
		cdd->cd = dma_alloc_coherent(dev, mem_decs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;

		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		idx += ALLOC_DECS_NUM;
	}
	return 0;
}
0885 
/*
 * Program the scheduler table: each 32-bit word holds four entries, a
 * TX and an RX entry for two consecutive channels.  Finally enable the
 * scheduler with the last entry index (2 * n_chans - 1) in the low bits.
 */
static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
	for (ch = 0; ch < cdd->n_chans; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
	reg = cdd->n_chans * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
0908 
/*
 * Bring up the hardware: allocate the queue-manager linking RAM,
 * register the descriptor memory, program the teardown free-descriptor
 * queue and start the scheduler.
 */
static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	/* LRAM size register field is 14 bits wide */
	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);

	return 0;
err_td:
	deinit_cppi41(dev, cdd);
	return ret;
}
0935 
0936 static struct platform_driver cpp41_dma_driver;
0937 /*
0938  * The param format is:
0939  * X Y
0940  * X: Port
0941  * Y: 0 = RX else TX
0942  */
0943 #define INFO_PORT   0
0944 #define INFO_IS_TX  1
0945 
/*
 * Filter for dma_request_channel(): match the requested port number and
 * direction (param = { port, is_tx }, see INFO_PORT/INFO_IS_TX) and
 * bind the channel to its submit/completion queue pair.
 */
static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	/* only consider channels provided by this driver */
	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
		     ARRAY_SIZE(am335x_usb_queues_tx));
	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}
0978 
/* Filter info consumed by cppi41_dma_xlate() via ofdma->of_dma_data. */
static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};
0982 
0983 static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
0984         struct of_dma *ofdma)
0985 {
0986     int count = dma_spec->args_count;
0987     struct of_dma_filter_info *info = ofdma->of_dma_data;
0988 
0989     if (!info || !info->filter_fn)
0990         return NULL;
0991 
0992     if (count != 2)
0993         return NULL;
0994 
0995     return dma_request_channel(info->dma_cap, info->filter_fn,
0996             &dma_spec->args[0]);
0997 }
0998 
/* am335x: completion queues start at 93, five QMGR pending registers. */
static const struct cppi_glue_infos am335x_usb_infos = {
	.queues_rx = am335x_usb_queues_rx,
	.queues_tx = am335x_usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
	.first_completion_queue = 93,
	.qmgr_num_pend = 5,
};

/* da8xx: completion queues start at 24, two QMGR pending registers. */
static const struct cppi_glue_infos da8xx_usb_infos = {
	.queues_rx = da8xx_usb_queues_rx,
	.queues_tx = da8xx_usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
	.first_completion_queue = 24,
	.qmgr_num_pend = 2,
};

/* OF match table mapping compatibles to their glue configuration. */
static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
	{ .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
1021 
1022 static const struct cppi_glue_infos *get_glue_info(struct device *dev)
1023 {
1024     const struct of_device_id *of_id;
1025 
1026     of_id = of_match_node(cppi41_dma_ids, dev->of_node);
1027     if (!of_id)
1028         return NULL;
1029     return of_id->data;
1030 }
1031 
/* Slave bus widths advertised for both directions: 1, 2, 3 and 4 bytes */
#define CPPI41_DMA_BUSWIDTHS    (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1036 
/*
 * Probe: map the controller, scheduler and queue-manager register areas,
 * power the block up via runtime PM, initialise the queue manager and
 * channels, then register with the dmaengine core and the OF DMA layer.
 * On success the device is left in autosuspend.
 */
static int cppi41_dma_probe(struct platform_device *pdev)
{
    struct cppi41_dd *cdd;
    struct device *dev = &pdev->dev;
    const struct cppi_glue_infos *glue_info;
    struct resource *mem;
    int index;
    int irq;
    int ret;

    /* SoC-specific queue numbers; fails for unknown compatibles */
    glue_info = get_glue_info(dev);
    if (!glue_info)
        return -EINVAL;

    cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
    if (!cdd)
        return -ENOMEM;

    /* Advertise slave-DMA capability and hook up the dmaengine ops */
    dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
    cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
    cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
    cdd->ddev.device_tx_status = cppi41_dma_tx_status;
    cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
    cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
    cdd->ddev.device_terminate_all = cppi41_stop_chan;
    cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
    cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
    cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    cdd->ddev.dev = dev;
    INIT_LIST_HEAD(&cdd->ddev.channels);
    /* The xlate filter info needs the capability mask assembled above */
    cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

    /*
     * The "controller" reg entry anchors the register layout; the
     * scheduler and queue-manager areas follow at index + 1 / index + 2.
     */
    index = of_property_match_string(dev->of_node,
                     "reg-names", "controller");
    if (index < 0)
        return index;

    mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
    cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
    if (IS_ERR(cdd->ctrl_mem))
        return PTR_ERR(cdd->ctrl_mem);

    mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
    cdd->sched_mem = devm_ioremap_resource(dev, mem);
    if (IS_ERR(cdd->sched_mem))
        return PTR_ERR(cdd->sched_mem);

    mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
    cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
    if (IS_ERR(cdd->qmgr_mem))
        return PTR_ERR(cdd->qmgr_mem);

    spin_lock_init(&cdd->lock);
    INIT_LIST_HEAD(&cdd->pending);

    platform_set_drvdata(pdev, cdd);

    /* Power the block up; autosuspend after 100 ms of inactivity */
    pm_runtime_enable(dev);
    pm_runtime_set_autosuspend_delay(dev, 100);
    pm_runtime_use_autosuspend(dev);
    ret = pm_runtime_get_sync(dev);
    if (ret < 0)
        goto err_get_sync;

    /* Copy the SoC-specific queue configuration into the driver data */
    cdd->queues_rx = glue_info->queues_rx;
    cdd->queues_tx = glue_info->queues_tx;
    cdd->td_queue = glue_info->td_queue;
    cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
    cdd->first_completion_queue = glue_info->first_completion_queue;

    /* Parse new and deprecated dma-channels properties */
    ret = of_property_read_u32(dev->of_node,
                   "dma-channels", &cdd->n_chans);
    if (ret)
        ret = of_property_read_u32(dev->of_node,
                       "#dma-channels", &cdd->n_chans);
    if (ret)
        goto err_get_n_chans;

    ret = init_cppi41(dev, cdd);
    if (ret)
        goto err_init_cppi;

    ret = cppi41_add_chans(dev, cdd);
    if (ret)
        goto err_chans;

    irq = irq_of_parse_and_map(dev->of_node, 0);
    if (!irq) {
        ret = -EINVAL;
        goto err_chans;
    }

    /* IRQF_SHARED: the line may be shared with other users */
    ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
            dev_name(dev), cdd);
    if (ret)
        goto err_chans;
    cdd->irq = irq;

    ret = dma_async_device_register(&cdd->ddev);
    if (ret)
        goto err_chans;

    ret = of_dma_controller_register(dev->of_node,
            cppi41_dma_xlate, &cpp41_dma_info);
    if (ret)
        goto err_of;

    /* Idle until the first transfer request comes in */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);

    return 0;
/* Unwind in reverse order of the setup steps above */
err_of:
    dma_async_device_unregister(&cdd->ddev);
err_chans:
    deinit_cppi41(dev, cdd);
err_init_cppi:
    pm_runtime_dont_use_autosuspend(dev);
err_get_n_chans:
err_get_sync:
    pm_runtime_put_sync(dev);
    pm_runtime_disable(dev);
    return ret;
}
1162 
/*
 * Remove: wake the hardware, unregister from the OF DMA layer and the
 * dmaengine core, release the IRQ and tear down the queue manager, then
 * drop the runtime PM references taken at probe.
 */
static int cppi41_dma_remove(struct platform_device *pdev)
{
    struct cppi41_dd *cdd = platform_get_drvdata(pdev);
    int error;

    /* Resume the device so the teardown register accesses can proceed;
     * on failure we still continue and only log the error. */
    error = pm_runtime_get_sync(&pdev->dev);
    if (error < 0)
        dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
            __func__, error);
    of_dma_controller_free(pdev->dev.of_node);
    dma_async_device_unregister(&cdd->ddev);

    /* Release the IRQ explicitly before tearing down the controller */
    devm_free_irq(&pdev->dev, cdd->irq, cdd);
    deinit_cppi41(&pdev->dev, cdd);
    pm_runtime_dont_use_autosuspend(&pdev->dev);
    pm_runtime_put_sync(&pdev->dev);
    pm_runtime_disable(&pdev->dev);
    return 0;
}
1182 
/*
 * Late system suspend: save the DMA_TDFDQ register (restored by
 * cppi41_resume()) and stop the DMA scheduler.
 */
static int __maybe_unused cppi41_suspend(struct device *dev)
{
    struct cppi41_dd *cdd = dev_get_drvdata(dev);

    cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
    disable_sched(cdd);

    return 0;
}
1192 
/*
 * Late system resume: re-program the register state that was set up at
 * init time and saved by cppi41_suspend().
 */
static int __maybe_unused cppi41_resume(struct device *dev)
{
    struct cppi41_dd *cdd = dev_get_drvdata(dev);
    struct cppi41_channel *c;
    int i;

    /* Restore the descriptor memory base for each descriptor area */
    for (i = 0; i < DESCS_AREAS; i++)
        cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));

    /* RX channels need their queue number written back to RXHPCRA0 */
    list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
        if (!c->is_tx)
            cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

    init_sched(cdd);

    /* Restore the teardown free-descriptor queue and the queue-manager
     * LRAM (scratch) configuration */
    cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
    cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
    cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
    cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

    return 0;
}
1215 
1216 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1217 {
1218     struct cppi41_dd *cdd = dev_get_drvdata(dev);
1219     unsigned long flags;
1220 
1221     spin_lock_irqsave(&cdd->lock, flags);
1222     cdd->is_suspended = true;
1223     WARN_ON(!list_empty(&cdd->pending));
1224     spin_unlock_irqrestore(&cdd->lock, flags);
1225 
1226     return 0;
1227 }
1228 
1229 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
1230 {
1231     struct cppi41_dd *cdd = dev_get_drvdata(dev);
1232     unsigned long flags;
1233 
1234     spin_lock_irqsave(&cdd->lock, flags);
1235     cdd->is_suspended = false;
1236     cppi41_run_queue(cdd);
1237     spin_unlock_irqrestore(&cdd->lock, flags);
1238 
1239     return 0;
1240 }
1241 
/* Late system-sleep hooks plus runtime PM hooks; no runtime-idle callback */
static const struct dev_pm_ops cppi41_pm_ops = {
    SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
    SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
               cppi41_runtime_resume,
               NULL)
};
1248 
static struct platform_driver cpp41_dma_driver = {
    .probe  = cppi41_dma_probe,
    .remove = cppi41_dma_remove,
    .driver = {
        .name = "cppi41-dma-engine",
        .pm = &cppi41_pm_ops,
        /* of_match_ptr() evaluates to NULL when CONFIG_OF is not set */
        .of_match_table = of_match_ptr(cppi41_dma_ids),
    },
};

module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");