0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2013 - 2015 Linaro Ltd.
0004  * Copyright (c) 2013 HiSilicon Limited.
0005  */
0006 #include <linux/sched.h>
0007 #include <linux/device.h>
0008 #include <linux/dma-mapping.h>
0009 #include <linux/dmapool.h>
0010 #include <linux/dmaengine.h>
0011 #include <linux/init.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/kernel.h>
0014 #include <linux/module.h>
0015 #include <linux/platform_device.h>
0016 #include <linux/slab.h>
0017 #include <linux/spinlock.h>
0018 #include <linux/of_device.h>
0019 #include <linux/of.h>
0020 #include <linux/clk.h>
0021 #include <linux/of_dma.h>
0022 
0023 #include "virt-dma.h"
0024 
0025 #define DRIVER_NAME     "k3-dma"
0026 #define DMA_MAX_SIZE        0x1ffc
0027 #define DMA_CYCLIC_MAX_PERIOD   0x1000
0028 #define LLI_BLOCK_SIZE      (4 * PAGE_SIZE)
0029 
0030 #define INT_STAT        0x00
0031 #define INT_TC1         0x04
0032 #define INT_TC2         0x08
0033 #define INT_ERR1        0x0c
0034 #define INT_ERR2        0x10
0035 #define INT_TC1_MASK        0x18
0036 #define INT_TC2_MASK        0x1c
0037 #define INT_ERR1_MASK       0x20
0038 #define INT_ERR2_MASK       0x24
0039 #define INT_TC1_RAW     0x600
0040 #define INT_TC2_RAW     0x608
0041 #define INT_ERR1_RAW        0x610
0042 #define INT_ERR2_RAW        0x618
0043 #define CH_PRI          0x688
0044 #define CH_STAT         0x690
0045 #define CX_CUR_CNT      0x704
0046 #define CX_LLI          0x800
0047 #define CX_CNT1         0x80c
0048 #define CX_CNT0         0x810
0049 #define CX_SRC          0x814
0050 #define CX_DST          0x818
0051 #define CX_CFG          0x81c
0052 
0053 #define CX_LLI_CHAIN_EN     0x2
0054 #define CX_CFG_EN       0x1
0055 #define CX_CFG_NODEIRQ      BIT(1)
0056 #define CX_CFG_MEM2PER      (0x1 << 2)
0057 #define CX_CFG_PER2MEM      (0x2 << 2)
0058 #define CX_CFG_SRCINCR      (0x1 << 31)
0059 #define CX_CFG_DSTINCR      (0x1 << 30)
0060 
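     /*
      * Hardware link-list item (LLI): 'lli' holds the bus address of the
      * next item in the chain (with CX_LLI_CHAIN_EN set in its low bits),
      * while count/saddr/daddr/config mirror the CX_CNT0/CX_SRC/CX_DST/
      * CX_CFG registers programmed by k3_dma_set_desc().  The 32-byte
      * alignment matches the dma_pool created in probe.
      */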
0061 struct k3_desc_hw {
0062     u32 lli;
0063     u32 reserved[3];
0064     u32 count;
0065     u32 saddr;
0066     u32 daddr;
0067     u32 config;
0068 } __aligned(32);
0069 
0070 struct k3_dma_desc_sw {
0071     struct virt_dma_desc    vd;
0072     dma_addr_t      desc_hw_lli;
0073     size_t          desc_num;
0074     size_t          size;
0075     struct k3_desc_hw   *desc_hw;
0076 };
0077 
0078 struct k3_dma_phy;
0079 
0080 struct k3_dma_chan {
0081     u32         ccfg;
0082     struct virt_dma_chan    vc;
0083     struct k3_dma_phy   *phy;
0084     struct list_head    node;
0085     dma_addr_t      dev_addr;
0086     enum dma_status     status;
0087     bool            cyclic;
0088     struct dma_slave_config slave_config;
0089 };
0090 
0091 struct k3_dma_phy {
0092     u32         idx;
0093     void __iomem        *base;
0094     struct k3_dma_chan  *vchan;
0095     struct k3_dma_desc_sw   *ds_run;
0096     struct k3_dma_desc_sw   *ds_done;
0097 };
0098 
0099 struct k3_dma_dev {
0100     struct dma_device   slave;
0101     void __iomem        *base;
0102     struct tasklet_struct   task;
0103     spinlock_t      lock;
0104     struct list_head    chan_pending;
0105     struct k3_dma_phy   *phy;
0106     struct k3_dma_chan  *chans;
0107     struct clk      *clk;
0108     struct dma_pool     *pool;
0109     u32         dma_channels;
0110     u32         dma_requests;
0111     u32         dma_channel_mask;
0112     unsigned int        irq;
0113 };
0114 
0115 
0116 #define K3_FLAG_NOCLK   BIT(1)
0117 
0118 struct k3dma_soc_data {
0119     unsigned long flags;
0120 };
0121 
0122 
0123 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
0124 
0125 static int k3_dma_config_write(struct dma_chan *chan,
0126                    enum dma_transfer_direction dir,
0127                    struct dma_slave_config *cfg);
0128 
0129 static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
0130 {
0131     return container_of(chan, struct k3_dma_chan, vc.chan);
0132 }
0133 
0134 static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
0135 {
0136     u32 val = 0;
0137 
0138     if (on) {
0139         val = readl_relaxed(phy->base + CX_CFG);
0140         val |= CX_CFG_EN;
0141         writel_relaxed(val, phy->base + CX_CFG);
0142     } else {
0143         val = readl_relaxed(phy->base + CX_CFG);
0144         val &= ~CX_CFG_EN;
0145         writel_relaxed(val, phy->base + CX_CFG);
0146     }
0147 }
0148 
0149 static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
0150 {
0151     u32 val = 0;
0152 
0153     k3_dma_pause_dma(phy, false);
0154 
0155     val = 0x1 << phy->idx;
0156     writel_relaxed(val, d->base + INT_TC1_RAW);
0157     writel_relaxed(val, d->base + INT_TC2_RAW);
0158     writel_relaxed(val, d->base + INT_ERR1_RAW);
0159     writel_relaxed(val, d->base + INT_ERR2_RAW);
0160 }
0161 
0162 static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
0163 {
0164     writel_relaxed(hw->lli, phy->base + CX_LLI);
0165     writel_relaxed(hw->count, phy->base + CX_CNT0);
0166     writel_relaxed(hw->saddr, phy->base + CX_SRC);
0167     writel_relaxed(hw->daddr, phy->base + CX_DST);
0168     writel_relaxed(hw->config, phy->base + CX_CFG);
0169 }
0170 
0171 static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
0172 {
0173     u32 cnt = 0;
0174 
0175     cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
0176     cnt &= 0xffff;
0177     return cnt;
0178 }
0179 
0180 static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
0181 {
0182     return readl_relaxed(phy->base + CX_LLI);
0183 }
0184 
0185 static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
0186 {
0187     return readl_relaxed(d->base + CH_STAT);
0188 }
0189 
0190 static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
0191 {
0192     if (on) {
0193         /* set same priority */
0194         writel_relaxed(0x0, d->base + CH_PRI);
0195 
0196         /* unmask irq */
0197         writel_relaxed(0xffff, d->base + INT_TC1_MASK);
0198         writel_relaxed(0xffff, d->base + INT_TC2_MASK);
0199         writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
0200         writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
0201     } else {
0202         /* mask irq */
0203         writel_relaxed(0x0, d->base + INT_TC1_MASK);
0204         writel_relaxed(0x0, d->base + INT_TC2_MASK);
0205         writel_relaxed(0x0, d->base + INT_ERR1_MASK);
0206         writel_relaxed(0x0, d->base + INT_ERR2_MASK);
0207     }
0208 }
0209 
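     /*
      * Interrupt handler: TC1 marks completion of a whole descriptor chain
      * (the cookie is completed and the descriptor parked in ds_done), TC2
      * is the per-period interrupt used by cyclic transfers
      * (vchan_cyclic_callback), and the error lines only produce a warning.
      * Raw status is cleared and the tasklet is scheduled so finished
      * physical channels can be reused.
      */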
0210 static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
0211 {
0212     struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
0213     struct k3_dma_phy *p;
0214     struct k3_dma_chan *c;
0215     u32 stat = readl_relaxed(d->base + INT_STAT);
0216     u32 tc1  = readl_relaxed(d->base + INT_TC1);
0217     u32 tc2  = readl_relaxed(d->base + INT_TC2);
0218     u32 err1 = readl_relaxed(d->base + INT_ERR1);
0219     u32 err2 = readl_relaxed(d->base + INT_ERR2);
0220     u32 i, irq_chan = 0;
0221 
0222     while (stat) {
0223         i = __ffs(stat);
0224         stat &= ~BIT(i);
0225         if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
0226 
0227             p = &d->phy[i];
0228             c = p->vchan;
0229             if (c && (tc1 & BIT(i))) {
0230                 spin_lock(&c->vc.lock);
0231                 if (p->ds_run != NULL) {
0232                     vchan_cookie_complete(&p->ds_run->vd);
0233                     p->ds_done = p->ds_run;
0234                     p->ds_run = NULL;
0235                 }
0236                 spin_unlock(&c->vc.lock);
0237             }
0238             if (c && (tc2 & BIT(i))) {
0239                 spin_lock(&c->vc.lock);
0240                 if (p->ds_run != NULL)
0241                     vchan_cyclic_callback(&p->ds_run->vd);
0242                 spin_unlock(&c->vc.lock);
0243             }
0244             irq_chan |= BIT(i);
0245         }
0246         if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
0247             dev_warn(d->slave.dev, "DMA ERR\n");
0248     }
0249 
0250     writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
0251     writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
0252     writel_relaxed(err1, d->base + INT_ERR1_RAW);
0253     writel_relaxed(err2, d->base + INT_ERR2_RAW);
0254 
0255     if (irq_chan)
0256         tasklet_schedule(&d->task);
0257 
0258     if (irq_chan || err1 || err2)
0259         return IRQ_HANDLED;
0260 
0261     return IRQ_NONE;
0262 }
0263 
0264 static int k3_dma_start_txd(struct k3_dma_chan *c)
0265 {
0266     struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
0267     struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
0268 
0269     if (!c->phy)
0270         return -EAGAIN;
0271 
0272     if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
0273         return -EAGAIN;
0274 
0275     /* Avoid losing track of ds_run if a transaction is in flight */
0276     if (c->phy->ds_run)
0277         return -EAGAIN;
0278 
0279     if (vd) {
0280         struct k3_dma_desc_sw *ds =
0281             container_of(vd, struct k3_dma_desc_sw, vd);
0282         /*
0283          * fetch and remove request from vc->desc_issued
0284          * so vc->desc_issued only contains desc pending
0285          */
0286         list_del(&ds->vd.node);
0287 
0288         c->phy->ds_run = ds;
0289         c->phy->ds_done = NULL;
0290         /* start dma */
0291         k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
0292         return 0;
0293     }
0294     c->phy->ds_run = NULL;
0295     c->phy->ds_done = NULL;
0296     return -EAGAIN;
0297 }
0298 
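     /*
      * Deferred work: first, for every channel whose physical channel has a
      * completed descriptor (ds_done set), try to start the next queued
      * descriptor and release the physical channel if nothing is pending;
      * then hand free physical channels (subject to dma_channel_mask) to
      * virtual channels waiting on chan_pending and start their first
      * descriptor.
      */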
0299 static void k3_dma_tasklet(struct tasklet_struct *t)
0300 {
0301     struct k3_dma_dev *d = from_tasklet(d, t, task);
0302     struct k3_dma_phy *p;
0303     struct k3_dma_chan *c, *cn;
0304     unsigned pch, pch_alloc = 0;
0305 
0306     /* check for new dma requests on running channels in vc->desc_issued */
0307     list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
0308         spin_lock_irq(&c->vc.lock);
0309         p = c->phy;
0310         if (p && p->ds_done) {
0311             if (k3_dma_start_txd(c)) {
0312                 /* No current txd associated with this channel */
0313                 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
0314                 /* Mark this channel free */
0315                 c->phy = NULL;
0316                 p->vchan = NULL;
0317             }
0318         }
0319         spin_unlock_irq(&c->vc.lock);
0320     }
0321 
0322     /* check new channel request in d->chan_pending */
0323     spin_lock_irq(&d->lock);
0324     for (pch = 0; pch < d->dma_channels; pch++) {
0325         if (!(d->dma_channel_mask & (1 << pch)))
0326             continue;
0327 
0328         p = &d->phy[pch];
0329 
0330         if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
0331             c = list_first_entry(&d->chan_pending,
0332                 struct k3_dma_chan, node);
0333             /* remove from d->chan_pending */
0334             list_del_init(&c->node);
0335             pch_alloc |= 1 << pch;
0336             /* Mark this channel allocated */
0337             p->vchan = c;
0338             c->phy = p;
0339             dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
0340         }
0341     }
0342     spin_unlock_irq(&d->lock);
0343 
0344     for (pch = 0; pch < d->dma_channels; pch++) {
0345         if (!(d->dma_channel_mask & (1 << pch)))
0346             continue;
0347 
0348         if (pch_alloc & (1 << pch)) {
0349             p = &d->phy[pch];
0350             c = p->vchan;
0351             if (c) {
0352                 spin_lock_irq(&c->vc.lock);
0353                 k3_dma_start_txd(c);
0354                 spin_unlock_irq(&c->vc.lock);
0355             }
0356         }
0357     }
0358 }
0359 
0360 static void k3_dma_free_chan_resources(struct dma_chan *chan)
0361 {
0362     struct k3_dma_chan *c = to_k3_chan(chan);
0363     struct k3_dma_dev *d = to_k3_dma(chan->device);
0364     unsigned long flags;
0365 
0366     spin_lock_irqsave(&d->lock, flags);
0367     list_del_init(&c->node);
0368     spin_unlock_irqrestore(&d->lock, flags);
0369 
0370     vchan_free_chan_resources(&c->vc);
0371     c->ccfg = 0;
0372 }
0373 
0374 static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
0375     dma_cookie_t cookie, struct dma_tx_state *state)
0376 {
0377     struct k3_dma_chan *c = to_k3_chan(chan);
0378     struct k3_dma_dev *d = to_k3_dma(chan->device);
0379     struct k3_dma_phy *p;
0380     struct virt_dma_desc *vd;
0381     unsigned long flags;
0382     enum dma_status ret;
0383     size_t bytes = 0;
0384 
0385     ret = dma_cookie_status(&c->vc.chan, cookie, state);
0386     if (ret == DMA_COMPLETE)
0387         return ret;
0388 
0389     spin_lock_irqsave(&c->vc.lock, flags);
0390     p = c->phy;
0391     ret = c->status;
0392 
0393     /*
0394      * If the cookie is on our issue queue, then the residue is
0395      * its total size.
0396      */
0397     vd = vchan_find_desc(&c->vc, cookie);
0398     if (vd && !c->cyclic) {
0399         bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
0400     } else if ((!p) || (!p->ds_run)) {
0401         bytes = 0;
0402     } else {
0403         struct k3_dma_desc_sw *ds = p->ds_run;
0404         u32 clli = 0, index = 0;
0405 
0406         bytes = k3_dma_get_curr_cnt(d, p);
0407         clli = k3_dma_get_curr_lli(p);
0408         index = ((clli - ds->desc_hw_lli) /
0409                 sizeof(struct k3_desc_hw)) + 1;
0410         for (; index < ds->desc_num; index++) {
0411             bytes += ds->desc_hw[index].count;
0412             /* end of lli */
0413             if (!ds->desc_hw[index].lli)
0414                 break;
0415         }
0416     }
0417     spin_unlock_irqrestore(&c->vc.lock, flags);
0418     dma_set_residue(state, bytes);
0419     return ret;
0420 }
0421 
0422 static void k3_dma_issue_pending(struct dma_chan *chan)
0423 {
0424     struct k3_dma_chan *c = to_k3_chan(chan);
0425     struct k3_dma_dev *d = to_k3_dma(chan->device);
0426     unsigned long flags;
0427 
0428     spin_lock_irqsave(&c->vc.lock, flags);
0429     /* add request to vc->desc_issued */
0430     if (vchan_issue_pending(&c->vc)) {
0431         spin_lock(&d->lock);
0432         if (!c->phy) {
0433             if (list_empty(&c->node)) {
0434                 /* if new channel, add chan_pending */
0435                 list_add_tail(&c->node, &d->chan_pending);
0436                 /* check in tasklet */
0437                 tasklet_schedule(&d->task);
0438                 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
0439             }
0440         }
0441         spin_unlock(&d->lock);
0442     } else
0443         dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
0444     spin_unlock_irqrestore(&c->vc.lock, flags);
0445 }
0446 
0447 static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
0448             dma_addr_t src, size_t len, u32 num, u32 ccfg)
0449 {
0450     if (num != ds->desc_num - 1)
0451         ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
0452             sizeof(struct k3_desc_hw);
0453 
0454     ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
0455     ds->desc_hw[num].count = len;
0456     ds->desc_hw[num].saddr = src;
0457     ds->desc_hw[num].daddr = dst;
0458     ds->desc_hw[num].config = ccfg;
0459 }
0460 
0461 static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
0462                             struct dma_chan *chan)
0463 {
0464     struct k3_dma_chan *c = to_k3_chan(chan);
0465     struct k3_dma_desc_sw *ds;
0466     struct k3_dma_dev *d = to_k3_dma(chan->device);
0467     int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
0468 
0469     if (num > lli_limit) {
0470         dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
0471             &c->vc, num, lli_limit);
0472         return NULL;
0473     }
0474 
0475     ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
0476     if (!ds)
0477         return NULL;
0478 
0479     ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
0480     if (!ds->desc_hw) {
0481         dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
0482         kfree(ds);
0483         return NULL;
0484     }
0485     ds->desc_num = num;
0486     return ds;
0487 }
0488 
0489 static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
0490     struct dma_chan *chan,  dma_addr_t dst, dma_addr_t src,
0491     size_t len, unsigned long flags)
0492 {
0493     struct k3_dma_chan *c = to_k3_chan(chan);
0494     struct k3_dma_desc_sw *ds;
0495     size_t copy = 0;
0496     int num = 0;
0497 
0498     if (!len)
0499         return NULL;
0500 
0501     num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
0502 
0503     ds = k3_dma_alloc_desc_resource(num, chan);
0504     if (!ds)
0505         return NULL;
0506 
0507     c->cyclic = 0;
0508     ds->size = len;
0509     num = 0;
0510 
0511     if (!c->ccfg) {
0512         /* default is memtomem, without calling device_config */
0513         c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
0514         c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
0515         c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
0516     }
0517 
0518     do {
0519         copy = min_t(size_t, len, DMA_MAX_SIZE);
0520         k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
0521 
0522         src += copy;
0523         dst += copy;
0524         len -= copy;
0525     } while (len);
0526 
0527     ds->desc_hw[num-1].lli = 0; /* end of link */
0528     return vchan_tx_prep(&c->vc, &ds->vd, flags);
0529 }
0530 
0531 static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
0532     struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
0533     enum dma_transfer_direction dir, unsigned long flags, void *context)
0534 {
0535     struct k3_dma_chan *c = to_k3_chan(chan);
0536     struct k3_dma_desc_sw *ds;
0537     size_t len, avail, total = 0;
0538     struct scatterlist *sg;
0539     dma_addr_t addr, src = 0, dst = 0;
0540     int num = sglen, i;
0541 
0542     if (sgl == NULL)
0543         return NULL;
0544 
0545     c->cyclic = 0;
0546 
0547     for_each_sg(sgl, sg, sglen, i) {
0548         avail = sg_dma_len(sg);
0549         if (avail > DMA_MAX_SIZE)
0550             num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
0551     }
0552 
0553     ds = k3_dma_alloc_desc_resource(num, chan);
0554     if (!ds)
0555         return NULL;
0556     num = 0;
0557     k3_dma_config_write(chan, dir, &c->slave_config);
0558 
0559     for_each_sg(sgl, sg, sglen, i) {
0560         addr = sg_dma_address(sg);
0561         avail = sg_dma_len(sg);
0562         total += avail;
0563 
0564         do {
0565             len = min_t(size_t, avail, DMA_MAX_SIZE);
0566 
0567             if (dir == DMA_MEM_TO_DEV) {
0568                 src = addr;
0569                 dst = c->dev_addr;
0570             } else if (dir == DMA_DEV_TO_MEM) {
0571                 src = c->dev_addr;
0572                 dst = addr;
0573             }
0574 
0575             k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
0576 
0577             addr += len;
0578             avail -= len;
0579         } while (avail);
0580     }
0581 
0582     ds->desc_hw[num-1].lli = 0; /* end of link */
0583     ds->size = total;
0584     return vchan_tx_prep(&c->vc, &ds->vd, flags);
0585 }
0586 
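     /*
      * Cyclic preparation: the buffer is cut into LLIs of at most
      * DMA_CYCLIC_MAX_PERIOD bytes, descriptors that end a period carry
      * CX_CFG_NODEIRQ so the hardware raises TC2 there, and the last LLI is
      * pointed back at the first so the chain loops until terminated.
      */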
0587 static struct dma_async_tx_descriptor *
0588 k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
0589                size_t buf_len, size_t period_len,
0590                enum dma_transfer_direction dir,
0591                unsigned long flags)
0592 {
0593     struct k3_dma_chan *c = to_k3_chan(chan);
0594     struct k3_dma_desc_sw *ds;
0595     size_t len, avail, total = 0;
0596     dma_addr_t addr, src = 0, dst = 0;
0597     int num = 1, since = 0;
0598     size_t modulo = DMA_CYCLIC_MAX_PERIOD;
0599     u32 en_tc2 = 0;
0600 
0601     dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
0602            __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
0603            buf_len, period_len, (int)dir);
0604 
0605     avail = buf_len;
0606     if (avail > modulo)
0607         num += DIV_ROUND_UP(avail, modulo) - 1;
0608 
0609     ds = k3_dma_alloc_desc_resource(num, chan);
0610     if (!ds)
0611         return NULL;
0612 
0613     c->cyclic = 1;
0614     addr = buf_addr;
0615     avail = buf_len;
0616     total = avail;
0617     num = 0;
0618     k3_dma_config_write(chan, dir, &c->slave_config);
0619 
0620     if (period_len < modulo)
0621         modulo = period_len;
0622 
0623     do {
0624         len = min_t(size_t, avail, modulo);
0625 
0626         if (dir == DMA_MEM_TO_DEV) {
0627             src = addr;
0628             dst = c->dev_addr;
0629         } else if (dir == DMA_DEV_TO_MEM) {
0630             src = c->dev_addr;
0631             dst = addr;
0632         }
0633         since += len;
0634         if (since >= period_len) {
0635             /* descriptor asks for TC2 interrupt on completion */
0636             en_tc2 = CX_CFG_NODEIRQ;
0637             since -= period_len;
0638         } else
0639             en_tc2 = 0;
0640 
0641         k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
0642 
0643         addr += len;
0644         avail -= len;
0645     } while (avail);
0646 
0647     /* "Cyclic" == end of link points back to start of link */
0648     ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
0649 
0650     ds->size = total;
0651 
0652     return vchan_tx_prep(&c->vc, &ds->vd, flags);
0653 }
0654 
0655 static int k3_dma_config(struct dma_chan *chan,
0656              struct dma_slave_config *cfg)
0657 {
0658     struct k3_dma_chan *c = to_k3_chan(chan);
0659 
0660     memcpy(&c->slave_config, cfg, sizeof(*cfg));
0661 
0662     return 0;
0663 }
0664 
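     /*
      * Translate the cached dma_slave_config into the CX_CFG word: the
      * direction selects which side increments and which side supplies
      * dev_addr, the encoded bus width goes in at bits 12/16, the burst
      * length (capped at 16) at bits 20/24, and the peripheral request line
      * (the channel id) is shifted in at bit 4.
      */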
0665 static int k3_dma_config_write(struct dma_chan *chan,
0666                    enum dma_transfer_direction dir,
0667                    struct dma_slave_config *cfg)
0668 {
0669     struct k3_dma_chan *c = to_k3_chan(chan);
0670     u32 maxburst = 0, val = 0;
0671     enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
0672 
0673     if (dir == DMA_DEV_TO_MEM) {
0674         c->ccfg = CX_CFG_DSTINCR;
0675         c->dev_addr = cfg->src_addr;
0676         maxburst = cfg->src_maxburst;
0677         width = cfg->src_addr_width;
0678     } else if (dir == DMA_MEM_TO_DEV) {
0679         c->ccfg = CX_CFG_SRCINCR;
0680         c->dev_addr = cfg->dst_addr;
0681         maxburst = cfg->dst_maxburst;
0682         width = cfg->dst_addr_width;
0683     }
0684     switch (width) {
0685     case DMA_SLAVE_BUSWIDTH_1_BYTE:
0686     case DMA_SLAVE_BUSWIDTH_2_BYTES:
0687     case DMA_SLAVE_BUSWIDTH_4_BYTES:
0688     case DMA_SLAVE_BUSWIDTH_8_BYTES:
0689         val =  __ffs(width);
0690         break;
0691     default:
0692         val = 3;
0693         break;
0694     }
0695     c->ccfg |= (val << 12) | (val << 16);
0696 
0697     if ((maxburst == 0) || (maxburst > 16))
0698         val = 15;
0699     else
0700         val = maxburst - 1;
0701     c->ccfg |= (val << 20) | (val << 24);
0702     c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
0703 
0704     /* specific request line */
0705     c->ccfg |= c->vc.chan.chan_id << 4;
0706 
0707     return 0;
0708 }
0709 
0710 static void k3_dma_free_desc(struct virt_dma_desc *vd)
0711 {
0712     struct k3_dma_desc_sw *ds =
0713         container_of(vd, struct k3_dma_desc_sw, vd);
0714     struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
0715 
0716     dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
0717     kfree(ds);
0718 }
0719 
0720 static int k3_dma_terminate_all(struct dma_chan *chan)
0721 {
0722     struct k3_dma_chan *c = to_k3_chan(chan);
0723     struct k3_dma_dev *d = to_k3_dma(chan->device);
0724     struct k3_dma_phy *p = c->phy;
0725     unsigned long flags;
0726     LIST_HEAD(head);
0727 
0728     dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
0729 
0730     /* Prevent this channel being scheduled */
0731     spin_lock(&d->lock);
0732     list_del_init(&c->node);
0733     spin_unlock(&d->lock);
0734 
0735     /* Clear the tx descriptor lists */
0736     spin_lock_irqsave(&c->vc.lock, flags);
0737     vchan_get_all_descriptors(&c->vc, &head);
0738     if (p) {
0739         /* vchan is assigned to a pchan - stop the channel */
0740         k3_dma_terminate_chan(p, d);
0741         c->phy = NULL;
0742         p->vchan = NULL;
0743         if (p->ds_run) {
0744             vchan_terminate_vdesc(&p->ds_run->vd);
0745             p->ds_run = NULL;
0746         }
0747         p->ds_done = NULL;
0748     }
0749     spin_unlock_irqrestore(&c->vc.lock, flags);
0750     vchan_dma_desc_free_list(&c->vc, &head);
0751 
0752     return 0;
0753 }
0754 
0755 static void k3_dma_synchronize(struct dma_chan *chan)
0756 {
0757     struct k3_dma_chan *c = to_k3_chan(chan);
0758 
0759     vchan_synchronize(&c->vc);
0760 }
0761 
0762 static int k3_dma_transfer_pause(struct dma_chan *chan)
0763 {
0764     struct k3_dma_chan *c = to_k3_chan(chan);
0765     struct k3_dma_dev *d = to_k3_dma(chan->device);
0766     struct k3_dma_phy *p = c->phy;
0767 
0768     dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
0769     if (c->status == DMA_IN_PROGRESS) {
0770         c->status = DMA_PAUSED;
0771         if (p) {
0772             k3_dma_pause_dma(p, false);
0773         } else {
0774             spin_lock(&d->lock);
0775             list_del_init(&c->node);
0776             spin_unlock(&d->lock);
0777         }
0778     }
0779 
0780     return 0;
0781 }
0782 
0783 static int k3_dma_transfer_resume(struct dma_chan *chan)
0784 {
0785     struct k3_dma_chan *c = to_k3_chan(chan);
0786     struct k3_dma_dev *d = to_k3_dma(chan->device);
0787     struct k3_dma_phy *p = c->phy;
0788     unsigned long flags;
0789 
0790     dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
0791     spin_lock_irqsave(&c->vc.lock, flags);
0792     if (c->status == DMA_PAUSED) {
0793         c->status = DMA_IN_PROGRESS;
0794         if (p) {
0795             k3_dma_pause_dma(p, true);
0796         } else if (!list_empty(&c->vc.desc_issued)) {
0797             spin_lock(&d->lock);
0798             list_add_tail(&c->node, &d->chan_pending);
0799             spin_unlock(&d->lock);
0800         }
0801     }
0802     spin_unlock_irqrestore(&c->vc.lock, flags);
0803 
0804     return 0;
0805 }
0806 
0807 static const struct k3dma_soc_data k3_v1_dma_data = {
0808     .flags = 0,
0809 };
0810 
0811 static const struct k3dma_soc_data asp_v1_dma_data = {
0812     .flags = K3_FLAG_NOCLK,
0813 };
0814 
0815 static const struct of_device_id k3_pdma_dt_ids[] = {
0816     { .compatible = "hisilicon,k3-dma-1.0",
0817       .data = &k3_v1_dma_data
0818     },
0819     { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
0820       .data = &asp_v1_dma_data
0821     },
0822     {}
0823 };
0824 MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
0825 
0826 static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
0827                         struct of_dma *ofdma)
0828 {
0829     struct k3_dma_dev *d = ofdma->of_dma_data;
0830     unsigned int request = dma_spec->args[0];
0831 
0832     if (request >= d->dma_requests)
0833         return NULL;
0834 
0835     return dma_get_slave_channel(&(d->chans[request].vc.chan));
0836 }
0837 
0838 static int k3_dma_probe(struct platform_device *op)
0839 {
0840     const struct k3dma_soc_data *soc_data;
0841     struct k3_dma_dev *d;
0842     const struct of_device_id *of_id;
0843     int i, ret, irq = 0;
0844 
0845     d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
0846     if (!d)
0847         return -ENOMEM;
0848 
0849     soc_data = device_get_match_data(&op->dev);
0850     if (!soc_data)
0851         return -EINVAL;
0852 
0853     d->base = devm_platform_ioremap_resource(op, 0);
0854     if (IS_ERR(d->base))
0855         return PTR_ERR(d->base);
0856 
0857     of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
0858     if (of_id) {
0859         of_property_read_u32((&op->dev)->of_node,
0860                 "dma-channels", &d->dma_channels);
0861         of_property_read_u32((&op->dev)->of_node,
0862                 "dma-requests", &d->dma_requests);
0863         ret = of_property_read_u32((&op->dev)->of_node,
0864                 "dma-channel-mask", &d->dma_channel_mask);
0865         if (ret) {
0866             dev_warn(&op->dev,
0867                  "dma-channel-mask doesn't exist, considering all as available.\n");
0868             d->dma_channel_mask = (u32)~0UL;
0869         }
0870     }
0871 
0872     if (!(soc_data->flags & K3_FLAG_NOCLK)) {
0873         d->clk = devm_clk_get(&op->dev, NULL);
0874         if (IS_ERR(d->clk)) {
0875             dev_err(&op->dev, "no dma clk\n");
0876             return PTR_ERR(d->clk);
0877         }
0878     }
0879 
0880     irq = platform_get_irq(op, 0);
0881     ret = devm_request_irq(&op->dev, irq,
0882             k3_dma_int_handler, 0, DRIVER_NAME, d);
0883     if (ret)
0884         return ret;
0885 
0886     d->irq = irq;
0887 
0888     /* A DMA memory pool for LLIs, align on 32-byte boundary */
0889     d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
0890                     LLI_BLOCK_SIZE, 32, 0);
0891     if (!d->pool)
0892         return -ENOMEM;
0893 
0894     /* init phy channel */
0895     d->phy = devm_kcalloc(&op->dev,
0896         d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
0897     if (d->phy == NULL)
0898         return -ENOMEM;
0899 
0900     for (i = 0; i < d->dma_channels; i++) {
0901         struct k3_dma_phy *p;
0902 
0903         if (!(d->dma_channel_mask & BIT(i)))
0904             continue;
0905 
0906         p = &d->phy[i];
0907         p->idx = i;
0908         p->base = d->base + i * 0x40;
0909     }
0910 
0911     INIT_LIST_HEAD(&d->slave.channels);
0912     dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
0913     dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
0914     dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
0915     d->slave.dev = &op->dev;
0916     d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
0917     d->slave.device_tx_status = k3_dma_tx_status;
0918     d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
0919     d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
0920     d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
0921     d->slave.device_issue_pending = k3_dma_issue_pending;
0922     d->slave.device_config = k3_dma_config;
0923     d->slave.device_pause = k3_dma_transfer_pause;
0924     d->slave.device_resume = k3_dma_transfer_resume;
0925     d->slave.device_terminate_all = k3_dma_terminate_all;
0926     d->slave.device_synchronize = k3_dma_synchronize;
0927     d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
0928 
0929     /* init virtual channel */
0930     d->chans = devm_kcalloc(&op->dev,
0931         d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
0932     if (d->chans == NULL)
0933         return -ENOMEM;
0934 
0935     for (i = 0; i < d->dma_requests; i++) {
0936         struct k3_dma_chan *c = &d->chans[i];
0937 
0938         c->status = DMA_IN_PROGRESS;
0939         INIT_LIST_HEAD(&c->node);
0940         c->vc.desc_free = k3_dma_free_desc;
0941         vchan_init(&c->vc, &d->slave);
0942     }
0943 
0944     /* Enable clock before accessing registers */
0945     ret = clk_prepare_enable(d->clk);
0946     if (ret < 0) {
0947         dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
0948         return ret;
0949     }
0950 
0951     k3_dma_enable_dma(d, true);
0952 
0953     ret = dma_async_device_register(&d->slave);
0954     if (ret)
0955         goto dma_async_register_fail;
0956 
0957     ret = of_dma_controller_register((&op->dev)->of_node,
0958                     k3_of_dma_simple_xlate, d);
0959     if (ret)
0960         goto of_dma_register_fail;
0961 
0962     spin_lock_init(&d->lock);
0963     INIT_LIST_HEAD(&d->chan_pending);
0964     tasklet_setup(&d->task, k3_dma_tasklet);
0965     platform_set_drvdata(op, d);
0966     dev_info(&op->dev, "initialized\n");
0967 
0968     return 0;
0969 
0970 of_dma_register_fail:
0971     dma_async_device_unregister(&d->slave);
0972 dma_async_register_fail:
0973     clk_disable_unprepare(d->clk);
0974     return ret;
0975 }
0976 
0977 static int k3_dma_remove(struct platform_device *op)
0978 {
0979     struct k3_dma_chan *c, *cn;
0980     struct k3_dma_dev *d = platform_get_drvdata(op);
0981 
0982     dma_async_device_unregister(&d->slave);
0983     of_dma_controller_free((&op->dev)->of_node);
0984 
0985     devm_free_irq(&op->dev, d->irq, d);
0986 
0987     list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
0988         list_del(&c->vc.chan.device_node);
0989         tasklet_kill(&c->vc.task);
0990     }
0991     tasklet_kill(&d->task);
0992     clk_disable_unprepare(d->clk);
0993     return 0;
0994 }
0995 
0996 #ifdef CONFIG_PM_SLEEP
0997 static int k3_dma_suspend_dev(struct device *dev)
0998 {
0999     struct k3_dma_dev *d = dev_get_drvdata(dev);
1000     u32 stat = 0;
1001 
1002     stat = k3_dma_get_chan_stat(d);
1003     if (stat) {
1004         dev_warn(d->slave.dev,
1005             "chan %d is running, failed to suspend\n", stat);
1006         return -1;
1007     }
1008     k3_dma_enable_dma(d, false);
1009     clk_disable_unprepare(d->clk);
1010     return 0;
1011 }
1012 
1013 static int k3_dma_resume_dev(struct device *dev)
1014 {
1015     struct k3_dma_dev *d = dev_get_drvdata(dev);
1016     int ret = 0;
1017 
1018     ret = clk_prepare_enable(d->clk);
1019     if (ret < 0) {
1020         dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
1021         return ret;
1022     }
1023     k3_dma_enable_dma(d, true);
1024     return 0;
1025 }
1026 #endif
1027 
1028 static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
1029 
1030 static struct platform_driver k3_pdma_driver = {
1031     .driver     = {
1032         .name   = DRIVER_NAME,
1033         .pm = &k3_dma_pmops,
1034         .of_match_table = k3_pdma_dt_ids,
1035     },
1036     .probe      = k3_dma_probe,
1037     .remove     = k3_dma_remove,
1038 };
1039 
1040 module_platform_driver(k3_pdma_driver);
1041 
1042 MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
1043 MODULE_ALIAS("platform:k3dma");
1044 MODULE_LICENSE("GPL v2");
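
A minimal, hypothetical client-side sketch (not part of k3dma.c) of how a peripheral driver could use this controller through the generic dmaengine API; the channel name "rx", the function name and all parameters are illustrative assumptions, and the calls shown resolve to the k3_dma_* callbacks registered in k3_dma_probe() above:

/* Hypothetical usage sketch only -- assumes <linux/dmaengine.h>. */
static int example_start_rx(struct device *dev, dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.src_addr = fifo_addr,		/* peripheral FIFO bus address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 16,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	/* routed to k3_of_dma_simple_xlate() via the client's "dmas" property */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* cached by k3_dma_config(), applied later by k3_dma_config_write() */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err_release;

	/* single-entry wrapper around k3_dma_prep_slave_sg() */
	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto err_release;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* k3_dma_issue_pending() kicks the tasklet */
	return dma_submit_error(cookie);

err_release:
	dma_release_channel(chan);
	return ret;
}

On completion the controller raises TC1, k3_dma_int_handler() completes the cookie, and the client can poll it with dma_async_is_tx_complete() or attach a callback to tx before submitting.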