0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * SA11x0 DMAengine support
0004  *
0005  * Copyright (C) 2012 Russell King
0006  *   Derived in part from arch/arm/mach-sa1100/dma.c,
0007  *   Copyright (C) 2000, 2001 by Nicolas Pitre
0008  */
0009 #include <linux/sched.h>
0010 #include <linux/device.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/init.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019 
0020 #include "virt-dma.h"
0021 
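/*
 * As used by the code below, DMA_ALIGN is a low-bit mask: buffer
 * addresses and chunk lengths are kept 4-byte aligned.  DMA_MAX_SIZE
 * is treated as the largest transfer a single hardware buffer is
 * programmed with, so larger scatterlist entries are split up by the
 * prep routines.
 */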
0022 #define NR_PHY_CHAN 6
0023 #define DMA_ALIGN   3
0024 #define DMA_MAX_SIZE    0x1fff
0025 #define DMA_CHUNK_SIZE  0x1000
0026 
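/*
 * Per-channel register offsets.  Each physical channel occupies a
 * DMA_SIZE (0x20) byte window (see the probe routine below): DCSR has
 * separate set, clear and read addresses, and transfers are
 * double-buffered through two address/length register pairs,
 * A (DBSA/DBTA) and B (DBSB/DBTB).  The DCSR_BIU bit is used below to
 * tell which of the two buffers the hardware is currently working from.
 */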
0027 #define DMA_DDAR    0x00
0028 #define DMA_DCSR_S  0x04
0029 #define DMA_DCSR_C  0x08
0030 #define DMA_DCSR_R  0x0c
0031 #define DMA_DBSA    0x10
0032 #define DMA_DBTA    0x14
0033 #define DMA_DBSB    0x18
0034 #define DMA_DBTB    0x1c
0035 #define DMA_SIZE    0x20
0036 
0037 #define DCSR_RUN    (1 << 0)
0038 #define DCSR_IE     (1 << 1)
0039 #define DCSR_ERROR  (1 << 2)
0040 #define DCSR_DONEA  (1 << 3)
0041 #define DCSR_STRTA  (1 << 4)
0042 #define DCSR_DONEB  (1 << 5)
0043 #define DCSR_STRTB  (1 << 6)
0044 #define DCSR_BIU    (1 << 7)
0045 
0046 #define DDAR_RW     (1 << 0)    /* 0 = W, 1 = R */
0047 #define DDAR_E      (1 << 1)    /* 0 = LE, 1 = BE */
0048 #define DDAR_BS     (1 << 2)    /* 0 = BS4, 1 = BS8 */
0049 #define DDAR_DW     (1 << 3)    /* 0 = 8b, 1 = 16b */
0050 #define DDAR_Ser0UDCTr  (0x0 << 4)
0051 #define DDAR_Ser0UDCRc  (0x1 << 4)
0052 #define DDAR_Ser1SDLCTr (0x2 << 4)
0053 #define DDAR_Ser1SDLCRc (0x3 << 4)
0054 #define DDAR_Ser1UARTTr (0x4 << 4)
0055 #define DDAR_Ser1UARTRc (0x5 << 4)
0056 #define DDAR_Ser2ICPTr  (0x6 << 4)
0057 #define DDAR_Ser2ICPRc  (0x7 << 4)
0058 #define DDAR_Ser3UARTTr (0x8 << 4)
0059 #define DDAR_Ser3UARTRc (0x9 << 4)
0060 #define DDAR_Ser4MCP0Tr (0xa << 4)
0061 #define DDAR_Ser4MCP0Rc (0xb << 4)
0062 #define DDAR_Ser4MCP1Tr (0xc << 4)
0063 #define DDAR_Ser4MCP1Rc (0xd << 4)
0064 #define DDAR_Ser4SSPTr  (0xe << 4)
0065 #define DDAR_Ser4SSPRc  (0xf << 4)
0066 
0067 struct sa11x0_dma_sg {
0068     u32         addr;
0069     u32         len;
0070 };
0071 
0072 struct sa11x0_dma_desc {
0073     struct virt_dma_desc    vd;
0074 
0075     u32         ddar;
0076     size_t          size;
0077     unsigned        period;
0078     bool            cyclic;
0079 
0080     unsigned        sglen;
0081     struct sa11x0_dma_sg    sg[];
0082 };
0083 
0084 struct sa11x0_dma_phy;
0085 
0086 struct sa11x0_dma_chan {
0087     struct virt_dma_chan    vc;
0088 
0089     /* protected by c->vc.lock */
0090     struct sa11x0_dma_phy   *phy;
0091     enum dma_status     status;
0092 
0093     /* protected by d->lock */
0094     struct list_head    node;
0095 
0096     u32         ddar;
0097     const char      *name;
0098 };
0099 
0100 struct sa11x0_dma_phy {
0101     void __iomem        *base;
0102     struct sa11x0_dma_dev   *dev;
0103     unsigned        num;
0104 
0105     struct sa11x0_dma_chan  *vchan;
0106 
0107     /* Protected by c->vc.lock */
0108     unsigned        sg_load;
0109     struct sa11x0_dma_desc  *txd_load;
0110     unsigned        sg_done;
0111     struct sa11x0_dma_desc  *txd_done;
0112     u32         dbs[2];
0113     u32         dbt[2];
0114     u32         dcsr;
0115 };
0116 
0117 struct sa11x0_dma_dev {
0118     struct dma_device   slave;
0119     void __iomem        *base;
0120     spinlock_t      lock;
0121     struct tasklet_struct   task;
0122     struct list_head    chan_pending;
0123     struct sa11x0_dma_phy   phy[NR_PHY_CHAN];
0124 };
0125 
0126 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
0127 {
0128     return container_of(chan, struct sa11x0_dma_chan, vc.chan);
0129 }
0130 
0131 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
0132 {
0133     return container_of(dmadev, struct sa11x0_dma_dev, slave);
0134 }
0135 
0136 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
0137 {
0138     struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
0139 
0140     return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
0141 }
0142 
0143 static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
0144 {
0145     kfree(container_of(vd, struct sa11x0_dma_desc, vd));
0146 }
0147 
0148 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
0149 {
0150     list_del(&txd->vd.node);
0151     p->txd_load = txd;
0152     p->sg_load = 0;
0153 
0154     dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
0155         p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
0156 }
0157 
0158 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
0159     struct sa11x0_dma_chan *c)
0160 {
0161     struct sa11x0_dma_desc *txd = p->txd_load;
0162     struct sa11x0_dma_sg *sg;
0163     void __iomem *base = p->base;
0164     unsigned dbsx, dbtx;
0165     u32 dcsr;
0166 
0167     if (!txd)
0168         return;
0169 
0170     dcsr = readl_relaxed(base + DMA_DCSR_R);
0171 
0172     /* Don't try to load the next transfer if both buffers are started */
0173     if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
0174         return;
0175 
0176     if (p->sg_load == txd->sglen) {
0177         if (!txd->cyclic) {
0178             struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
0179 
0180             /*
0181              * We have reached the end of the current descriptor.
0182              * Peek at the next descriptor, and if compatible with
0183              * the current, start processing it.
0184              */
0185             if (txn && txn->ddar == txd->ddar) {
0186                 txd = txn;
0187                 sa11x0_dma_start_desc(p, txn);
0188             } else {
0189                 p->txd_load = NULL;
0190                 return;
0191             }
0192         } else {
0193             /* Cyclic: reset back to beginning */
0194             p->sg_load = 0;
0195         }
0196     }
0197 
0198     sg = &txd->sg[p->sg_load++];
0199 
0200     /* Select buffer to load according to channel status */
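    /*
     * Load buffer A when BIU and STRTB are both set (the hardware is
     * busy with buffer B), or when BIU and STRTA are both clear
     * (buffer A is selected but idle); otherwise load buffer B.
     */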
0201     if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
0202         ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
0203         dbsx = DMA_DBSA;
0204         dbtx = DMA_DBTA;
0205         dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
0206     } else {
0207         dbsx = DMA_DBSB;
0208         dbtx = DMA_DBTB;
0209         dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
0210     }
0211 
0212     writel_relaxed(sg->addr, base + dbsx);
0213     writel_relaxed(sg->len, base + dbtx);
0214     writel(dcsr, base + DMA_DCSR_S);
0215 
0216     dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
0217         p->num, dcsr,
0218         'A' + (dbsx == DMA_DBSB), sg->addr,
0219         'A' + (dbtx == DMA_DBTB), sg->len);
0220 }
0221 
0222 static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
0223     struct sa11x0_dma_chan *c)
0224 {
0225     struct sa11x0_dma_desc *txd = p->txd_done;
0226 
0227     if (++p->sg_done == txd->sglen) {
0228         if (!txd->cyclic) {
0229             vchan_cookie_complete(&txd->vd);
0230 
0231             p->sg_done = 0;
0232             p->txd_done = p->txd_load;
0233 
0234             if (!p->txd_done)
0235                 tasklet_schedule(&p->dev->task);
0236         } else {
0237             if ((p->sg_done % txd->period) == 0)
0238                 vchan_cyclic_callback(&txd->vd);
0239 
0240             /* Cyclic: reset back to beginning */
0241             p->sg_done = 0;
0242         }
0243     }
0244 
0245     sa11x0_dma_start_sg(p, c);
0246 }
0247 
0248 static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
0249 {
0250     struct sa11x0_dma_phy *p = dev_id;
0251     struct sa11x0_dma_dev *d = p->dev;
0252     struct sa11x0_dma_chan *c;
0253     u32 dcsr;
0254 
0255     dcsr = readl_relaxed(p->base + DMA_DCSR_R);
0256     if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
0257         return IRQ_NONE;
0258 
0259     /* Clear reported status bits */
0260     writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
0261         p->base + DMA_DCSR_C);
0262 
0263     dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
0264 
0265     if (dcsr & DCSR_ERROR) {
0266         dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
0267             p->num, dcsr,
0268             readl_relaxed(p->base + DMA_DDAR),
0269             readl_relaxed(p->base + DMA_DBSA),
0270             readl_relaxed(p->base + DMA_DBTA),
0271             readl_relaxed(p->base + DMA_DBSB),
0272             readl_relaxed(p->base + DMA_DBTB));
0273     }
0274 
0275     c = p->vchan;
0276     if (c) {
0277         unsigned long flags;
0278 
0279         spin_lock_irqsave(&c->vc.lock, flags);
0280         /*
0281          * Now that we're holding the lock, check that the vchan
0282          * really is associated with this pchan before touching the
0283          * hardware.  This should always succeed, because we won't
0284          * change p->vchan or c->phy while the channel is actively
0285          * transferring.
0286          */
0287         if (c->phy == p) {
0288             if (dcsr & DCSR_DONEA)
0289                 sa11x0_dma_complete(p, c);
0290             if (dcsr & DCSR_DONEB)
0291                 sa11x0_dma_complete(p, c);
0292         }
0293         spin_unlock_irqrestore(&c->vc.lock, flags);
0294     }
0295 
0296     return IRQ_HANDLED;
0297 }
0298 
0299 static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
0300 {
0301     struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
0302 
0303     /* If the issued list is empty, we have no further txds to process */
0304     if (txd) {
0305         struct sa11x0_dma_phy *p = c->phy;
0306 
0307         sa11x0_dma_start_desc(p, txd);
0308         p->txd_done = txd;
0309         p->sg_done = 0;
0310 
0311         /* The channel should not have any transfers started */
0312         WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
0313                       (DCSR_STRTA | DCSR_STRTB));
0314 
0315         /* Clear the run and start bits before changing DDAR */
0316         writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
0317                    p->base + DMA_DCSR_C);
0318         writel_relaxed(txd->ddar, p->base + DMA_DDAR);
0319 
0320         /* Try to start both buffers */
0321         sa11x0_dma_start_sg(p, c);
0322         sa11x0_dma_start_sg(p, c);
0323     }
0324 }
0325 
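/*
 * The tasklet acts as the channel scheduler: it releases physical
 * channels whose virtual channel has no further descriptors, then
 * hands free physical channels to virtual channels waiting on the
 * chan_pending list and starts their next descriptor.
 */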
0326 static void sa11x0_dma_tasklet(struct tasklet_struct *t)
0327 {
0328     struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
0329     struct sa11x0_dma_phy *p;
0330     struct sa11x0_dma_chan *c;
0331     unsigned pch, pch_alloc = 0;
0332 
0333     dev_dbg(d->slave.dev, "tasklet enter\n");
0334 
0335     list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
0336         spin_lock_irq(&c->vc.lock);
0337         p = c->phy;
0338         if (p && !p->txd_done) {
0339             sa11x0_dma_start_txd(c);
0340             if (!p->txd_done) {
0341                 /* No current txd associated with this channel */
0342                 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
0343 
0344                 /* Mark this channel free */
0345                 c->phy = NULL;
0346                 p->vchan = NULL;
0347             }
0348         }
0349         spin_unlock_irq(&c->vc.lock);
0350     }
0351 
0352     spin_lock_irq(&d->lock);
0353     for (pch = 0; pch < NR_PHY_CHAN; pch++) {
0354         p = &d->phy[pch];
0355 
0356         if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
0357             c = list_first_entry(&d->chan_pending,
0358                 struct sa11x0_dma_chan, node);
0359             list_del_init(&c->node);
0360 
0361             pch_alloc |= 1 << pch;
0362 
0363             /* Mark this channel allocated */
0364             p->vchan = c;
0365 
0366             dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
0367         }
0368     }
0369     spin_unlock_irq(&d->lock);
0370 
0371     for (pch = 0; pch < NR_PHY_CHAN; pch++) {
0372         if (pch_alloc & (1 << pch)) {
0373             p = &d->phy[pch];
0374             c = p->vchan;
0375 
0376             spin_lock_irq(&c->vc.lock);
0377             c->phy = p;
0378 
0379             sa11x0_dma_start_txd(c);
0380             spin_unlock_irq(&c->vc.lock);
0381         }
0382     }
0383 
0384     dev_dbg(d->slave.dev, "tasklet exit\n");
0385 }
0386 
0387 
0388 static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
0389 {
0390     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0391     struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
0392     unsigned long flags;
0393 
0394     spin_lock_irqsave(&d->lock, flags);
0395     list_del_init(&c->node);
0396     spin_unlock_irqrestore(&d->lock, flags);
0397 
0398     vchan_free_chan_resources(&c->vc);
0399 }
0400 
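/*
 * Read back the hardware's current transfer address from the buffer
 * register selected by the DCSR state; sa11x0_dma_tx_status() uses it
 * as the channel's current position when computing the residue.
 */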
0401 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
0402 {
0403     unsigned reg;
0404     u32 dcsr;
0405 
0406     dcsr = readl_relaxed(p->base + DMA_DCSR_R);
0407 
0408     if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
0409         (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
0410         reg = DMA_DBSA;
0411     else
0412         reg = DMA_DBSB;
0413 
0414     return readl_relaxed(p->base + reg);
0415 }
0416 
0417 static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
0418     dma_cookie_t cookie, struct dma_tx_state *state)
0419 {
0420     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0421     struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
0422     struct sa11x0_dma_phy *p;
0423     struct virt_dma_desc *vd;
0424     unsigned long flags;
0425     enum dma_status ret;
0426 
0427     ret = dma_cookie_status(&c->vc.chan, cookie, state);
0428     if (ret == DMA_COMPLETE)
0429         return ret;
0430 
0431     if (!state)
0432         return c->status;
0433 
0434     spin_lock_irqsave(&c->vc.lock, flags);
0435     p = c->phy;
0436 
0437     /*
0438      * If the cookie is on our issue queue, then the residue is
0439      * its total size.
0440      */
0441     vd = vchan_find_desc(&c->vc, cookie);
0442     if (vd) {
0443         state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
0444     } else if (!p) {
0445         state->residue = 0;
0446     } else {
0447         struct sa11x0_dma_desc *txd;
0448         size_t bytes = 0;
0449 
0450         if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
0451             txd = p->txd_done;
0452         else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
0453             txd = p->txd_load;
0454         else
0455             txd = NULL;
0456 
0457         ret = c->status;
0458         if (txd) {
0459             dma_addr_t addr = sa11x0_dma_pos(p);
0460             unsigned i;
0461 
0462             dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);
0463 
0464             for (i = 0; i < txd->sglen; i++) {
0465                 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
0466                     i, txd->sg[i].addr, txd->sg[i].len);
0467                 if (addr >= txd->sg[i].addr &&
0468                     addr < txd->sg[i].addr + txd->sg[i].len) {
0469                     unsigned len;
0470 
0471                     len = txd->sg[i].len -
0472                         (addr - txd->sg[i].addr);
0473                     dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
0474                         i, len);
0475                     bytes += len;
0476                     i++;
0477                     break;
0478                 }
0479             }
0480             for (; i < txd->sglen; i++) {
0481                 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
0482                     i, txd->sg[i].addr, txd->sg[i].len);
0483                 bytes += txd->sg[i].len;
0484             }
0485         }
0486         state->residue = bytes;
0487     }
0488     spin_unlock_irqrestore(&c->vc.lock, flags);
0489 
0490     dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);
0491 
0492     return ret;
0493 }
0494 
0495 /*
0496  * Move pending txds to the issued list, and re-init pending list.
0497  * If not already pending, add this channel to the list of pending
0498  * channels and trigger the tasklet to run.
0499  */
0500 static void sa11x0_dma_issue_pending(struct dma_chan *chan)
0501 {
0502     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0503     struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
0504     unsigned long flags;
0505 
0506     spin_lock_irqsave(&c->vc.lock, flags);
0507     if (vchan_issue_pending(&c->vc)) {
0508         if (!c->phy) {
0509             spin_lock(&d->lock);
0510             if (list_empty(&c->node)) {
0511                 list_add_tail(&c->node, &d->chan_pending);
0512                 tasklet_schedule(&d->task);
0513                 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
0514             }
0515             spin_unlock(&d->lock);
0516         }
0517     } else
0518         dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
0519     spin_unlock_irqrestore(&c->vc.lock, flags);
0520 }
0521 
0522 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
0523     struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
0524     enum dma_transfer_direction dir, unsigned long flags, void *context)
0525 {
0526     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0527     struct sa11x0_dma_desc *txd;
0528     struct scatterlist *sgent;
0529     unsigned i, j = sglen;
0530     size_t size = 0;
0531 
0532     /* SA11x0 channels can only operate in their native direction */
0533     if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
0534         dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
0535             &c->vc, c->ddar, dir);
0536         return NULL;
0537     }
0538 
0539     /* Do not allow zero-sized txds */
0540     if (sglen == 0)
0541         return NULL;
0542 
0543     for_each_sg(sg, sgent, sglen, i) {
0544         dma_addr_t addr = sg_dma_address(sgent);
0545         unsigned int len = sg_dma_len(sgent);
0546 
0547         if (len > DMA_MAX_SIZE)
0548             j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
0549         if (addr & DMA_ALIGN) {
0550             dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
0551                 &c->vc, &addr);
0552             return NULL;
0553         }
0554     }
0555 
0556     txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
0557     if (!txd) {
0558         dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
0559         return NULL;
0560     }
0561 
0562     j = 0;
0563     for_each_sg(sg, sgent, sglen, i) {
0564         dma_addr_t addr = sg_dma_address(sgent);
0565         unsigned len = sg_dma_len(sgent);
0566 
0567         size += len;
0568 
0569         do {
0570             unsigned tlen = len;
0571 
0572             /*
0573              * Check whether the transfer will fit.  If not, try
0574              * to split the transfer up such that we end up with
0575              * equal chunks - but make sure that we preserve the
0576              * alignment.  This avoids small segments.
0577              */
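            /*
             * Rough worked example (illustrative numbers): a
             * 10000-byte segment exceeds DMA_MAX_SIZE (0x1fff = 8191),
             * so mult = DIV_ROUND_UP(10000, 8188) = 2 and
             * tlen = (10000 / 2) & ~3 = 5000, giving two 5000-byte
             * chunks.
             */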
0578             if (tlen > DMA_MAX_SIZE) {
0579                 unsigned mult = DIV_ROUND_UP(tlen,
0580                     DMA_MAX_SIZE & ~DMA_ALIGN);
0581 
0582                 tlen = (tlen / mult) & ~DMA_ALIGN;
0583             }
0584 
0585             txd->sg[j].addr = addr;
0586             txd->sg[j].len = tlen;
0587 
0588             addr += tlen;
0589             len -= tlen;
0590             j++;
0591         } while (len);
0592     }
0593 
0594     txd->ddar = c->ddar;
0595     txd->size = size;
0596     txd->sglen = j;
0597 
0598     dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
0599         &c->vc, &txd->vd, txd->size, txd->sglen);
0600 
0601     return vchan_tx_prep(&c->vc, &txd->vd, flags);
0602 }
0603 
0604 static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
0605     struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
0606     enum dma_transfer_direction dir, unsigned long flags)
0607 {
0608     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0609     struct sa11x0_dma_desc *txd;
0610     unsigned i, j, k, sglen, sgperiod;
0611 
0612     /* SA11x0 channels can only operate in their native direction */
0613     if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
0614         dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
0615             &c->vc, c->ddar, dir);
0616         return NULL;
0617     }
0618 
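    /*
     * The buffer is assumed to be a whole number of periods, and each
     * period is split into sgperiod chunks of at most DMA_MAX_SIZE
     * bytes; the WARN_ONs below check that the split works out
     * exactly.
     */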
0619     sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
0620     sglen = size * sgperiod / period;
0621 
0622     /* Do not allow zero-sized txds */
0623     if (sglen == 0)
0624         return NULL;
0625 
0626     txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
0627     if (!txd) {
0628         dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
0629         return NULL;
0630     }
0631 
0632     for (i = k = 0; i < size / period; i++) {
0633         size_t tlen, len = period;
0634 
0635         for (j = 0; j < sgperiod; j++, k++) {
0636             tlen = len;
0637 
0638             if (tlen > DMA_MAX_SIZE) {
0639                 unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
0640                 tlen = (tlen / mult) & ~DMA_ALIGN;
0641             }
0642 
0643             txd->sg[k].addr = addr;
0644             txd->sg[k].len = tlen;
0645             addr += tlen;
0646             len -= tlen;
0647         }
0648 
0649         WARN_ON(len != 0);
0650     }
0651 
0652     WARN_ON(k != sglen);
0653 
0654     txd->ddar = c->ddar;
0655     txd->size = size;
0656     txd->sglen = sglen;
0657     txd->cyclic = 1;
0658     txd->period = sgperiod;
0659 
0660     return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0661 }
0662 
0663 static int sa11x0_dma_device_config(struct dma_chan *chan,
0664                     struct dma_slave_config *cfg)
0665 {
0666     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0667     u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
0668     dma_addr_t addr;
0669     enum dma_slave_buswidth width;
0670     u32 maxburst;
0671 
0672     if (ddar & DDAR_RW) {
0673         addr = cfg->src_addr;
0674         width = cfg->src_addr_width;
0675         maxburst = cfg->src_maxburst;
0676     } else {
0677         addr = cfg->dst_addr;
0678         width = cfg->dst_addr_width;
0679         maxburst = cfg->dst_maxburst;
0680     }
0681 
0682     if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
0683          width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
0684         (maxburst != 4 && maxburst != 8))
0685         return -EINVAL;
0686 
0687     if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
0688         ddar |= DDAR_DW;
0689     if (maxburst == 8)
0690         ddar |= DDAR_BS;
0691 
0692     dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
0693         &c->vc, &addr, width, maxburst);
0694 
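    /*
     * Fold the device address into DDAR: address bits 31..28 stay in
     * place, and the word-aligned bits 21..2 are placed in DDAR bits
     * 27..8 (hence the << 6).
     */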
0695     c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
0696 
0697     return 0;
0698 }
0699 
0700 static int sa11x0_dma_device_pause(struct dma_chan *chan)
0701 {
0702     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0703     struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
0704     struct sa11x0_dma_phy *p;
0705     unsigned long flags;
0706 
0707     dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
0708     spin_lock_irqsave(&c->vc.lock, flags);
0709     if (c->status == DMA_IN_PROGRESS) {
0710         c->status = DMA_PAUSED;
0711 
0712         p = c->phy;
0713         if (p) {
0714             writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
0715         } else {
0716             spin_lock(&d->lock);
0717             list_del_init(&c->node);
0718             spin_unlock(&d->lock);
0719         }
0720     }
0721     spin_unlock_irqrestore(&c->vc.lock, flags);
0722 
0723     return 0;
0724 }
0725 
0726 static int sa11x0_dma_device_resume(struct dma_chan *chan)
0727 {
0728     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0729     struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
0730     struct sa11x0_dma_phy *p;
0731     unsigned long flags;
0732 
0733     dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
0734     spin_lock_irqsave(&c->vc.lock, flags);
0735     if (c->status == DMA_PAUSED) {
0736         c->status = DMA_IN_PROGRESS;
0737 
0738         p = c->phy;
0739         if (p) {
0740             writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
0741         } else if (!list_empty(&c->vc.desc_issued)) {
0742             spin_lock(&d->lock);
0743             list_add_tail(&c->node, &d->chan_pending);
0744             spin_unlock(&d->lock);
0745         }
0746     }
0747     spin_unlock_irqrestore(&c->vc.lock, flags);
0748 
0749     return 0;
0750 }
0751 
0752 static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
0753 {
0754     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0755     struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
0756     struct sa11x0_dma_phy *p;
0757     LIST_HEAD(head);
0758     unsigned long flags;
0759 
0760     dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
0761     /* Clear the tx descriptor lists */
0762     spin_lock_irqsave(&c->vc.lock, flags);
0763     vchan_get_all_descriptors(&c->vc, &head);
0764 
0765     p = c->phy;
0766     if (p) {
0767         dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
0768         /* vchan is assigned to a pchan - stop the channel */
0769         writel(DCSR_RUN | DCSR_IE |
0770                DCSR_STRTA | DCSR_DONEA |
0771                DCSR_STRTB | DCSR_DONEB,
0772                p->base + DMA_DCSR_C);
0773 
0774         if (p->txd_load) {
0775             if (p->txd_load != p->txd_done)
0776                 list_add_tail(&p->txd_load->vd.node, &head);
0777             p->txd_load = NULL;
0778         }
0779         if (p->txd_done) {
0780             list_add_tail(&p->txd_done->vd.node, &head);
0781             p->txd_done = NULL;
0782         }
0783         c->phy = NULL;
0784         spin_lock(&d->lock);
0785         p->vchan = NULL;
0786         spin_unlock(&d->lock);
0787         tasklet_schedule(&d->task);
0788     }
0789     spin_unlock_irqrestore(&c->vc.lock, flags);
0790     vchan_dma_desc_free_list(&c->vc, &head);
0791 
0792     return 0;
0793 }
0794 
0795 struct sa11x0_dma_channel_desc {
0796     u32 ddar;
0797     const char *name;
0798 };
0799 
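/*
 * One virtual channel is created below for each hardware request line;
 * entries with DDAR_RW set are the receive (device-to-memory)
 * directions, per the DDAR_RW definition above (0 = write to device,
 * 1 = read from device).
 */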
0800 #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
0801 static const struct sa11x0_dma_channel_desc chan_desc[] = {
0802     CD(Ser0UDCTr, 0),
0803     CD(Ser0UDCRc, DDAR_RW),
0804     CD(Ser1SDLCTr, 0),
0805     CD(Ser1SDLCRc, DDAR_RW),
0806     CD(Ser1UARTTr, 0),
0807     CD(Ser1UARTRc, DDAR_RW),
0808     CD(Ser2ICPTr, 0),
0809     CD(Ser2ICPRc, DDAR_RW),
0810     CD(Ser3UARTTr, 0),
0811     CD(Ser3UARTRc, DDAR_RW),
0812     CD(Ser4MCP0Tr, 0),
0813     CD(Ser4MCP0Rc, DDAR_RW),
0814     CD(Ser4MCP1Tr, 0),
0815     CD(Ser4MCP1Rc, DDAR_RW),
0816     CD(Ser4SSPTr, 0),
0817     CD(Ser4SSPRc, DDAR_RW),
0818 };
0819 
0820 static const struct dma_slave_map sa11x0_dma_map[] = {
0821     { "sa11x0-ir", "tx", "Ser2ICPTr" },
0822     { "sa11x0-ir", "rx", "Ser2ICPRc" },
0823     { "sa11x0-ssp", "tx", "Ser4SSPTr" },
0824     { "sa11x0-ssp", "rx", "Ser4SSPRc" },
0825 };
0826 
0827 static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
0828 {
0829     struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
0830     const char *p = param;
0831 
0832     return !strcmp(c->name, p);
0833 }
0834 
0835 static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
0836     struct device *dev)
0837 {
0838     unsigned i;
0839 
0840     INIT_LIST_HEAD(&dmadev->channels);
0841     dmadev->dev = dev;
0842     dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
0843     dmadev->device_config = sa11x0_dma_device_config;
0844     dmadev->device_pause = sa11x0_dma_device_pause;
0845     dmadev->device_resume = sa11x0_dma_device_resume;
0846     dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
0847     dmadev->device_tx_status = sa11x0_dma_tx_status;
0848     dmadev->device_issue_pending = sa11x0_dma_issue_pending;
0849 
0850     for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
0851         struct sa11x0_dma_chan *c;
0852 
0853         c = kzalloc(sizeof(*c), GFP_KERNEL);
0854         if (!c) {
0855             dev_err(dev, "no memory for channel %u\n", i);
0856             return -ENOMEM;
0857         }
0858 
0859         c->status = DMA_IN_PROGRESS;
0860         c->ddar = chan_desc[i].ddar;
0861         c->name = chan_desc[i].name;
0862         INIT_LIST_HEAD(&c->node);
0863 
0864         c->vc.desc_free = sa11x0_dma_free_desc;
0865         vchan_init(&c->vc, dmadev);
0866     }
0867 
0868     return dma_async_device_register(dmadev);
0869 }
0870 
0871 static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
0872     void *data)
0873 {
0874     int irq = platform_get_irq(pdev, nr);
0875 
0876     if (irq <= 0)
0877         return -ENXIO;
0878 
0879     return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
0880 }
0881 
0882 static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
0883     void *data)
0884 {
0885     int irq = platform_get_irq(pdev, nr);
0886     if (irq > 0)
0887         free_irq(irq, data);
0888 }
0889 
0890 static void sa11x0_dma_free_channels(struct dma_device *dmadev)
0891 {
0892     struct sa11x0_dma_chan *c, *cn;
0893 
0894     list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
0895         list_del(&c->vc.chan.device_node);
0896         tasklet_kill(&c->vc.task);
0897         kfree(c);
0898     }
0899 }
0900 
0901 static int sa11x0_dma_probe(struct platform_device *pdev)
0902 {
0903     struct sa11x0_dma_dev *d;
0904     struct resource *res;
0905     unsigned i;
0906     int ret;
0907 
0908     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0909     if (!res)
0910         return -ENXIO;
0911 
0912     d = kzalloc(sizeof(*d), GFP_KERNEL);
0913     if (!d) {
0914         ret = -ENOMEM;
0915         goto err_alloc;
0916     }
0917 
0918     spin_lock_init(&d->lock);
0919     INIT_LIST_HEAD(&d->chan_pending);
0920 
0921     d->slave.filter.fn = sa11x0_dma_filter_fn;
0922     d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
0923     d->slave.filter.map = sa11x0_dma_map;
0924 
0925     d->base = ioremap(res->start, resource_size(res));
0926     if (!d->base) {
0927         ret = -ENOMEM;
0928         goto err_ioremap;
0929     }
0930 
0931     tasklet_setup(&d->task, sa11x0_dma_tasklet);
0932 
0933     for (i = 0; i < NR_PHY_CHAN; i++) {
0934         struct sa11x0_dma_phy *p = &d->phy[i];
0935 
0936         p->dev = d;
0937         p->num = i;
0938         p->base = d->base + i * DMA_SIZE;
0939         writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
0940             DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
0941             p->base + DMA_DCSR_C);
0942         writel_relaxed(0, p->base + DMA_DDAR);
0943 
0944         ret = sa11x0_dma_request_irq(pdev, i, p);
0945         if (ret) {
0946             while (i) {
0947                 i--;
0948                 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
0949             }
0950             goto err_irq;
0951         }
0952     }
0953 
0954     dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
0955     dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
0956     d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
0957     d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
0958     d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
0959     d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
0960     d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
0961                    BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
0962     d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
0963                    BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
0964     ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
0965     if (ret) {
0966         dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
0967             ret);
0968         goto err_slave_reg;
0969     }
0970 
0971     platform_set_drvdata(pdev, d);
0972     return 0;
0973 
0974  err_slave_reg:
0975     sa11x0_dma_free_channels(&d->slave);
0976     for (i = 0; i < NR_PHY_CHAN; i++)
0977         sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
0978  err_irq:
0979     tasklet_kill(&d->task);
0980     iounmap(d->base);
0981  err_ioremap:
0982     kfree(d);
0983  err_alloc:
0984     return ret;
0985 }
0986 
0987 static int sa11x0_dma_remove(struct platform_device *pdev)
0988 {
0989     struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
0990     unsigned pch;
0991 
0992     dma_async_device_unregister(&d->slave);
0993 
0994     sa11x0_dma_free_channels(&d->slave);
0995     for (pch = 0; pch < NR_PHY_CHAN; pch++)
0996         sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
0997     tasklet_kill(&d->task);
0998     iounmap(d->base);
0999     kfree(d);
1000 
1001     return 0;
1002 }
1003 
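/*
 * Suspend stops each physical channel and saves its buffer registers
 * with the buffer that was in use normalised to slot 0, swapping the
 * STRTA/STRTB bits in the saved DCSR to match; resume then reprograms
 * the channel from this saved state, so the previously active buffer
 * is restored as buffer A.
 */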
1004 static __maybe_unused int sa11x0_dma_suspend(struct device *dev)
1005 {
1006     struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1007     unsigned pch;
1008 
1009     for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1010         struct sa11x0_dma_phy *p = &d->phy[pch];
1011         u32 dcsr, saved_dcsr;
1012 
1013         dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1014         if (dcsr & DCSR_RUN) {
1015             writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
1016             dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1017         }
1018 
1019         saved_dcsr &= DCSR_RUN | DCSR_IE;
1020         if (dcsr & DCSR_BIU) {
1021             p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
1022             p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
1023             p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
1024             p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
1025             saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
1026                       (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
1027         } else {
1028             p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
1029             p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
1030             p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
1031             p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
1032             saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
1033         }
1034         p->dcsr = saved_dcsr;
1035 
1036         writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
1037     }
1038 
1039     return 0;
1040 }
1041 
1042 static __maybe_unused int sa11x0_dma_resume(struct device *dev)
1043 {
1044     struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1045     unsigned pch;
1046 
1047     for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1048         struct sa11x0_dma_phy *p = &d->phy[pch];
1049         struct sa11x0_dma_desc *txd = NULL;
1050         u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1051 
1052         WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
1053 
1054         if (p->txd_done)
1055             txd = p->txd_done;
1056         else if (p->txd_load)
1057             txd = p->txd_load;
1058 
1059         if (!txd)
1060             continue;
1061 
1062         writel_relaxed(txd->ddar, p->base + DMA_DDAR);
1063 
1064         writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
1065         writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
1066         writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
1067         writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
1068         writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
1069     }
1070 
1071     return 0;
1072 }
1073 
1074 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1075     SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume)
1076 };
1077 
1078 static struct platform_driver sa11x0_dma_driver = {
1079     .driver = {
1080         .name   = "sa11x0-dma",
1081         .pm = &sa11x0_dma_pm_ops,
1082     },
1083     .probe      = sa11x0_dma_probe,
1084     .remove     = sa11x0_dma_remove,
1085 };
1086 
1087 static int __init sa11x0_dma_init(void)
1088 {
1089     return platform_driver_register(&sa11x0_dma_driver);
1090 }
1091 subsys_initcall(sa11x0_dma_init);
1092 
1093 static void __exit sa11x0_dma_exit(void)
1094 {
1095     platform_driver_unregister(&sa11x0_dma_driver);
1096 }
1097 module_exit(sa11x0_dma_exit);
1098 
1099 MODULE_AUTHOR("Russell King");
1100 MODULE_DESCRIPTION("SA-11x0 DMA driver");
1101 MODULE_LICENSE("GPL v2");
1102 MODULE_ALIAS("platform:sa11x0-dma");