0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * OMAP DMAengine support
0004  */
0005 #include <linux/cpu_pm.h>
0006 #include <linux/delay.h>
0007 #include <linux/dmaengine.h>
0008 #include <linux/dma-mapping.h>
0009 #include <linux/dmapool.h>
0010 #include <linux/err.h>
0011 #include <linux/init.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/list.h>
0014 #include <linux/module.h>
0015 #include <linux/omap-dma.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019 #include <linux/of_dma.h>
0020 #include <linux/of_device.h>
0021 
0022 #include "../virt-dma.h"
0023 
0024 #define OMAP_SDMA_REQUESTS  127
0025 #define OMAP_SDMA_CHANNELS  32
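     /*
      * Default sizing: up to 127 hardware DMA request lines are
      * multiplexed onto 32 logical channels; probe may override both
      * via the "dma-requests"/"dma-channels" DT properties or the
      * platform data channel count.
      */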
0026 
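     /*
      * Per-SoC feature flags: lch_end is the last per-channel register
      * cleared by omap_dma_clear_lch(); rw_priority enables the GCR
      * priority setup in omap_dma_init_gcr() (omap2430 and later);
      * needs_busy_check and may_lose_context select which CPU PM
      * notifier gets registered, and needs_lch_clear requests clearing
      * the logical channel registers at init.
      */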
0027 struct omap_dma_config {
0028     int lch_end;
0029     unsigned int rw_priority:1;
0030     unsigned int needs_busy_check:1;
0031     unsigned int may_lose_context:1;
0032     unsigned int needs_lch_clear:1;
0033 };
0034 
0035 struct omap_dma_context {
0036     u32 irqenable_l0;
0037     u32 irqenable_l1;
0038     u32 ocp_sysconfig;
0039     u32 gcr;
0040 };
0041 
0042 struct omap_dmadev {
0043     struct dma_device ddev;
0044     spinlock_t lock;
0045     void __iomem *base;
0046     const struct omap_dma_reg *reg_map;
0047     struct omap_system_dma_plat_info *plat;
0048     const struct omap_dma_config *cfg;
0049     struct notifier_block nb;
0050     struct omap_dma_context context;
0051     int lch_count;
0052     DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
0053     struct mutex lch_lock;      /* for assigning logical channels */
0054     bool legacy;
0055     bool ll123_supported;
0056     struct dma_pool *desc_pool;
0057     unsigned dma_requests;
0058     spinlock_t irq_lock;
0059     uint32_t irq_enable_mask;
0060     struct omap_chan **lch_map;
0061 };
0062 
0063 struct omap_chan {
0064     struct virt_dma_chan vc;
0065     void __iomem *channel_base;
0066     const struct omap_dma_reg *reg_map;
0067     uint32_t ccr;
0068 
0069     struct dma_slave_config cfg;
0070     unsigned dma_sig;
0071     bool cyclic;
0072     bool paused;
0073     bool running;
0074 
0075     int dma_ch;
0076     struct omap_desc *desc;
0077     unsigned sgidx;
0078 };
0079 
0080 #define DESC_NXT_SV_REFRESH (0x1 << 24)
0081 #define DESC_NXT_SV_REUSE   (0x2 << 24)
0082 #define DESC_NXT_DV_REFRESH (0x1 << 26)
0083 #define DESC_NXT_DV_REUSE   (0x2 << 26)
0084 #define DESC_NTYPE_TYPE2    (0x2 << 29)
0085 
0086 /* Type 2 descriptor with Source or Destination address update */
0087 struct omap_type2_desc {
0088     uint32_t next_desc;
0089     uint32_t en;
0090     uint32_t addr; /* src or dst */
0091     uint16_t fn;
0092     uint16_t cicr;
0093     int16_t cdei;
0094     int16_t csei;
0095     int32_t cdfi;
0096     int32_t csfi;
0097 } __packed;
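     /*
      * The controller walks such descriptors through the CDP/CNDP
      * registers: omap_dma_fill_type2_desc() below chains entries via
      * their dma_pool physical addresses and marks the final entry with
      * the terminating next_desc value 0xfffffffc. The DESC_NXT_* and
      * DESC_NTYPE_* flags above are OR'ed into the high bits of 'en'.
      */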
0098 
0099 struct omap_sg {
0100     dma_addr_t addr;
0101     uint32_t en;        /* number of elements (24-bit) */
0102     uint32_t fn;        /* number of frames (16-bit) */
0103     int32_t fi;     /* for double indexing */
0104     int16_t ei;     /* for double indexing */
0105 
0106     /* Linked list */
0107     struct omap_type2_desc *t2_desc;
0108     dma_addr_t t2_desc_paddr;
0109 };
0110 
0111 struct omap_desc {
0112     struct virt_dma_desc vd;
0113     bool using_ll;
0114     enum dma_transfer_direction dir;
0115     dma_addr_t dev_addr;
0116     bool polled;
0117 
0118     int32_t fi;     /* for OMAP_DMA_SYNC_PACKET / double indexing */
0119     int16_t ei;     /* for double indexing */
0120     uint8_t es;     /* CSDP_DATA_TYPE_xxx */
0121     uint32_t ccr;       /* CCR value */
0122     uint16_t clnk_ctrl; /* CLNK_CTRL value */
0123     uint16_t cicr;      /* CICR value */
0124     uint32_t csdp;      /* CSDP value */
0125 
0126     unsigned sglen;
0127     struct omap_sg sg[];
0128 };
0129 
0130 enum {
0131     CAPS_0_SUPPORT_LL123    = BIT(20),  /* Linked List type1/2/3 */
0132     CAPS_0_SUPPORT_LL4  = BIT(21),  /* Linked List type4 */
0133 
0134     CCR_FS          = BIT(5),
0135     CCR_READ_PRIORITY   = BIT(6),
0136     CCR_ENABLE      = BIT(7),
0137     CCR_AUTO_INIT       = BIT(8),   /* OMAP1 only */
0138     CCR_REPEAT      = BIT(9),   /* OMAP1 only */
0139     CCR_OMAP31_DISABLE  = BIT(10),  /* OMAP1 only */
0140     CCR_SUSPEND_SENSITIVE   = BIT(8),   /* OMAP2+ only */
0141     CCR_RD_ACTIVE       = BIT(9),   /* OMAP2+ only */
0142     CCR_WR_ACTIVE       = BIT(10),  /* OMAP2+ only */
0143     CCR_SRC_AMODE_CONSTANT  = 0 << 12,
0144     CCR_SRC_AMODE_POSTINC   = 1 << 12,
0145     CCR_SRC_AMODE_SGLIDX    = 2 << 12,
0146     CCR_SRC_AMODE_DBLIDX    = 3 << 12,
0147     CCR_DST_AMODE_CONSTANT  = 0 << 14,
0148     CCR_DST_AMODE_POSTINC   = 1 << 14,
0149     CCR_DST_AMODE_SGLIDX    = 2 << 14,
0150     CCR_DST_AMODE_DBLIDX    = 3 << 14,
0151     CCR_CONSTANT_FILL   = BIT(16),
0152     CCR_TRANSPARENT_COPY    = BIT(17),
0153     CCR_BS          = BIT(18),
0154     CCR_SUPERVISOR      = BIT(22),
0155     CCR_PREFETCH        = BIT(23),
0156     CCR_TRIGGER_SRC     = BIT(24),
0157     CCR_BUFFERING_DISABLE   = BIT(25),
0158     CCR_WRITE_PRIORITY  = BIT(26),
0159     CCR_SYNC_ELEMENT    = 0,
0160     CCR_SYNC_FRAME      = CCR_FS,
0161     CCR_SYNC_BLOCK      = CCR_BS,
0162     CCR_SYNC_PACKET     = CCR_BS | CCR_FS,
0163 
0164     CSDP_DATA_TYPE_8    = 0,
0165     CSDP_DATA_TYPE_16   = 1,
0166     CSDP_DATA_TYPE_32   = 2,
0167     CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */
0168     CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */
0169     CSDP_SRC_PORT_OCP_T1    = 2 << 2, /* OMAP1 only */
0170     CSDP_SRC_PORT_TIPB  = 3 << 2, /* OMAP1 only */
0171     CSDP_SRC_PORT_OCP_T2    = 4 << 2, /* OMAP1 only */
0172     CSDP_SRC_PORT_MPUI  = 5 << 2, /* OMAP1 only */
0173     CSDP_SRC_PACKED     = BIT(6),
0174     CSDP_SRC_BURST_1    = 0 << 7,
0175     CSDP_SRC_BURST_16   = 1 << 7,
0176     CSDP_SRC_BURST_32   = 2 << 7,
0177     CSDP_SRC_BURST_64   = 3 << 7,
0178     CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */
0179     CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */
0180     CSDP_DST_PORT_OCP_T1    = 2 << 9, /* OMAP1 only */
0181     CSDP_DST_PORT_TIPB  = 3 << 9, /* OMAP1 only */
0182     CSDP_DST_PORT_OCP_T2    = 4 << 9, /* OMAP1 only */
0183     CSDP_DST_PORT_MPUI  = 5 << 9, /* OMAP1 only */
0184     CSDP_DST_PACKED     = BIT(13),
0185     CSDP_DST_BURST_1    = 0 << 14,
0186     CSDP_DST_BURST_16   = 1 << 14,
0187     CSDP_DST_BURST_32   = 2 << 14,
0188     CSDP_DST_BURST_64   = 3 << 14,
0189     CSDP_WRITE_NON_POSTED   = 0 << 16,
0190     CSDP_WRITE_POSTED   = 1 << 16,
0191     CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
0192 
0193     CICR_TOUT_IE        = BIT(0),   /* OMAP1 only */
0194     CICR_DROP_IE        = BIT(1),
0195     CICR_HALF_IE        = BIT(2),
0196     CICR_FRAME_IE       = BIT(3),
0197     CICR_LAST_IE        = BIT(4),
0198     CICR_BLOCK_IE       = BIT(5),
0199     CICR_PKT_IE     = BIT(7),   /* OMAP2+ only */
0200     CICR_TRANS_ERR_IE   = BIT(8),   /* OMAP2+ only */
0201     CICR_SUPERVISOR_ERR_IE  = BIT(10),  /* OMAP2+ only */
0202     CICR_MISALIGNED_ERR_IE  = BIT(11),  /* OMAP2+ only */
0203     CICR_DRAIN_IE       = BIT(12),  /* OMAP2+ only */
0204     CICR_SUPER_BLOCK_IE = BIT(14),  /* OMAP2+ only */
0205 
0206     CLNK_CTRL_ENABLE_LNK    = BIT(15),
0207 
0208     CDP_DST_VALID_INC   = 0 << 0,
0209     CDP_DST_VALID_RELOAD    = 1 << 0,
0210     CDP_DST_VALID_REUSE = 2 << 0,
0211     CDP_SRC_VALID_INC   = 0 << 2,
0212     CDP_SRC_VALID_RELOAD    = 1 << 2,
0213     CDP_SRC_VALID_REUSE = 2 << 2,
0214     CDP_NTYPE_TYPE1     = 1 << 4,
0215     CDP_NTYPE_TYPE2     = 2 << 4,
0216     CDP_NTYPE_TYPE3     = 3 << 4,
0217     CDP_TMODE_NORMAL    = 0 << 8,
0218     CDP_TMODE_LLIST     = 1 << 8,
0219     CDP_FAST        = BIT(10),
0220 };
0221 
0222 static const unsigned es_bytes[] = {
0223     [CSDP_DATA_TYPE_8] = 1,
0224     [CSDP_DATA_TYPE_16] = 2,
0225     [CSDP_DATA_TYPE_32] = 4,
0226 };
0227 
0228 static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
0229 static struct of_dma_filter_info omap_dma_info = {
0230     .filter_fn = omap_dma_filter_fn,
0231 };
0232 
0233 static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
0234 {
0235     return container_of(d, struct omap_dmadev, ddev);
0236 }
0237 
0238 static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
0239 {
0240     return container_of(c, struct omap_chan, vc.chan);
0241 }
0242 
0243 static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
0244 {
0245     return container_of(t, struct omap_desc, vd.tx);
0246 }
0247 
0248 static void omap_dma_desc_free(struct virt_dma_desc *vd)
0249 {
0250     struct omap_desc *d = to_omap_dma_desc(&vd->tx);
0251 
0252     if (d->using_ll) {
0253         struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
0254         int i;
0255 
0256         for (i = 0; i < d->sglen; i++) {
0257             if (d->sg[i].t2_desc)
0258                 dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
0259                           d->sg[i].t2_desc_paddr);
0260         }
0261     }
0262 
0263     kfree(d);
0264 }
0265 
0266 static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
0267                      enum dma_transfer_direction dir, bool last)
0268 {
0269     struct omap_sg *sg = &d->sg[idx];
0270     struct omap_type2_desc *t2_desc = sg->t2_desc;
0271 
0272     if (idx)
0273         d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
0274     if (last)
0275         t2_desc->next_desc = 0xfffffffc;
0276 
0277     t2_desc->en = sg->en;
0278     t2_desc->addr = sg->addr;
0279     t2_desc->fn = sg->fn & 0xffff;
0280     t2_desc->cicr = d->cicr;
0281     if (!last)
0282         t2_desc->cicr &= ~CICR_BLOCK_IE;
0283 
0284     switch (dir) {
0285     case DMA_DEV_TO_MEM:
0286         t2_desc->cdei = sg->ei;
0287         t2_desc->csei = d->ei;
0288         t2_desc->cdfi = sg->fi;
0289         t2_desc->csfi = d->fi;
0290 
0291         t2_desc->en |= DESC_NXT_DV_REFRESH;
0292         t2_desc->en |= DESC_NXT_SV_REUSE;
0293         break;
0294     case DMA_MEM_TO_DEV:
0295         t2_desc->cdei = d->ei;
0296         t2_desc->csei = sg->ei;
0297         t2_desc->cdfi = d->fi;
0298         t2_desc->csfi = sg->fi;
0299 
0300         t2_desc->en |= DESC_NXT_SV_REFRESH;
0301         t2_desc->en |= DESC_NXT_DV_REUSE;
0302         break;
0303     default:
0304         return;
0305     }
0306 
0307     t2_desc->en |= DESC_NTYPE_TYPE2;
0308 }
0309 
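     /*
      * Low-level register accessors. OMAP1 has a 16-bit register bus:
      * 32-bit quantities are split across two adjacent 16-bit halves
      * (OMAP_DMA_REG_2X16BIT), while OMAP2+ registers are plain 32-bit.
      * The reg_map from platform data records the width of each register.
      */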
0310 static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
0311 {
0312     switch (type) {
0313     case OMAP_DMA_REG_16BIT:
0314         writew_relaxed(val, addr);
0315         break;
0316     case OMAP_DMA_REG_2X16BIT:
0317         writew_relaxed(val, addr);
0318         writew_relaxed(val >> 16, addr + 2);
0319         break;
0320     case OMAP_DMA_REG_32BIT:
0321         writel_relaxed(val, addr);
0322         break;
0323     default:
0324         WARN_ON(1);
0325     }
0326 }
0327 
0328 static unsigned omap_dma_read(unsigned type, void __iomem *addr)
0329 {
0330     unsigned val;
0331 
0332     switch (type) {
0333     case OMAP_DMA_REG_16BIT:
0334         val = readw_relaxed(addr);
0335         break;
0336     case OMAP_DMA_REG_2X16BIT:
0337         val = readw_relaxed(addr);
0338         val |= readw_relaxed(addr + 2) << 16;
0339         break;
0340     case OMAP_DMA_REG_32BIT:
0341         val = readl_relaxed(addr);
0342         break;
0343     default:
0344         WARN_ON(1);
0345         val = 0;
0346     }
0347 
0348     return val;
0349 }
0350 
0351 static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
0352 {
0353     const struct omap_dma_reg *r = od->reg_map + reg;
0354 
0355     WARN_ON(r->stride);
0356 
0357     omap_dma_write(val, r->type, od->base + r->offset);
0358 }
0359 
0360 static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
0361 {
0362     const struct omap_dma_reg *r = od->reg_map + reg;
0363 
0364     WARN_ON(r->stride);
0365 
0366     return omap_dma_read(r->type, od->base + r->offset);
0367 }
0368 
0369 static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
0370 {
0371     const struct omap_dma_reg *r = c->reg_map + reg;
0372 
0373     omap_dma_write(val, r->type, c->channel_base + r->offset);
0374 }
0375 
0376 static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
0377 {
0378     const struct omap_dma_reg *r = c->reg_map + reg;
0379 
0380     return omap_dma_read(r->type, c->channel_base + r->offset);
0381 }
0382 
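     /*
      * The channel status register semantics differ by generation: on
      * OMAP1, CSR is clear-on-read; on OMAP2+ it is write-1-to-clear.
      * Hence clearing is a read in one case and a write of ~0 in the
      * other.
      */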
0383 static void omap_dma_clear_csr(struct omap_chan *c)
0384 {
0385     if (dma_omap1())
0386         omap_dma_chan_read(c, CSR);
0387     else
0388         omap_dma_chan_write(c, CSR, ~0);
0389 }
0390 
0391 static unsigned omap_dma_get_csr(struct omap_chan *c)
0392 {
0393     unsigned val = omap_dma_chan_read(c, CSR);
0394 
0395     if (!dma_omap1())
0396         omap_dma_chan_write(c, CSR, val);
0397 
0398     return val;
0399 }
0400 
0401 static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
0402 {
0403     struct omap_chan *c;
0404     int i;
0405 
0406     c = od->lch_map[lch];
0407     if (!c)
0408         return;
0409 
0410     for (i = CSDP; i <= od->cfg->lch_end; i++)
0411         omap_dma_chan_write(c, i, 0);
0412 }
0413 
0414 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
0415     unsigned lch)
0416 {
0417     c->channel_base = od->base + od->plat->channel_stride * lch;
0418 
0419     od->lch_map[lch] = c;
0420 }
0421 
0422 static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
0423 {
0424     struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
0425     uint16_t cicr = d->cicr;
0426 
0427     if (__dma_omap15xx(od->plat->dma_attr))
0428         omap_dma_chan_write(c, CPC, 0);
0429     else
0430         omap_dma_chan_write(c, CDAC, 0);
0431 
0432     omap_dma_clear_csr(c);
0433 
0434     if (d->using_ll) {
0435         uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
0436 
0437         if (d->dir == DMA_DEV_TO_MEM)
0438             cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
0439         else
0440             cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
0441         omap_dma_chan_write(c, CDP, cdp);
0442 
0443         omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
0444         omap_dma_chan_write(c, CCDN, 0);
0445         omap_dma_chan_write(c, CCFN, 0xffff);
0446         omap_dma_chan_write(c, CCEN, 0xffffff);
0447 
0448         cicr &= ~CICR_BLOCK_IE;
0449     } else if (od->ll123_supported) {
0450         omap_dma_chan_write(c, CDP, 0);
0451     }
0452 
0453     /* Enable interrupts */
0454     omap_dma_chan_write(c, CICR, cicr);
0455 
0456     /* Enable channel */
0457     omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
0458 
0459     c->running = true;
0460 }
0461 
0462 static void omap_dma_drain_chan(struct omap_chan *c)
0463 {
0464     int i;
0465     u32 val;
0466 
0467     /* Wait for sDMA FIFO to drain */
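         /* Bounded poll: up to ~100 iterations of udelay(5), ~0.5 ms total */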
0468     for (i = 0; ; i++) {
0469         val = omap_dma_chan_read(c, CCR);
0470         if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
0471             break;
0472 
0473         if (i > 100)
0474             break;
0475 
0476         udelay(5);
0477     }
0478 
0479     if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
0480         dev_err(c->vc.chan.device->dev,
0481             "DMA drain did not complete on lch %d\n",
0482             c->dma_ch);
0483 }
0484 
0485 static int omap_dma_stop(struct omap_chan *c)
0486 {
0487     struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
0488     uint32_t val;
0489 
0490     /* disable irq */
0491     omap_dma_chan_write(c, CICR, 0);
0492 
0493     omap_dma_clear_csr(c);
0494 
0495     val = omap_dma_chan_read(c, CCR);
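         /*
          * Errata i541 workaround: a source-triggered channel is
          * disabled with MIDLEMODE forced to no-idle via OCP_SYSCONFIG,
          * drained, and the original OCP_SYSCONFIG value restored.
          */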
0496     if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
0497         uint32_t sysconfig;
0498 
0499         sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
0500         val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
0501         val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
0502         omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
0503 
0504         val = omap_dma_chan_read(c, CCR);
0505         val &= ~CCR_ENABLE;
0506         omap_dma_chan_write(c, CCR, val);
0507 
0508         if (!(c->ccr & CCR_BUFFERING_DISABLE))
0509             omap_dma_drain_chan(c);
0510 
0511         omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
0512     } else {
0513         if (!(val & CCR_ENABLE))
0514             return -EINVAL;
0515 
0516         val &= ~CCR_ENABLE;
0517         omap_dma_chan_write(c, CCR, val);
0518 
0519         if (!(c->ccr & CCR_BUFFERING_DISABLE))
0520             omap_dma_drain_chan(c);
0521     }
0522 
0523     mb();
0524 
0525     if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
0526         val = omap_dma_chan_read(c, CLNK_CTRL);
0527 
0528         if (dma_omap1())
0529             val |= 1 << 14; /* set the STOP_LNK bit */
0530         else
0531             val &= ~CLNK_CTRL_ENABLE_LNK;
0532 
0533         omap_dma_chan_write(c, CLNK_CTRL, val);
0534     }
0535     c->running = false;
0536     return 0;
0537 }
0538 
0539 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
0540 {
0541     struct omap_sg *sg = d->sg + c->sgidx;
0542     unsigned cxsa, cxei, cxfi;
0543 
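         /*
          * The sg entries describe the memory side of the transfer: the
          * destination for DEV_TO_MEM and MEM_TO_MEM, the source
          * otherwise. The device side is programmed once per descriptor
          * in omap_dma_start_desc().
          */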
0544     if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
0545         cxsa = CDSA;
0546         cxei = CDEI;
0547         cxfi = CDFI;
0548     } else {
0549         cxsa = CSSA;
0550         cxei = CSEI;
0551         cxfi = CSFI;
0552     }
0553 
0554     omap_dma_chan_write(c, cxsa, sg->addr);
0555     omap_dma_chan_write(c, cxei, sg->ei);
0556     omap_dma_chan_write(c, cxfi, sg->fi);
0557     omap_dma_chan_write(c, CEN, sg->en);
0558     omap_dma_chan_write(c, CFN, sg->fn);
0559 
0560     omap_dma_start(c, d);
0561     c->sgidx++;
0562 }
0563 
0564 static void omap_dma_start_desc(struct omap_chan *c)
0565 {
0566     struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
0567     struct omap_desc *d;
0568     unsigned cxsa, cxei, cxfi;
0569 
0570     if (!vd) {
0571         c->desc = NULL;
0572         return;
0573     }
0574 
0575     list_del(&vd->node);
0576 
0577     c->desc = d = to_omap_dma_desc(&vd->tx);
0578     c->sgidx = 0;
0579 
0580     /*
0581      * This provides the necessary barrier to ensure data held in
0582      * DMA coherent memory is visible to the DMA engine prior to
0583      * the transfer starting.
0584      */
0585     mb();
0586 
0587     omap_dma_chan_write(c, CCR, d->ccr);
0588     if (dma_omap1())
0589         omap_dma_chan_write(c, CCR2, d->ccr >> 16);
0590 
0591     if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
0592         cxsa = CSSA;
0593         cxei = CSEI;
0594         cxfi = CSFI;
0595     } else {
0596         cxsa = CDSA;
0597         cxei = CDEI;
0598         cxfi = CDFI;
0599     }
0600 
0601     omap_dma_chan_write(c, cxsa, d->dev_addr);
0602     omap_dma_chan_write(c, cxei, d->ei);
0603     omap_dma_chan_write(c, cxfi, d->fi);
0604     omap_dma_chan_write(c, CSDP, d->csdp);
0605     omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
0606 
0607     omap_dma_start_sg(c, d);
0608 }
0609 
0610 static void omap_dma_callback(int ch, u16 status, void *data)
0611 {
0612     struct omap_chan *c = data;
0613     struct omap_desc *d;
0614     unsigned long flags;
0615 
0616     spin_lock_irqsave(&c->vc.lock, flags);
0617     d = c->desc;
0618     if (d) {
0619         if (c->cyclic) {
0620             vchan_cyclic_callback(&d->vd);
0621         } else if (d->using_ll || c->sgidx == d->sglen) {
0622             omap_dma_start_desc(c);
0623             vchan_cookie_complete(&d->vd);
0624         } else {
0625             omap_dma_start_sg(c, d);
0626         }
0627     }
0628     spin_unlock_irqrestore(&c->vc.lock, flags);
0629 }
0630 
0631 static irqreturn_t omap_dma_irq(int irq, void *devid)
0632 {
0633     struct omap_dmadev *od = devid;
0634     unsigned status, channel;
0635 
0636     spin_lock(&od->irq_lock);
0637 
0638     status = omap_dma_glbl_read(od, IRQSTATUS_L1);
0639     status &= od->irq_enable_mask;
0640     if (status == 0) {
0641         spin_unlock(&od->irq_lock);
0642         return IRQ_NONE;
0643     }
0644 
0645     while ((channel = ffs(status)) != 0) {
0646         unsigned mask, csr;
0647         struct omap_chan *c;
0648 
0649         channel -= 1;
0650         mask = BIT(channel);
0651         status &= ~mask;
0652 
0653         c = od->lch_map[channel];
0654         if (c == NULL) {
0655             /* This should never happen */
0656             dev_err(od->ddev.dev, "invalid channel %u\n", channel);
0657             continue;
0658         }
0659 
0660         csr = omap_dma_get_csr(c);
0661         omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
0662 
0663         omap_dma_callback(channel, csr, c);
0664     }
0665 
0666     spin_unlock(&od->irq_lock);
0667 
0668     return IRQ_HANDLED;
0669 }
0670 
0671 static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
0672 {
0673     int channel;
0674 
0675     mutex_lock(&od->lch_lock);
0676     channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
0677     if (channel >= od->lch_count)
0678         goto out_busy;
0679     set_bit(channel, od->lch_bitmap);
0680     mutex_unlock(&od->lch_lock);
0681 
0682     omap_dma_clear_lch(od, channel);
0683     *lch = channel;
0684 
0685     return 0;
0686 
0687 out_busy:
0688     mutex_unlock(&od->lch_lock);
0689     *lch = -EINVAL;
0690 
0691     return -EBUSY;
0692 }
0693 
0694 static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
0695 {
0696     omap_dma_clear_lch(od, lch);
0697     mutex_lock(&od->lch_lock);
0698     clear_bit(lch, od->lch_bitmap);
0699     mutex_unlock(&od->lch_lock);
0700 }
0701 
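     /*
      * Legacy mode means the OMAP1 platform DMA code owns channel
      * allocation and IRQ dispatch (omap_request_dma()/omap_free_dma());
      * probe falls back to it when no dedicated L1 interrupt is
      * available.
      */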
0702 static inline bool omap_dma_legacy(struct omap_dmadev *od)
0703 {
0704     return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy;
0705 }
0706 
0707 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
0708 {
0709     struct omap_dmadev *od = to_omap_dma_dev(chan->device);
0710     struct omap_chan *c = to_omap_dma_chan(chan);
0711     struct device *dev = od->ddev.dev;
0712     int ret;
0713 
0714     if (omap_dma_legacy(od)) {
0715         ret = omap_request_dma(c->dma_sig, "DMA engine",
0716                        omap_dma_callback, c, &c->dma_ch);
0717     } else {
0718         ret = omap_dma_get_lch(od, &c->dma_ch);
0719     }
0720 
0721     dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
0722 
0723     if (ret >= 0) {
0724         omap_dma_assign(od, c, c->dma_ch);
0725 
0726         if (!omap_dma_legacy(od)) {
0727             unsigned val;
0728 
0729             spin_lock_irq(&od->irq_lock);
0730             val = BIT(c->dma_ch);
0731             omap_dma_glbl_write(od, IRQSTATUS_L1, val);
0732             od->irq_enable_mask |= val;
0733             omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
0734 
0735             val = omap_dma_glbl_read(od, IRQENABLE_L0);
0736             val &= ~BIT(c->dma_ch);
0737             omap_dma_glbl_write(od, IRQENABLE_L0, val);
0738             spin_unlock_irq(&od->irq_lock);
0739         }
0740     }
0741 
0742     if (dma_omap1()) {
0743         if (__dma_omap16xx(od->plat->dma_attr)) {
0744             c->ccr = CCR_OMAP31_DISABLE;
0745             /* Duplicate what plat-omap/dma.c does */
0746             c->ccr |= c->dma_ch + 1;
0747         } else {
0748             c->ccr = c->dma_sig & 0x1f;
0749         }
0750     } else {
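             /*
              * OMAP2+: the DMA request line is split across CCR[4:0]
              * and CCR[20:19]; the << 14 shift moves bits 6:5 of
              * dma_sig into bits 20:19.
              */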
0751         c->ccr = c->dma_sig & 0x1f;
0752         c->ccr |= (c->dma_sig & ~0x1f) << 14;
0753     }
0754     if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
0755         c->ccr |= CCR_BUFFERING_DISABLE;
0756 
0757     return ret;
0758 }
0759 
0760 static void omap_dma_free_chan_resources(struct dma_chan *chan)
0761 {
0762     struct omap_dmadev *od = to_omap_dma_dev(chan->device);
0763     struct omap_chan *c = to_omap_dma_chan(chan);
0764 
0765     if (!omap_dma_legacy(od)) {
0766         spin_lock_irq(&od->irq_lock);
0767         od->irq_enable_mask &= ~BIT(c->dma_ch);
0768         omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
0769         spin_unlock_irq(&od->irq_lock);
0770     }
0771 
0772     c->channel_base = NULL;
0773     od->lch_map[c->dma_ch] = NULL;
0774     vchan_free_chan_resources(&c->vc);
0775 
0776     if (omap_dma_legacy(od))
0777         omap_free_dma(c->dma_ch);
0778     else
0779         omap_dma_put_lch(od, c->dma_ch);
0780 
0781     dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
0782         c->dma_sig);
0783     c->dma_sig = 0;
0784 }
0785 
0786 static size_t omap_dma_sg_size(struct omap_sg *sg)
0787 {
0788     return sg->en * sg->fn;
0789 }
0790 
0791 static size_t omap_dma_desc_size(struct omap_desc *d)
0792 {
0793     unsigned i;
0794     size_t size;
0795 
0796     for (size = i = 0; i < d->sglen; i++)
0797         size += omap_dma_sg_size(&d->sg[i]);
0798 
0799     return size * es_bytes[d->es];
0800 }
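     /*
      * Worked example: a single sg entry with EN = 64 elements, FN = 8
      * frames and 16-bit elements (ES = 2 bytes) reports
      * 64 * 8 * 2 = 1024 bytes.
      */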
0801 
0802 static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
0803 {
0804     unsigned i;
0805     size_t size, es_size = es_bytes[d->es];
0806 
0807     for (size = i = 0; i < d->sglen; i++) {
0808         size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
0809 
0810         if (size)
0811             size += this_size;
0812         else if (addr >= d->sg[i].addr &&
0813              addr < d->sg[i].addr + this_size)
0814             size += d->sg[i].addr + this_size - addr;
0815     }
0816     return size;
0817 }
0818 
0819 /*
0820  * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
0821  * read before the DMA controller finished disabling the channel.
0822  */
0823 static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
0824 {
0825     struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
0826     uint32_t val;
0827 
0828     val = omap_dma_chan_read(c, reg);
0829     if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
0830         val = omap_dma_chan_read(c, reg);
0831 
0832     return val;
0833 }
0834 
0835 static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
0836 {
0837     struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
0838     dma_addr_t addr, cdac;
0839 
0840     if (__dma_omap15xx(od->plat->dma_attr)) {
0841         addr = omap_dma_chan_read(c, CPC);
0842     } else {
0843         addr = omap_dma_chan_read_3_3(c, CSAC);
0844         cdac = omap_dma_chan_read_3_3(c, CDAC);
0845 
0846         /*
0847          * CDAC == 0 indicates that the DMA transfer on the channel has
0848          * not been started (no data has been transferred so far).
0849          * Return the programmed source start address in this case.
0850          */
0851         if (cdac == 0)
0852             addr = omap_dma_chan_read(c, CSSA);
0853     }
0854 
0855     if (dma_omap1())
0856         addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
0857 
0858     return addr;
0859 }
0860 
0861 static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
0862 {
0863     struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
0864     dma_addr_t addr;
0865 
0866     if (__dma_omap15xx(od->plat->dma_attr)) {
0867         addr = omap_dma_chan_read(c, CPC);
0868     } else {
0869         addr = omap_dma_chan_read_3_3(c, CDAC);
0870 
0871         /*
0872          * CDAC == 0 indicates that the DMA transfer on the channel
0873          * has not been started (no data has been transferred so
0874          * far).  Return the programmed destination start address in
0875          * this case.
0876          */
0877         if (addr == 0)
0878             addr = omap_dma_chan_read(c, CDSA);
0879     }
0880 
0881     if (dma_omap1())
0882         addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
0883 
0884     return addr;
0885 }
0886 
0887 static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
0888     dma_cookie_t cookie, struct dma_tx_state *txstate)
0889 {
0890     struct omap_chan *c = to_omap_dma_chan(chan);
0891     enum dma_status ret;
0892     unsigned long flags;
0893     struct omap_desc *d = NULL;
0894 
0895     ret = dma_cookie_status(chan, cookie, txstate);
0896     if (ret == DMA_COMPLETE)
0897         return ret;
0898 
0899     spin_lock_irqsave(&c->vc.lock, flags);
0900     if (c->desc && c->desc->vd.tx.cookie == cookie)
0901         d = c->desc;
0902 
0903     if (!txstate)
0904         goto out;
0905 
0906     if (d) {
0907         dma_addr_t pos;
0908 
0909         if (d->dir == DMA_MEM_TO_DEV)
0910             pos = omap_dma_get_src_pos(c);
0911         else if (d->dir == DMA_DEV_TO_MEM  || d->dir == DMA_MEM_TO_MEM)
0912             pos = omap_dma_get_dst_pos(c);
0913         else
0914             pos = 0;
0915 
0916         txstate->residue = omap_dma_desc_size_pos(d, pos);
0917     } else {
0918         struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);
0919 
0920         if (vd)
0921             txstate->residue = omap_dma_desc_size(
0922                         to_omap_dma_desc(&vd->tx));
0923         else
0924             txstate->residue = 0;
0925     }
0926 
0927 out:
0928     if (ret == DMA_IN_PROGRESS && c->paused) {
0929         ret = DMA_PAUSED;
0930     } else if (d && d->polled && c->running) {
0931         uint32_t ccr = omap_dma_chan_read(c, CCR);
0932         /*
0933          * The channel is no longer active, set the return value
0934          * accordingly and mark it as completed
0935          */
0936         if (!(ccr & CCR_ENABLE)) {
0937             ret = DMA_COMPLETE;
0938             omap_dma_start_desc(c);
0939             vchan_cookie_complete(&d->vd);
0940         }
0941     }
0942 
0943     spin_unlock_irqrestore(&c->vc.lock, flags);
0944 
0945     return ret;
0946 }
0947 
0948 static void omap_dma_issue_pending(struct dma_chan *chan)
0949 {
0950     struct omap_chan *c = to_omap_dma_chan(chan);
0951     unsigned long flags;
0952 
0953     spin_lock_irqsave(&c->vc.lock, flags);
0954     if (vchan_issue_pending(&c->vc) && !c->desc)
0955         omap_dma_start_desc(c);
0956     spin_unlock_irqrestore(&c->vc.lock, flags);
0957 }
0958 
0959 static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
0960     struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
0961     enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
0962 {
0963     struct omap_dmadev *od = to_omap_dma_dev(chan->device);
0964     struct omap_chan *c = to_omap_dma_chan(chan);
0965     enum dma_slave_buswidth dev_width;
0966     struct scatterlist *sgent;
0967     struct omap_desc *d;
0968     dma_addr_t dev_addr;
0969     unsigned i, es, en, frame_bytes;
0970     bool ll_failed = false;
0971     u32 burst;
0972     u32 port_window, port_window_bytes;
0973 
0974     if (dir == DMA_DEV_TO_MEM) {
0975         dev_addr = c->cfg.src_addr;
0976         dev_width = c->cfg.src_addr_width;
0977         burst = c->cfg.src_maxburst;
0978         port_window = c->cfg.src_port_window_size;
0979     } else if (dir == DMA_MEM_TO_DEV) {
0980         dev_addr = c->cfg.dst_addr;
0981         dev_width = c->cfg.dst_addr_width;
0982         burst = c->cfg.dst_maxburst;
0983         port_window = c->cfg.dst_port_window_size;
0984     } else {
0985         dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
0986         return NULL;
0987     }
0988 
0989     /* Bus width translates to the element size (ES) */
0990     switch (dev_width) {
0991     case DMA_SLAVE_BUSWIDTH_1_BYTE:
0992         es = CSDP_DATA_TYPE_8;
0993         break;
0994     case DMA_SLAVE_BUSWIDTH_2_BYTES:
0995         es = CSDP_DATA_TYPE_16;
0996         break;
0997     case DMA_SLAVE_BUSWIDTH_4_BYTES:
0998         es = CSDP_DATA_TYPE_32;
0999         break;
1000     default: /* not reached */
1001         return NULL;
1002     }
1003 
1004     /* Now allocate and setup the descriptor. */
1005     d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
1006     if (!d)
1007         return NULL;
1008 
1009     d->dir = dir;
1010     d->dev_addr = dev_addr;
1011     d->es = es;
1012 
1013     /* When the port_window is used, one frame must cover the window */
1014     if (port_window) {
1015         burst = port_window;
1016         port_window_bytes = port_window * es_bytes[es];
1017 
1018         d->ei = 1;
1019         /*
1020          * One frame covers the port_window, and by configuring
1021          * the device-side frame index to be -1 * (port_window_bytes - 1)
1022          * we instruct the sDMA that after a frame is processed
1023          * it should move back to the start of the window.
1024          */
1025         d->fi = -(port_window_bytes - 1);
1026     }
1027 
1028     d->ccr = c->ccr | CCR_SYNC_FRAME;
1029     if (dir == DMA_DEV_TO_MEM) {
1030         d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
1031 
1032         d->ccr |= CCR_DST_AMODE_POSTINC;
1033         if (port_window) {
1034             d->ccr |= CCR_SRC_AMODE_DBLIDX;
1035 
1036             if (port_window_bytes >= 64)
1037                 d->csdp |= CSDP_SRC_BURST_64;
1038             else if (port_window_bytes >= 32)
1039                 d->csdp |= CSDP_SRC_BURST_32;
1040             else if (port_window_bytes >= 16)
1041                 d->csdp |= CSDP_SRC_BURST_16;
1042 
1043         } else {
1044             d->ccr |= CCR_SRC_AMODE_CONSTANT;
1045         }
1046     } else {
1047         d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
1048 
1049         d->ccr |= CCR_SRC_AMODE_POSTINC;
1050         if (port_window) {
1051             d->ccr |= CCR_DST_AMODE_DBLIDX;
1052 
1053             if (port_window_bytes >= 64)
1054                 d->csdp |= CSDP_DST_BURST_64;
1055             else if (port_window_bytes >= 32)
1056                 d->csdp |= CSDP_DST_BURST_32;
1057             else if (port_window_bytes >= 16)
1058                 d->csdp |= CSDP_DST_BURST_16;
1059         } else {
1060             d->ccr |= CCR_DST_AMODE_CONSTANT;
1061         }
1062     }
1063 
1064     d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
1065     d->csdp |= es;
1066 
1067     if (dma_omap1()) {
1068         d->cicr |= CICR_TOUT_IE;
1069 
1070         if (dir == DMA_DEV_TO_MEM)
1071             d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
1072         else
1073             d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
1074     } else {
1075         if (dir == DMA_DEV_TO_MEM)
1076             d->ccr |= CCR_TRIGGER_SRC;
1077 
1078         d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
1079 
1080         if (port_window)
1081             d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
1082     }
1083     if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
1084         d->clnk_ctrl = c->dma_ch;
1085 
1086     /*
1087      * Build our scatterlist entries: each contains the address,
1088      * the number of elements (EN) in each frame, and the number of
1089      * frames (FN).  Number of bytes for this entry = ES * EN * FN.
1090      *
1091      * Burst size translates to number of elements with frame sync.
1092      * Note: DMA engine defines burst to be the number of dev-width
1093      * transfers.
1094      */
1095     en = burst;
1096     frame_bytes = es_bytes[es] * en;
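         /* e.g. 32-bit dev_width with maxburst 16: frame_bytes = 4 * 16 = 64 */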
1097 
1098     if (sglen >= 2)
1099         d->using_ll = od->ll123_supported;
1100 
1101     for_each_sg(sgl, sgent, sglen, i) {
1102         struct omap_sg *osg = &d->sg[i];
1103 
1104         osg->addr = sg_dma_address(sgent);
1105         osg->en = en;
1106         osg->fn = sg_dma_len(sgent) / frame_bytes;
1107 
1108         if (d->using_ll) {
1109             osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
1110                               &osg->t2_desc_paddr);
1111             if (!osg->t2_desc) {
1112                 dev_err(chan->device->dev,
1113                     "t2_desc[%d] allocation failed\n", i);
1114                 ll_failed = true;
1115                 d->using_ll = false;
1116                 continue;
1117             }
1118 
1119             omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
1120         }
1121     }
1122 
1123     d->sglen = sglen;
1124 
1125     /* Release the dma_pool entries if one allocation failed */
1126     if (ll_failed) {
1127         for (i = 0; i < d->sglen; i++) {
1128             struct omap_sg *osg = &d->sg[i];
1129 
1130             if (osg->t2_desc) {
1131                 dma_pool_free(od->desc_pool, osg->t2_desc,
1132                           osg->t2_desc_paddr);
1133                 osg->t2_desc = NULL;
1134             }
1135         }
1136     }
1137 
1138     return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
1139 }
1140 
1141 static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
1142     struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1143     size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
1144 {
1145     struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1146     struct omap_chan *c = to_omap_dma_chan(chan);
1147     enum dma_slave_buswidth dev_width;
1148     struct omap_desc *d;
1149     dma_addr_t dev_addr;
1150     unsigned es;
1151     u32 burst;
1152 
1153     if (dir == DMA_DEV_TO_MEM) {
1154         dev_addr = c->cfg.src_addr;
1155         dev_width = c->cfg.src_addr_width;
1156         burst = c->cfg.src_maxburst;
1157     } else if (dir == DMA_MEM_TO_DEV) {
1158         dev_addr = c->cfg.dst_addr;
1159         dev_width = c->cfg.dst_addr_width;
1160         burst = c->cfg.dst_maxburst;
1161     } else {
1162         dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
1163         return NULL;
1164     }
1165 
1166     /* Bus width translates to the element size (ES) */
1167     switch (dev_width) {
1168     case DMA_SLAVE_BUSWIDTH_1_BYTE:
1169         es = CSDP_DATA_TYPE_8;
1170         break;
1171     case DMA_SLAVE_BUSWIDTH_2_BYTES:
1172         es = CSDP_DATA_TYPE_16;
1173         break;
1174     case DMA_SLAVE_BUSWIDTH_4_BYTES:
1175         es = CSDP_DATA_TYPE_32;
1176         break;
1177     default: /* not reached */
1178         return NULL;
1179     }
1180 
1181     /* Now allocate and setup the descriptor. */
1182     d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1183     if (!d)
1184         return NULL;
1185 
1186     d->dir = dir;
1187     d->dev_addr = dev_addr;
1188     d->fi = burst;
1189     d->es = es;
1190     d->sg[0].addr = buf_addr;
1191     d->sg[0].en = period_len / es_bytes[es];
1192     d->sg[0].fn = buf_len / period_len;
1193     d->sglen = 1;
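         /*
          * One period maps to one frame: EN elements per period, FN
          * periods in the ring. With DMA_PREP_INTERRUPT, CICR_FRAME_IE
          * below raises an interrupt at every period boundary.
          */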
1194 
1195     d->ccr = c->ccr;
1196     if (dir == DMA_DEV_TO_MEM)
1197         d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
1198     else
1199         d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
1200 
1201     d->cicr = CICR_DROP_IE;
1202     if (flags & DMA_PREP_INTERRUPT)
1203         d->cicr |= CICR_FRAME_IE;
1204 
1205     d->csdp = es;
1206 
1207     if (dma_omap1()) {
1208         d->cicr |= CICR_TOUT_IE;
1209 
1210         if (dir == DMA_DEV_TO_MEM)
1211             d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
1212         else
1213             d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
1214     } else {
1215         if (burst)
1216             d->ccr |= CCR_SYNC_PACKET;
1217         else
1218             d->ccr |= CCR_SYNC_ELEMENT;
1219 
1220         if (dir == DMA_DEV_TO_MEM) {
1221             d->ccr |= CCR_TRIGGER_SRC;
1222             d->csdp |= CSDP_DST_PACKED;
1223         } else {
1224             d->csdp |= CSDP_SRC_PACKED;
1225         }
1226 
1227         d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
1228 
1229         d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
1230     }
1231 
1232     if (__dma_omap15xx(od->plat->dma_attr))
1233         d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
1234     else
1235         d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;
1236 
1237     c->cyclic = true;
1238 
1239     return vchan_tx_prep(&c->vc, &d->vd, flags);
1240 }
1241 
1242 static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
1243     struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1244     size_t len, unsigned long tx_flags)
1245 {
1246     struct omap_chan *c = to_omap_dma_chan(chan);
1247     struct omap_desc *d;
1248     uint8_t data_type;
1249 
1250     d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1251     if (!d)
1252         return NULL;
1253 
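         /*
          * Choose the widest element size keeping src, dst and len all
          * aligned: __ffs() of their OR is the lowest set bit, i.e. the
          * largest power-of-two stride common to all three, capped at
          * 32 bits by the check below.
          */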
1254     data_type = __ffs((src | dest | len));
1255     if (data_type > CSDP_DATA_TYPE_32)
1256         data_type = CSDP_DATA_TYPE_32;
1257 
1258     d->dir = DMA_MEM_TO_MEM;
1259     d->dev_addr = src;
1260     d->fi = 0;
1261     d->es = data_type;
1262     d->sg[0].en = len / BIT(data_type);
1263     d->sg[0].fn = 1;
1264     d->sg[0].addr = dest;
1265     d->sglen = 1;
1266     d->ccr = c->ccr;
1267     d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
1268 
1269     if (tx_flags & DMA_PREP_INTERRUPT)
1270         d->cicr |= CICR_FRAME_IE;
1271     else
1272         d->polled = true;
1273 
1274     d->csdp = data_type;
1275 
1276     if (dma_omap1()) {
1277         d->cicr |= CICR_TOUT_IE;
1278         d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
1279     } else {
1280         d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
1281         d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
1282         d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
1283     }
1284 
1285     return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
1286 }
1287 
1288 static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
1289     struct dma_chan *chan, struct dma_interleaved_template *xt,
1290     unsigned long flags)
1291 {
1292     struct omap_chan *c = to_omap_dma_chan(chan);
1293     struct omap_desc *d;
1294     struct omap_sg *sg;
1295     uint8_t data_type;
1296     size_t src_icg, dst_icg;
1297 
1298     /* Slave mode is not supported */
1299     if (is_slave_direction(xt->dir))
1300         return NULL;
1301 
1302     if (xt->frame_size != 1 || xt->numf == 0)
1303         return NULL;
1304 
1305     d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1306     if (!d)
1307         return NULL;
1308 
1309     data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
1310     if (data_type > CSDP_DATA_TYPE_32)
1311         data_type = CSDP_DATA_TYPE_32;
1312 
1313     sg = &d->sg[0];
1314     d->dir = DMA_MEM_TO_MEM;
1315     d->dev_addr = xt->src_start;
1316     d->es = data_type;
1317     sg->en = xt->sgl[0].size / BIT(data_type);
1318     sg->fn = xt->numf;
1319     sg->addr = xt->dst_start;
1320     d->sglen = 1;
1321     d->ccr = c->ccr;
1322 
1323     src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
1324     dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
1325     if (src_icg) {
1326         d->ccr |= CCR_SRC_AMODE_DBLIDX;
1327         d->ei = 1;
1328         d->fi = src_icg + 1;
1329     } else if (xt->src_inc) {
1330         d->ccr |= CCR_SRC_AMODE_POSTINC;
1331         d->fi = 0;
1332     } else {
1333         dev_err(chan->device->dev,
1334             "%s: SRC constant addressing is not supported\n",
1335             __func__);
1336         kfree(d);
1337         return NULL;
1338     }
1339 
1340     if (dst_icg) {
1341         d->ccr |= CCR_DST_AMODE_DBLIDX;
1342         sg->ei = 1;
1343         sg->fi = dst_icg + 1;
1344     } else if (xt->dst_inc) {
1345         d->ccr |= CCR_DST_AMODE_POSTINC;
1346         sg->fi = 0;
1347     } else {
1348         dev_err(chan->device->dev,
1349             "%s: DST constant addressing is not supported\n",
1350             __func__);
1351         kfree(d);
1352         return NULL;
1353     }
1354 
1355     d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
1356 
1357     d->csdp = data_type;
1358 
1359     if (dma_omap1()) {
1360         d->cicr |= CICR_TOUT_IE;
1361         d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
1362     } else {
1363         d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
1364         d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
1365         d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
1366     }
1367 
1368     return vchan_tx_prep(&c->vc, &d->vd, flags);
1369 }
1370 
1371 static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
1372 {
1373     struct omap_chan *c = to_omap_dma_chan(chan);
1374 
1375     if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1376         cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1377         return -EINVAL;
1378 
1379     if (cfg->src_maxburst > chan->device->max_burst ||
1380         cfg->dst_maxburst > chan->device->max_burst)
1381         return -EINVAL;
1382 
1383     memcpy(&c->cfg, cfg, sizeof(c->cfg));
1384 
1385     return 0;
1386 }
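     /*
      * Illustrative client-side sketch (the FIFO address below is a
      * hypothetical example):
      *
      *     struct dma_slave_config cfg = {
      *             .src_addr = dev_rx_fifo_phys,
      *             .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
      *             .src_maxburst = 16,
      *     };
      *     dmaengine_slave_config(chan, &cfg);
      */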
1387 
1388 static int omap_dma_terminate_all(struct dma_chan *chan)
1389 {
1390     struct omap_chan *c = to_omap_dma_chan(chan);
1391     unsigned long flags;
1392     LIST_HEAD(head);
1393 
1394     spin_lock_irqsave(&c->vc.lock, flags);
1395 
1396     /*
1397      * Stop DMA activity: we assume the callback will not be called
1398      * after omap_dma_stop() returns (even if it does, it will see
1399      * c->desc is NULL and exit.)
1400      */
1401     if (c->desc) {
1402         vchan_terminate_vdesc(&c->desc->vd);
1403         c->desc = NULL;
1404         /* Avoid stopping the dma twice */
1405         if (!c->paused)
1406             omap_dma_stop(c);
1407     }
1408 
1409     c->cyclic = false;
1410     c->paused = false;
1411 
1412     vchan_get_all_descriptors(&c->vc, &head);
1413     spin_unlock_irqrestore(&c->vc.lock, flags);
1414     vchan_dma_desc_free_list(&c->vc, &head);
1415 
1416     return 0;
1417 }
1418 
1419 static void omap_dma_synchronize(struct dma_chan *chan)
1420 {
1421     struct omap_chan *c = to_omap_dma_chan(chan);
1422 
1423     vchan_synchronize(&c->vc);
1424 }
1425 
1426 static int omap_dma_pause(struct dma_chan *chan)
1427 {
1428     struct omap_chan *c = to_omap_dma_chan(chan);
1429     struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1430     unsigned long flags;
1431     int ret = -EINVAL;
1432     bool can_pause = false;
1433 
1434     spin_lock_irqsave(&od->irq_lock, flags);
1435 
1436     if (!c->desc)
1437         goto out;
1438 
1439     if (c->cyclic)
1440         can_pause = true;
1441 
1442     /*
1443      * We do not allow DMA_MEM_TO_DEV transfers to be paused.
1444      * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
1445      * "When a channel is disabled during a transfer, the channel undergoes
1446      * an abort, unless it is hardware-source-synchronized …".
1447      * A source-synchronised channel is one where the fetching of data is
1448      * under control of the device. In other words, a device-to-memory
1449      * transfer. So, a destination-synchronised channel (which would be a
1450      * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
1451      * bit is cleared.
1452      * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
1453      * aborts immediately after completion of current read/write
1454      * transactions and then the FIFO is cleaned up." The term "cleaned up"
1455      * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
1456      * are both clear _before_ disabling the channel, otherwise data loss
1457      * will occur.
1458      * The problem is that if the channel is active, then device activity
1459      * can result in DMA activity starting between reading those as both
1460      * clear and the write to DMA_CCR to clear the enable bit hitting the
1461      * hardware. If the DMA hardware can't drain the data in its FIFO to the
1462      * destination, then data loss "might" occur (say if we write to an UART
1463      * and the UART is not accepting any further data).
1464      */
1465     else if (c->desc->dir == DMA_DEV_TO_MEM)
1466         can_pause = true;
1467 
1468     if (can_pause && !c->paused) {
1469         ret = omap_dma_stop(c);
1470         if (!ret)
1471             c->paused = true;
1472     }
1473 out:
1474     spin_unlock_irqrestore(&od->irq_lock, flags);
1475 
1476     return ret;
1477 }
1478 
1479 static int omap_dma_resume(struct dma_chan *chan)
1480 {
1481     struct omap_chan *c = to_omap_dma_chan(chan);
1482     struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1483     unsigned long flags;
1484     int ret = -EINVAL;
1485 
1486     spin_lock_irqsave(&od->irq_lock, flags);
1487 
1488     if (c->paused && c->desc) {
1489         mb();
1490 
1491         /* Restore channel link register */
1492         omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);
1493 
1494         omap_dma_start(c, c->desc);
1495         c->paused = false;
1496         ret = 0;
1497     }
1498     spin_unlock_irqrestore(&od->irq_lock, flags);
1499 
1500     return ret;
1501 }
1502 
1503 static int omap_dma_chan_init(struct omap_dmadev *od)
1504 {
1505     struct omap_chan *c;
1506 
1507     c = kzalloc(sizeof(*c), GFP_KERNEL);
1508     if (!c)
1509         return -ENOMEM;
1510 
1511     c->reg_map = od->reg_map;
1512     c->vc.desc_free = omap_dma_desc_free;
1513     vchan_init(&c->vc, &od->ddev);
1514 
1515     return 0;
1516 }
1517 
1518 static void omap_dma_free(struct omap_dmadev *od)
1519 {
1520     while (!list_empty(&od->ddev.channels)) {
1521         struct omap_chan *c = list_first_entry(&od->ddev.channels,
1522             struct omap_chan, vc.chan.device_node);
1523 
1524         list_del(&c->vc.chan.device_node);
1525         tasklet_kill(&c->vc.task);
1526         kfree(c);
1527     }
1528 }
1529 
1530 /* Currently used by omap2 & 3 to block deeper SoC idle states */
1531 static bool omap_dma_busy(struct omap_dmadev *od)
1532 {
1533     struct omap_chan *c;
1534     int lch = -1;
1535 
1536     while (1) {
1537         lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
1538         if (lch >= od->lch_count)
1539             break;
1540         c = od->lch_map[lch];
1541         if (!c)
1542             continue;
1543         if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
1544             return true;
1545     }
1546 
1547     return false;
1548 }
1549 
1550 /* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
1551 static int omap_dma_busy_notifier(struct notifier_block *nb,
1552                   unsigned long cmd, void *v)
1553 {
1554     struct omap_dmadev *od;
1555 
1556     od = container_of(nb, struct omap_dmadev, nb);
1557 
1558     switch (cmd) {
1559     case CPU_CLUSTER_PM_ENTER:
1560         if (omap_dma_busy(od))
1561             return NOTIFY_BAD;
1562         break;
1563     case CPU_CLUSTER_PM_ENTER_FAILED:
1564     case CPU_CLUSTER_PM_EXIT:
1565         break;
1566     }
1567 
1568     return NOTIFY_OK;
1569 }
1570 
1571 /*
1572  * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
1573  * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
1574  * now. Context save seems to be only currently needed on omap3.
1575  */
1576 static void omap_dma_context_save(struct omap_dmadev *od)
1577 {
1578     od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
1579     od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
1580     od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
1581     od->context.gcr = omap_dma_glbl_read(od, GCR);
1582 }
1583 
1584 static void omap_dma_context_restore(struct omap_dmadev *od)
1585 {
1586     int i;
1587 
1588     omap_dma_glbl_write(od, GCR, od->context.gcr);
1589     omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
1590     omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
1591     omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
1592 
1593     /* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
1594     if (od->plat->errata & DMA_ROMCODE_BUG)
1595         omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
1596 
1597     /* Clear dma channels */
1598     for (i = 0; i < od->lch_count; i++)
1599         omap_dma_clear_lch(od, i);
1600 }
1601 
1602 /* Currently only used for omap3 */
1603 static int omap_dma_context_notifier(struct notifier_block *nb,
1604                      unsigned long cmd, void *v)
1605 {
1606     struct omap_dmadev *od;
1607 
1608     od = container_of(nb, struct omap_dmadev, nb);
1609 
1610     switch (cmd) {
1611     case CPU_CLUSTER_PM_ENTER:
1612         if (omap_dma_busy(od))
1613             return NOTIFY_BAD;
1614         omap_dma_context_save(od);
1615         break;
1616     case CPU_CLUSTER_PM_ENTER_FAILED:   /* No need to restore context */
1617         break;
1618     case CPU_CLUSTER_PM_EXIT:
1619         omap_dma_context_restore(od);
1620         break;
1621     }
1622 
1623     return NOTIFY_OK;
1624 }
1625 
1626 static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
1627                   int max_fifo_depth, int tparams)
1628 {
1629     u32 val;
1630 
1631     /* Set only for omap2430 and later */
1632     if (!od->cfg->rw_priority)
1633         return;
1634 
1635     if (max_fifo_depth == 0)
1636         max_fifo_depth = 1;
1637     if (arb_rate == 0)
1638         arb_rate = 1;
1639 
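         /*
          * Compose GCR: max FIFO depth in bits 7:0, thread parameters
          * in bits 13:12, arbitration rate in bits 23:16.
          */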
1640     val = 0xff & max_fifo_depth;
1641     val |= (0x3 & tparams) << 12;
1642     val |= (arb_rate & 0xff) << 16;
1643 
1644     omap_dma_glbl_write(od, GCR, val);
1645 }
1646 
1647 #define OMAP_DMA_BUSWIDTHS  (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1648                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1649                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1650 
1651 /*
1652  * No flags currently set for default configuration as omap1 is still
1653  * using platform data.
1654  */
1655 static const struct omap_dma_config default_cfg;
1656 
1657 static int omap_dma_probe(struct platform_device *pdev)
1658 {
1659     const struct omap_dma_config *conf;
1660     struct omap_dmadev *od;
1661     struct resource *res;
1662     int rc, i, irq;
1663     u32 val;
1664 
1665     od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1666     if (!od)
1667         return -ENOMEM;
1668 
1669     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1670     od->base = devm_ioremap_resource(&pdev->dev, res);
1671     if (IS_ERR(od->base))
1672         return PTR_ERR(od->base);
1673 
1674     conf = of_device_get_match_data(&pdev->dev);
1675     if (conf) {
1676         od->cfg = conf;
1677         od->plat = dev_get_platdata(&pdev->dev);
1678         if (!od->plat) {
1679             dev_err(&pdev->dev, "omap_system_dma_plat_info is missing\n");
1680             return -ENODEV;
1681         }
1682     } else if (IS_ENABLED(CONFIG_ARCH_OMAP1)) {
1683         od->cfg = &default_cfg;
1684 
1685         od->plat = omap_get_plat_info();
1686         if (!od->plat)
1687             return -EPROBE_DEFER;
1688     } else {
1689         return -ENODEV;
1690     }
1691 
1692     od->reg_map = od->plat->reg_map;
1693 
1694     dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
1695     dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
1696     dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
1697     dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
1698     od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
1699     od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
1700     od->ddev.device_tx_status = omap_dma_tx_status;
1701     od->ddev.device_issue_pending = omap_dma_issue_pending;
1702     od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
1703     od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
1704     od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
1705     od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
1706     od->ddev.device_config = omap_dma_slave_config;
1707     od->ddev.device_pause = omap_dma_pause;
1708     od->ddev.device_resume = omap_dma_resume;
1709     od->ddev.device_terminate_all = omap_dma_terminate_all;
1710     od->ddev.device_synchronize = omap_dma_synchronize;
1711     od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1712     od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1713     od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1714     if (__dma_omap15xx(od->plat->dma_attr))
1715         od->ddev.residue_granularity =
1716                 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1717     else
1718         od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1719     od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
1720     od->ddev.dev = &pdev->dev;
1721     INIT_LIST_HEAD(&od->ddev.channels);
1722     mutex_init(&od->lch_lock);
1723     spin_lock_init(&od->lock);
1724     spin_lock_init(&od->irq_lock);
1725 
1726     /* Number of DMA requests */
1727     od->dma_requests = OMAP_SDMA_REQUESTS;
1728     if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
1729                               "dma-requests",
1730                               &od->dma_requests)) {
1731         dev_info(&pdev->dev,
1732              "Missing dma-requests property, using %u.\n",
1733              OMAP_SDMA_REQUESTS);
1734     }
1735 
1736     /* Number of available logical channels */
1737     if (!pdev->dev.of_node) {
1738         od->lch_count = od->plat->dma_attr->lch_count;
1739         if (unlikely(!od->lch_count))
1740             od->lch_count = OMAP_SDMA_CHANNELS;
1741     } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
1742                     &od->lch_count)) {
1743         dev_info(&pdev->dev,
1744              "Missing dma-channels property, using %u.\n",
1745              OMAP_SDMA_CHANNELS);
1746         od->lch_count = OMAP_SDMA_CHANNELS;
1747     }
1748 
1749     /* Mask of allowed logical channels */
1750     if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
1751                                "dma-channel-mask",
1752                                &val)) {
1753         /* Tag channels not in mask as reserved */
1754         val = ~val;
1755         bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
1756     }
1757     if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
1758         bitmap_set(od->lch_bitmap, 0, 2);
1759 
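    /*
     * Editor's note: a sketch of the optional DT properties parsed above;
     * the node name, unit address and values are hypothetical.  Per the
     * generic DMA bindings, set bits in dma-channel-mask mark usable
     * channels, so 0xfffffffe reserves channel 0:
     *
     *    sdma: dma-controller@48056000 {
     *        compatible = "ti,omap3630-sdma";
     *        reg = <0x48056000 0x1000>;
     *        #dma-cells = <1>;
     *        dma-requests = <96>;
     *        dma-channels = <32>;
     *        dma-channel-mask = <0xfffffffe>;
     *    };
     */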
1760     od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
1761                    sizeof(*od->lch_map),
1762                    GFP_KERNEL);
1763     if (!od->lch_map)
1764         return -ENOMEM;
1765 
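    /*
     * Editor's note: one virtual channel is created per DMA request line;
     * a logical channel from lch_bitmap is claimed only when a client
     * actually allocates the channel.
     */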
1766     for (i = 0; i < od->dma_requests; i++) {
1767         rc = omap_dma_chan_init(od);
1768         if (rc) {
1769             omap_dma_free(od);
1770             return rc;
1771         }
1772     }
1773 
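    /*
     * Editor's note: without a second (L1) interrupt resource the driver
     * runs in legacy mode and leaves interrupt handling to the platform DMA
     * code; otherwise all L1 interrupts are masked here and omap_dma_irq is
     * installed as a shared handler.
     */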
1774     irq = platform_get_irq(pdev, 1);
1775     if (irq <= 0) {
1776         dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
1777         od->legacy = true;
1778     } else {
1779         /* Disable all interrupts */
1780         od->irq_enable_mask = 0;
1781         omap_dma_glbl_write(od, IRQENABLE_L1, 0);
1782 
1783         rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
1784                       IRQF_SHARED, "omap-dma-engine", od);
1785         if (rc) {
1786             omap_dma_free(od);
1787             return rc;
1788         }
1789     }
1790 
1791     if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
1792         od->ll123_supported = true;
1793 
1794     od->ddev.filter.map = od->plat->slave_map;
1795     od->ddev.filter.mapcnt = od->plat->slavecnt;
1796     od->ddev.filter.fn = omap_dma_filter_fn;
1797 
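    /*
     * Editor's note: when the controller reports linked-list support, type 2
     * descriptors are carved from a dma_pool (4-byte aligned); if the pool
     * cannot be created, the driver falls back to non-linked-list operation.
     */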
1798     if (od->ll123_supported) {
1799         od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
1800                         &pdev->dev,
1801                         sizeof(struct omap_type2_desc),
1802                         4, 0);
1803         if (!od->desc_pool) {
1804             dev_err(&pdev->dev,
1805                 "unable to allocate descriptor pool\n");
1806             od->ll123_supported = false;
1807         }
1808     }
1809 
1810     rc = dma_async_device_register(&od->ddev);
1811     if (rc) {
1812         pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
1813             rc);
1814         omap_dma_free(od);
1815         return rc;
1816     }
1817 
1818     platform_set_drvdata(pdev, od);
1819 
1820     if (pdev->dev.of_node) {
1821         omap_dma_info.dma_cap = od->ddev.cap_mask;
1822 
1823         /* Device-tree DMA controller registration */
1824         rc = of_dma_controller_register(pdev->dev.of_node,
1825                 of_dma_simple_xlate, &omap_dma_info);
1826         if (rc) {
1827             pr_warn("OMAP-DMA: failed to register DMA controller\n");
1828             dma_async_device_unregister(&od->ddev);
1829             omap_dma_free(od);
                 /* do not continue probing a torn-down device */
                 return rc;
1830         }
1831     }
1832 
1833     omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
1834 
1835     if (od->cfg->needs_busy_check) {
1836         od->nb.notifier_call = omap_dma_busy_notifier;
1837         cpu_pm_register_notifier(&od->nb);
1838     } else if (od->cfg->may_lose_context) {
1839         od->nb.notifier_call = omap_dma_context_notifier;
1840         cpu_pm_register_notifier(&od->nb);
1841     }
1842 
1843     dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
1844          od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
1845 
1846     return 0;
1847 }
1848 
1849 static int omap_dma_remove(struct platform_device *pdev)
1850 {
1851     struct omap_dmadev *od = platform_get_drvdata(pdev);
1852     int irq;
1853 
1854     if (od->cfg->needs_busy_check || od->cfg->may_lose_context)
1855         cpu_pm_unregister_notifier(&od->nb);
1856 
1857     if (pdev->dev.of_node)
1858         of_dma_controller_free(pdev->dev.of_node);
1859 
1860     irq = platform_get_irq(pdev, 1);
1861     if (irq > 0)    /* no L1 IRQ was requested in legacy mode */
             devm_free_irq(&pdev->dev, irq, od);
1862 
1863     dma_async_device_unregister(&od->ddev);
1864 
1865     if (!omap_dma_legacy(od)) {
1866         /* Disable all interrupts */
1867         omap_dma_glbl_write(od, IRQENABLE_L0, 0);
1868     }
1869 
1870     if (od->ll123_supported)
1871         dma_pool_destroy(od->desc_pool);
1872 
1873     omap_dma_free(od);
1874 
1875     return 0;
1876 }
1877 
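/*
 * Editor's note: per-SoC configuration.  lch_end names the highest
 * per-channel register handled by the clear/save helpers (CCFN, or CCDN on
 * the later 3630/OMAP4 revisions); the busy-check and context-loss flags
 * select which cpu_pm notifier probe registers.
 */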
1878 static const struct omap_dma_config omap2420_data = {
1879     .lch_end = CCFN,
1880     .rw_priority = true,
1881     .needs_lch_clear = true,
1882     .needs_busy_check = true,
1883 };
1884 
1885 static const struct omap_dma_config omap2430_data = {
1886     .lch_end = CCFN,
1887     .rw_priority = true,
1888     .needs_lch_clear = true,
1889 };
1890 
1891 static const struct omap_dma_config omap3430_data = {
1892     .lch_end = CCFN,
1893     .rw_priority = true,
1894     .needs_lch_clear = true,
1895     .may_lose_context = true,
1896 };
1897 
1898 static const struct omap_dma_config omap3630_data = {
1899     .lch_end = CCDN,
1900     .rw_priority = true,
1901     .needs_lch_clear = true,
1902     .may_lose_context = true,
1903 };
1904 
1905 static const struct omap_dma_config omap4_data = {
1906     .lch_end = CCDN,
1907     .rw_priority = true,
1908     .needs_lch_clear = true,
1909 };
1910 
1911 static const struct of_device_id omap_dma_match[] = {
1912     { .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
1913     { .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
1914     { .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
1915     { .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
1916     { .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
1917     {},
1918 };
1919 MODULE_DEVICE_TABLE(of, omap_dma_match);
1920 
1921 static struct platform_driver omap_dma_driver = {
1922     .probe  = omap_dma_probe,
1923     .remove = omap_dma_remove,
1924     .driver = {
1925         .name = "omap-dma-engine",
1926         .of_match_table = omap_dma_match,
1927     },
1928 };
1929 
1930 static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
1931 {
1932     if (chan->device->dev->driver == &omap_dma_driver.driver) {
1933         struct omap_dmadev *od = to_omap_dma_dev(chan->device);
1934         struct omap_chan *c = to_omap_dma_chan(chan);
1935         unsigned req = *(unsigned *)param;
1936 
1937         if (req <= od->dma_requests) {
1938             c->dma_sig = req;
1939             return true;
1940         }
1941     }
1942     return false;
1943 }
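
/*
 * Editor's note: an illustrative sketch, not part of the driver, of how a
 * legacy (non-DT) client could obtain a channel through the filter above.
 * The function name is hypothetical; 'req' is the sDMA request line number.
 */
static __maybe_unused struct dma_chan *example_omap_request_chan(unsigned req)
{
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* omap_dma_filter_fn stores 'req' as the channel's request line */
    return dma_request_channel(mask, omap_dma_filter_fn, &req);
}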
1944 
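/*
 * Editor's note: registration happens at subsys_initcall time rather than
 * through module_platform_driver() so the DMA engine is up before its
 * client drivers probe.
 */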
1945 static int omap_dma_init(void)
1946 {
1947     return platform_driver_register(&omap_dma_driver);
1948 }
1949 subsys_initcall(omap_dma_init);
1950 
1951 static void __exit omap_dma_exit(void)
1952 {
1953     platform_driver_unregister(&omap_dma_driver);
1954 }
1955 module_exit(omap_dma_exit);
1956 
1957 MODULE_AUTHOR("Russell King");
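/* Editor's note: newer kernels warn when a module lacks a description */
MODULE_DESCRIPTION("OMAP DMAengine support");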
1958 MODULE_LICENSE("GPL");