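/*
 * Driver for the Marvell MMP two-channel DMA engine
 * (audio TDMA on MMP SoCs and the PXA910 SQU).
 */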
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

/* Two-Channel DMA registers */
#define TDBCR 0x00	/* Byte Count */
#define TDSAR 0x10	/* Source Address */
#define TDDAR 0x20	/* Destination Address */
#define TDNDPR 0x30	/* Next Descriptor Pointer */
#define TDCR 0x40	/* Control */
#define TDCP 0x60	/* Priority */
#define TDCDPR 0x70	/* Current Descriptor Pointer */
#define TDIMR 0x80	/* Interrupt Mask */
#define TDISR 0xa0	/* Interrupt Status */
/* Two-Channel DMA Control Register bits */
#define TDCR_SSZ_8_BITS (0x0 << 22)
#define TDCR_SSZ_12_BITS (0x1 << 22)
#define TDCR_SSZ_16_BITS (0x2 << 22)
#define TDCR_SSZ_20_BITS (0x3 << 22)
#define TDCR_SSZ_24_BITS (0x4 << 22)
#define TDCR_SSZ_32_BITS (0x5 << 22)
#define TDCR_SSZ_SHIFT (0x1 << 22)
#define TDCR_SSZ_MASK (0x7 << 22)
#define TDCR_SSPMOD (0x1 << 21)
#define TDCR_ABR (0x1 << 20)		/* channel abort */
#define TDCR_CDE (0x1 << 17)
#define TDCR_PACKMOD (0x1 << 16)
#define TDCR_CHANACT (0x1 << 14)
#define TDCR_FETCHND (0x1 << 13)	/* fetch next descriptor */
#define TDCR_CHANEN (0x1 << 12)		/* channel enable */
#define TDCR_INTMODE (0x1 << 10)
#define TDCR_CHAINMOD (0x1 << 9)
#define TDCR_BURSTSZ_MSK (0x7 << 6)
#define TDCR_BURSTSZ_4B (0x0 << 6)
#define TDCR_BURSTSZ_8B (0x1 << 6)
#define TDCR_BURSTSZ_16B (0x3 << 6)
#define TDCR_BURSTSZ_32B (0x6 << 6)
#define TDCR_BURSTSZ_64B (0x7 << 6)
#define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
#define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
#define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
#define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
#define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
#define TDCR_BURSTSZ_128B (0x5 << 6)
#define TDCR_DSTDIR_MSK (0x3 << 4)
#define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4)
#define TDCR_DSTDIR_ADDR_INC (0x0 << 4)
#define TDCR_SRCDIR_MSK (0x3 << 2)
#define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2)
#define TDCR_SRCDIR_ADDR_INC (0x0 << 2)
#define TDCR_DSTDESCCONT (0x1 << 1)
#define TDCR_SRCDESTCONT (0x1 << 0)
/* Two-Channel DMA Interrupt Mask Register */
#define TDIMR_COMP (0x1 << 0)

/* Two-Channel DMA Interrupt Status Register */
#define TDISR_COMP (0x1 << 0)
/*
 * Hardware descriptor layout used by the two-channel DMA engine.
 * Descriptor arrays are allocated from the SRAM gen_pool ("asram").
 */
struct mmp_tdma_desc {
	u32 byte_cnt;
	u32 src_addr;
	u32 dst_addr;
	u32 nxt_desc;
};

enum mmp_tdma_type {
	MMP_AUD_TDMA = 0,
	PXA910_SQU,
};

#define TDMA_MAX_XFER_BYTES SZ_64K
struct mmp_tdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct tasklet_struct tasklet;

	struct mmp_tdma_desc *desc_arr;
	dma_addr_t desc_arr_phys;
	int desc_num;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	u32 burst_sz;
	enum dma_slave_buswidth buswidth;
	enum dma_status status;
	struct dma_slave_config slave_config;

	int idx;
	enum mmp_tdma_type type;
	int irq;
	void __iomem *reg_base;

	size_t buf_len;
	size_t period_len;
	size_t pos;

	struct gen_pool *pool;
};

#define TDMA_CHANNEL_NUM 2
struct mmp_tdma_device {
	struct device *dev;
	void __iomem *base;
	struct dma_device device;
	struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
};

#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)

static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg);
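/* Point the controller at a descriptor chain and trigger a descriptor fetch. */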
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
	writel(phys, tdmac->reg_base + TDNDPR);
	writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
	       tdmac->reg_base + TDCR);
}

static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
{
	if (enable)
		writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
	else
		writel(0, tdmac->reg_base + TDIMR);
}

static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
{
	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
	       tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;
}

static int mmp_tdma_disable_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	u32 tdcr;

	tdcr = readl(tdmac->reg_base + TDCR);
	tdcr |= TDCR_ABR;
	tdcr &= ~TDCR_CHANEN;
	writel(tdcr, tdmac->reg_base + TDCR);

	tdmac->status = DMA_COMPLETE;

	return 0;
}

static int mmp_tdma_resume_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
	       tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;

	return 0;
}

static int mmp_tdma_pause_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
	       tdmac->reg_base + TDCR);
	tdmac->status = DMA_PAUSED;

	return 0;
}
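/*
 * Program TDCR for the current direction, burst size and bus width.
 * Audio TDMA and PXA910 SQU use different burst-size encodings.
 */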
static int mmp_tdma_config_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	unsigned int tdcr = 0;

	mmp_tdma_disable_chan(chan);

	if (tdmac->dir == DMA_MEM_TO_DEV)
		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
	else if (tdmac->dir == DMA_DEV_TO_MEM)
		tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;

	if (tdmac->type == MMP_AUD_TDMA) {
		tdcr |= TDCR_PACKMOD;

		switch (tdmac->burst_sz) {
		case 4:
			tdcr |= TDCR_BURSTSZ_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_32B;
			break;
		case 64:
			tdcr |= TDCR_BURSTSZ_64B;
			break;
		case 128:
			tdcr |= TDCR_BURSTSZ_128B;
			break;
		default:
			dev_err(tdmac->dev, "unknown burst size.\n");
			return -EINVAL;
		}

		switch (tdmac->buswidth) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			tdcr |= TDCR_SSZ_8_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			tdcr |= TDCR_SSZ_16_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			tdcr |= TDCR_SSZ_32_BITS;
			break;
		default:
			dev_err(tdmac->dev, "unknown bus size.\n");
			return -EINVAL;
		}
	} else if (tdmac->type == PXA910_SQU) {
		tdcr |= TDCR_SSPMOD;

		switch (tdmac->burst_sz) {
		case 1:
			tdcr |= TDCR_BURSTSZ_SQU_1B;
			break;
		case 2:
			tdcr |= TDCR_BURSTSZ_SQU_2B;
			break;
		case 4:
			tdcr |= TDCR_BURSTSZ_SQU_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_SQU_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_SQU_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_SQU_32B;
			break;
		default:
			dev_err(tdmac->dev, "unknown burst size.\n");
			return -EINVAL;
		}
	}

	writel(tdcr, tdmac->reg_base + TDCR);
	return 0;
}
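/* Acknowledge a completion interrupt; returns -EAGAIN if none was pending. */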
static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
{
	u32 reg = readl(tdmac->reg_base + TDISR);

	if (reg & TDISR_COMP) {
		/* clear irq */
		reg &= ~TDISR_COMP;
		writel(reg, tdmac->reg_base + TDISR);

		return 0;
	}
	return -EAGAIN;
}
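/*
 * Current transfer position: the hardware source address (channel 0) or
 * destination address (channel 1) relative to the start of the buffer.
 */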
static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
{
	size_t reg;

	if (tdmac->idx == 0) {
		reg = __raw_readl(tdmac->reg_base + TDSAR);
		reg -= tdmac->desc_arr[0].src_addr;
	} else if (tdmac->idx == 1) {
		reg = __raw_readl(tdmac->reg_base + TDDAR);
		reg -= tdmac->desc_arr[0].dst_addr;
	} else
		return -EINVAL;

	return reg;
}

static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_tdma_chan *tdmac = dev_id;

	if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
		tasklet_schedule(&tdmac->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
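/* Handler for the single combined interrupt: check each channel in turn. */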
static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
{
	struct mmp_tdma_device *tdev = dev_id;
	int i, ret;
	int irq_num = 0;

	for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
		struct mmp_tdma_chan *tdmac = tdev->tdmac[i];

		ret = mmp_tdma_chan_handler(irq, tdmac);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct mmp_tdma_chan *tdmac = from_tasklet(tdmac, t, tasklet);

	dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}

static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (gpool && tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	tdmac->desc_arr = NULL;
	if (tdmac->status == DMA_ERROR)
		tdmac->status = DMA_COMPLETE;

	return;
}
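/*
 * tx_submit only hands the already-built descriptor ring to the hardware;
 * the transfer itself is started later by issue_pending.
 */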
static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);

	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);

	return 0;
}

static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	int ret;

	dma_async_tx_descriptor_init(&tdmac->desc, chan);
	tdmac->desc.tx_submit = mmp_tdma_tx_submit;

	if (tdmac->irq) {
		ret = devm_request_irq(tdmac->dev, tdmac->irq,
			mmp_tdma_chan_handler, 0, "tdma", tdmac);
		if (ret)
			return ret;
	}
	return 1;
}

static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (tdmac->irq)
		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
	mmp_tdma_free_descriptor(tdmac);
	return;
}
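/* Allocate the descriptor array for this channel from the SRAM pool. */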
static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (!gpool)
		return NULL;

	tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);

	return tdmac->desc_arr;
}
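/*
 * Build a circular descriptor chain covering buf_len bytes in period_len
 * chunks; the last descriptor links back to the first, so the transfer
 * loops until the channel is stopped.
 */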
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct mmp_tdma_desc *desc;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (!is_slave_direction(direction)) {
		dev_err(tdmac->dev, "unsupported transfer direction\n");
		return NULL;
	}

	if (tdmac->status != DMA_COMPLETE) {
		dev_err(tdmac->dev, "controller busy\n");
		return NULL;
	}

	if (period_len > TDMA_MAX_XFER_BYTES) {
		dev_err(tdmac->dev,
			"maximum period size exceeded: %zu > %d\n",
			period_len, TDMA_MAX_XFER_BYTES);
		goto err_out;
	}

	tdmac->status = DMA_IN_PROGRESS;
	tdmac->desc_num = num_periods;
	desc = mmp_tdma_alloc_descriptor(tdmac);
	if (!desc)
		goto err_out;

	if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
		goto err_out;

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];

		if (i + 1 == num_periods)
			desc->nxt_desc = tdmac->desc_arr_phys;
		else
			desc->nxt_desc = tdmac->desc_arr_phys +
				sizeof(*desc) * (i + 1);

		if (direction == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr;
			desc->dst_addr = tdmac->dev_addr;
		} else {
			desc->src_addr = tdmac->dev_addr;
			desc->dst_addr = dma_addr;
		}
		desc->byte_cnt = period_len;
		dma_addr += period_len;
		buf += period_len;
		i++;
	}

	/* enable interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		mmp_tdma_enable_irq(tdmac, true);

	tdmac->buf_len = buf_len;
	tdmac->period_len = period_len;
	tdmac->pos = 0;

	return &tdmac->desc;

err_out:
	tdmac->status = DMA_ERROR;
	return NULL;
}

static int mmp_tdma_terminate_all(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_disable_chan(chan);
	/* disable interrupt */
	mmp_tdma_enable_irq(tdmac, false);

	return 0;
}

static int mmp_tdma_config(struct dma_chan *chan,
			   struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}
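/*
 * Apply the cached slave_config for the given direction and program the
 * channel; called when a cyclic transfer is prepared.
 */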
static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (dir == DMA_DEV_TO_MEM) {
		tdmac->dev_addr = dmaengine_cfg->src_addr;
		tdmac->burst_sz = dmaengine_cfg->src_maxburst;
		tdmac->buswidth = dmaengine_cfg->src_addr_width;
	} else {
		tdmac->dev_addr = dmaengine_cfg->dst_addr;
		tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
		tdmac->buswidth = dmaengine_cfg->dst_addr_width;
	}
	tdmac->dir = dir;

	return mmp_tdma_config_chan(chan);
}

static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	tdmac->pos = mmp_tdma_get_pos(tdmac);
	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 tdmac->buf_len - tdmac->pos);

	return tdmac->status;
}

static void mmp_tdma_issue_pending(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_enable_chan(tdmac);
}

static int mmp_tdma_remove(struct platform_device *pdev)
{
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	return 0;
}
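/* Set up one DMA channel and add it to the dmaengine channel list. */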
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
			      int idx, int irq,
			      int type, struct gen_pool *pool)
{
	struct mmp_tdma_chan *tdmac;

	if (idx >= TDMA_CHANNEL_NUM) {
		dev_err(tdev->dev, "too many channels for device!\n");
		return -EINVAL;
	}

	/* alloc channel */
	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
	if (!tdmac)
		return -ENOMEM;

	if (irq)
		tdmac->irq = irq;
	tdmac->dev = tdev->dev;
	tdmac->chan.device = &tdev->device;
	tdmac->idx = idx;
	tdmac->type = type;
	tdmac->reg_base = tdev->base + idx * 4;
	tdmac->pool = pool;
	tdmac->status = DMA_COMPLETE;
	tdev->tdmac[tdmac->idx] = tdmac;
	tasklet_setup(&tdmac->tasklet, dma_do_tasklet);

	/* add the channel to the dmaengine channel list */
	list_add_tail(&tdmac->chan.device_node,
		      &tdev->device.channels);
	return 0;
}

struct mmp_tdma_filter_param {
	unsigned int chan_id;
};

static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mmp_tdma_filter_param *param = fn_param;

	if (chan->chan_id != param->chan_id)
		return false;

	return true;
}
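/* OF translation: the single DT cell selects the channel index. */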
static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
	dma_cap_mask_t mask = tdev->device.cap_mask;
	struct mmp_tdma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= TDMA_CHANNEL_NUM)
		return NULL;

	return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
				     ofdma->of_node);
}

static const struct of_device_id mmp_tdma_dt_ids[] = {
	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
	{}
};
MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
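/*
 * Probe: map the registers, grab the "asram" gen_pool for descriptors,
 * set up the per-channel (or shared) interrupt, and register the
 * dmaengine device plus the OF DMA translation hook.
 */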
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
	struct gen_pool *pool = NULL;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(tdev->base))
		return PTR_ERR(tdev->base);

	INIT_LIST_HEAD(&tdev->device.channels);

	if (pdev->dev.of_node)
		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
	else
		pool = sram_get_gpool("asram");
	if (!pool) {
		dev_err(&pdev->dev, "asram pool not available\n");
		return -ENOMEM;
	}

	if (irq_num != chan_num) {
		/* fewer IRQs than channels: use one shared handler */
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, IRQF_SHARED, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_config = mmp_tdma_config;
	tdev->device.device_pause = mmp_tdma_pause_chan;
	tdev->device.device_resume = mmp_tdma_resume_chan;
	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

	tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (type == MMP_AUD_TDMA) {
		tdev->device.max_burst = SZ_128;
		tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	} else if (type == PXA910_SQU) {
		tdev->device.max_burst = SZ_32;
	}
	tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	tdev->device.descriptor_reuse = true;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dmaenginem_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 mmp_tdma_xlate, tdev);
		if (ret) {
			dev_err(tdev->device.dev,
				"failed to register controller\n");
			return ret;
		}
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
static const struct platform_device_id mmp_tdma_id_table[] = {
	{ "mmp-adma", MMP_AUD_TDMA },
	{ "pxa910-squ", PXA910_SQU },
	{ },
};

static struct platform_driver mmp_tdma_driver = {
	.driver = {
		.name = "mmp-tdma",
		.of_match_table = mmp_tdma_dt_ids,
	},
	.id_table = mmp_tdma_id_table,
	.probe = mmp_tdma_probe,
	.remove = mmp_tdma_remove,
};

module_platform_driver(mmp_tdma_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");