// SPDX-License-Identifier: GPL-2.0-only
/*
 * MOXA ART SoCs DMA Engine support.
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/bitops.h>

#include <asm/cacheflush.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define APB_DMA_MAX_CHANNEL 4

#define REG_OFF_ADDRESS_SOURCE 0
#define REG_OFF_ADDRESS_DEST 4
#define REG_OFF_CYCLES 8
#define REG_OFF_CTRL 12
#define REG_OFF_CHAN_SIZE 16

#define APB_DMA_ENABLE BIT(0)
#define APB_DMA_FIN_INT_STS BIT(1)
#define APB_DMA_FIN_INT_EN BIT(2)
#define APB_DMA_BURST_MODE BIT(3)
#define APB_DMA_ERR_INT_STS BIT(4)
#define APB_DMA_ERR_INT_EN BIT(5)

/*
 * Unset: APB
 * Set:   AHB
 */
#define APB_DMA_SOURCE_SELECT 0x40
#define APB_DMA_DEST_SELECT 0x80

#define APB_DMA_SOURCE 0x100
#define APB_DMA_DEST 0x1000

#define APB_DMA_SOURCE_MASK 0x700
#define APB_DMA_DEST_MASK 0x7000

/*
 * Source/destination address increment per transfer cycle:
 * INC_0:    no increment
 * INC_1_4:  +1 (+4 in burst mode)
 * INC_2_8:  +2 (+8 in burst mode)
 * INC_4_16: +4 (+16 in burst mode)
 * DEC_*:    same magnitudes, decrementing
 */
#define APB_DMA_SOURCE_INC_0 0
#define APB_DMA_SOURCE_INC_1_4 0x100
#define APB_DMA_SOURCE_INC_2_8 0x200
#define APB_DMA_SOURCE_INC_4_16 0x300
#define APB_DMA_SOURCE_DEC_1_4 0x500
#define APB_DMA_SOURCE_DEC_2_8 0x600
#define APB_DMA_SOURCE_DEC_4_16 0x700
#define APB_DMA_DEST_INC_0 0
#define APB_DMA_DEST_INC_1_4 0x1000
#define APB_DMA_DEST_INC_2_8 0x2000
#define APB_DMA_DEST_INC_4_16 0x3000
#define APB_DMA_DEST_DEC_1_4 0x5000
#define APB_DMA_DEST_DEC_2_8 0x6000
#define APB_DMA_DEST_DEC_4_16 0x7000

/*
 * Request signal select of source/destination address for DMA hardware
 * handshake.
 *
 * The request line number is a property of the DMA controller itself.
 *
 * 0:    No request / Grant signal
 * 1-15: Request    / Grant signal
 */
#define APB_DMA_SOURCE_REQ_NO 0x1000000
#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
#define APB_DMA_DEST_REQ_NO 0x10000
#define APB_DMA_DEST_REQ_NO_MASK 0xf0000

#define APB_DMA_DATA_WIDTH 0x100000
#define APB_DMA_DATA_WIDTH_MASK 0x300000

/*
 * Data width of transfer:
 * 00: word
 * 01: half
 * 10: byte
 */
#define APB_DMA_DATA_WIDTH_4 0
#define APB_DMA_DATA_WIDTH_2 0x100000
#define APB_DMA_DATA_WIDTH_1 0x200000

#define APB_DMA_CYCLES_MASK 0x00ffffff

#define MOXART_DMA_DATA_TYPE_S8 0x00
#define MOXART_DMA_DATA_TYPE_S16 0x01
#define MOXART_DMA_DATA_TYPE_S32 0x02

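/*
 * Software state. One moxart_desc covers a whole scatter/gather list;
 * the hardware only knows about one source/destination/cycle-count
 * triple at a time, so it is reprogrammed per sg entry from the
 * completion interrupt.
 */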
struct moxart_sg {
	dma_addr_t addr;
	uint32_t len;
};

struct moxart_desc {
	enum dma_transfer_direction dma_dir;
	dma_addr_t dev_addr;
	unsigned int sglen;
	unsigned int dma_cycles;
	struct virt_dma_desc vd;
	uint8_t es;
	struct moxart_sg sg[];
};

struct moxart_chan {
	struct virt_dma_chan vc;

	void __iomem *base;
	struct moxart_desc *desc;

	struct dma_slave_config cfg;

	bool allocated;
	bool error;
	int ch_num;
	unsigned int line_reqno;
	unsigned int sgidx;
};

struct moxart_dmadev {
	struct dma_device dma_slave;
	struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
	unsigned int irq;
};

struct moxart_filter_data {
	struct moxart_dmadev *mdc;
	struct of_phandle_args *dma_spec;
};

static const unsigned int es_bytes[] = {
	[MOXART_DMA_DATA_TYPE_S8] = 1,
	[MOXART_DMA_DATA_TYPE_S16] = 2,
	[MOXART_DMA_DATA_TYPE_S32] = 4,
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct moxart_chan, vc.chan);
}

static inline struct moxart_desc *to_moxart_dma_desc(
	struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct moxart_desc, vd.tx);
}

static void moxart_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct moxart_desc, vd));
}

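/* Disable the channel and its interrupts, then free every descriptor. */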
static int moxart_terminate_all(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);
	u32 ctrl;

	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);

	spin_lock_irqsave(&ch->vc.lock, flags);

	if (ch->desc) {
		moxart_dma_desc_free(&ch->desc->vd);
		ch->desc = NULL;
	}

	ctrl = readl(ch->base + REG_OFF_CTRL);
	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
	writel(ctrl, ch->base + REG_OFF_CTRL);

	vchan_get_all_descriptors(&ch->vc, &head);
	spin_unlock_irqrestore(&ch->vc.lock, flags);
	vchan_dma_desc_free_list(&ch->vc, &head);

	return 0;
}

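/*
 * Latch the slave configuration and preprogram the control word: burst
 * mode, data width, address increment on the memory side and the
 * request line number on the peripheral side.
 */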
static int moxart_slave_config(struct dma_chan *chan,
			       struct dma_slave_config *cfg)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	u32 ctrl;

	ch->cfg = *cfg;

	ctrl = readl(ch->base + REG_OFF_CTRL);
	ctrl |= APB_DMA_BURST_MODE;
	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
	ctrl &= ~APB_DMA_DATA_WIDTH_MASK;

	switch (ch->cfg.src_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl |= APB_DMA_DATA_WIDTH_1;
		if (ch->cfg.direction != DMA_MEM_TO_DEV)
			ctrl |= APB_DMA_DEST_INC_1_4;
		else
			ctrl |= APB_DMA_SOURCE_INC_1_4;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl |= APB_DMA_DATA_WIDTH_2;
		if (ch->cfg.direction != DMA_MEM_TO_DEV)
			ctrl |= APB_DMA_DEST_INC_2_8;
		else
			ctrl |= APB_DMA_SOURCE_INC_2_8;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl |= APB_DMA_DATA_WIDTH_4; /* 0: the mask clear above selects it */
		if (ch->cfg.direction != DMA_MEM_TO_DEV)
			ctrl |= APB_DMA_DEST_INC_4_16;
		else
			ctrl |= APB_DMA_SOURCE_INC_4_16;
		break;
	default:
		return -EINVAL;
	}

	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
		ctrl &= ~APB_DMA_DEST_SELECT;
		ctrl |= APB_DMA_SOURCE_SELECT;
		ctrl |= (ch->line_reqno << 16) & APB_DMA_DEST_REQ_NO_MASK;
	} else {
		ctrl |= APB_DMA_DEST_SELECT;
		ctrl &= ~APB_DMA_SOURCE_SELECT;
		ctrl |= (ch->line_reqno << 24) & APB_DMA_SOURCE_REQ_NO_MASK;
	}

	writel(ctrl, ch->base + REG_OFF_CTRL);

	return 0;
}

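/*
 * Build a software descriptor for a slave scatter/gather transfer. A
 * client drives this through the generic dmaengine API; a minimal
 * sketch (assuming the client already holds a channel and a DMA-mapped
 * sg list) looks like:
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */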
static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct moxart_desc *d;
	enum dma_slave_buswidth dev_width;
	dma_addr_t dev_addr;
	struct scatterlist *sgent;
	unsigned int es;
	unsigned int i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
			__func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = ch->cfg.src_addr;
		dev_width = ch->cfg.src_addr_width;
	} else {
		dev_addr = ch->cfg.dst_addr;
		dev_width = ch->cfg.dst_addr_width;
	}

	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = MOXART_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = MOXART_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = MOXART_DMA_DATA_TYPE_S32;
		break;
	default:
		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
			__func__, dev_width);
		return NULL;
	}

	d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dma_dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	for_each_sg(sgl, sgent, sg_len, i) {
		d->sg[i].addr = sg_dma_address(sgent);
		d->sg[i].len = sg_dma_len(sgent);
	}

	d->sglen = sg_len;

	ch->error = false;

	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
}

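/*
 * "#dma-cells" is 1: the single cell in the DT phandle argument is the
 * request line number, latched here for moxart_slave_config().
 */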
static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct moxart_dmadev *mdc = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct moxart_chan *ch;

	chan = dma_get_any_slave_channel(&mdc->dma_slave);
	if (!chan)
		return NULL;

	ch = to_moxart_dma_chan(chan);
	ch->line_reqno = dma_spec->args[0];

	return chan;
}

static int moxart_alloc_chan_resources(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);

	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
		__func__, ch->ch_num);
	ch->allocated = true;

	return 0;
}

static void moxart_free_chan_resources(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);

	vchan_free_chan_resources(&ch->vc);

	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
		__func__, ch->ch_num);
	ch->allocated = false;
}

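/* Program source and destination addresses for one hardware transfer. */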
static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
				  dma_addr_t dst_addr)
{
	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
}

static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
{
	struct moxart_desc *d = ch->desc;
	unsigned int sglen_div = es_bytes[d->es];

	d->dma_cycles = len >> sglen_div;

	/*
	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
	 * bytes (when width is APB_DMA_DATA_WIDTH_4).
	 */
	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);

	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
		__func__, d->dma_cycles, len);
}

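/* Kick the channel: enable it along with completion and error interrupts. */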
static void moxart_start_dma(struct moxart_chan *ch)
{
	u32 ctrl;

	ctrl = readl(ch->base + REG_OFF_CTRL);
	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
	writel(ctrl, ch->base + REG_OFF_CTRL);
}

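/* Point the hardware at sg entry @idx of the current descriptor and start. */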
static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
{
	struct moxart_desc *d = ch->desc;
	struct moxart_sg *sg = ch->desc->sg + idx;

	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
		moxart_dma_set_params(ch, d->dev_addr, sg->addr);

	moxart_set_transfer_params(ch, sg->len);

	moxart_start_dma(ch);
}

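/* Take the next issued descriptor; called with ch->vc.lock held. */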
static void moxart_dma_start_desc(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&ch->vc);

	if (!vd) {
		ch->desc = NULL;
		return;
	}

	list_del(&vd->node);

	ch->desc = to_moxart_dma_desc(&vd->tx);
	ch->sgidx = 0;

	moxart_dma_start_sg(ch, 0);
}

static void moxart_issue_pending(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&ch->vc.lock, flags);
	if (vchan_issue_pending(&ch->vc) && !ch->desc)
		moxart_dma_start_desc(chan);
	spin_unlock_irqrestore(&ch->vc.lock, flags);
}

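/* Sum the bytes of all sg entries that have not completed yet. */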
static size_t moxart_dma_desc_size(struct moxart_desc *d,
				   unsigned int completed_sgs)
{
	unsigned int i;
	size_t size = 0;

	for (i = completed_sgs; i < d->sglen; i++)
		size += d->sg[i].len;

	return size;
}

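/*
 * Residue of the active descriptor: the cycle register counts down, so
 * the cycles already done are subtracted from the remaining sg bytes.
 */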
static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
{
	size_t size;
	unsigned int completed_cycles, cycles;

	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
	cycles = readl(ch->base + REG_OFF_CYCLES);
	completed_cycles = (ch->desc->dma_cycles - cycles);
	size -= completed_cycles << es_bytes[ch->desc->es];

	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);

	return size;
}

static enum dma_status moxart_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct virt_dma_desc *vd;
	struct moxart_desc *d;
	enum dma_status ret;
	unsigned long flags;

	/*
	 * dma_cookie_status() assigns initial residue value.
	 */
	ret = dma_cookie_status(chan, cookie, txstate);

	spin_lock_irqsave(&ch->vc.lock, flags);
	vd = vchan_find_desc(&ch->vc, cookie);
	if (vd) {
		d = to_moxart_dma_desc(&vd->tx);
		txstate->residue = moxart_dma_desc_size(d, 0);
	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
		txstate->residue = moxart_dma_desc_size_in_flight(ch);
	}
	spin_unlock_irqrestore(&ch->vc.lock, flags);

	if (ch->error)
		return DMA_ERROR;

	return ret;
}

static void moxart_dma_init(struct dma_device *dma, struct device *dev)
{
	dma->device_prep_slave_sg = moxart_prep_slave_sg;
	dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
	dma->device_free_chan_resources = moxart_free_chan_resources;
	dma->device_issue_pending = moxart_issue_pending;
	dma->device_tx_status = moxart_tx_status;
	dma->device_config = moxart_slave_config;
	dma->device_terminate_all = moxart_terminate_all;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

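/*
 * All channels share one interrupt line: scan every allocated channel,
 * acknowledge its status bits, then either start the next sg entry or
 * complete the descriptor and begin the next one.
 */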
static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
{
	struct moxart_dmadev *mc = devid;
	struct moxart_chan *ch = &mc->slave_chans[0];
	unsigned int i;
	u32 ctrl;

	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);

	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
		if (!ch->allocated)
			continue;

		ctrl = readl(ch->base + REG_OFF_CTRL);

		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
			__func__, ch, ch->base, ctrl);

		if (ctrl & APB_DMA_FIN_INT_STS) {
			ctrl &= ~APB_DMA_FIN_INT_STS;
			if (ch->desc) {
				spin_lock(&ch->vc.lock);
				if (++ch->sgidx < ch->desc->sglen) {
					moxart_dma_start_sg(ch, ch->sgidx);
				} else {
					vchan_cookie_complete(&ch->desc->vd);
					moxart_dma_start_desc(&ch->vc.chan);
				}
				spin_unlock(&ch->vc.lock);
			}
		}

		if (ctrl & APB_DMA_ERR_INT_STS) {
			ctrl &= ~APB_DMA_ERR_INT_STS;
			ch->error = true;
		}

		writel(ctrl, ch->base + REG_OFF_CTRL);
	}

	return IRQ_HANDLED;
}

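/*
 * Bound via the "moxa,moxart-dma" compatible. A matching device tree
 * node looks roughly like the sketch below (the base address and IRQ
 * number are illustrative, not authoritative):
 *
 *	dma: dma@90500080 {
 *		compatible = "moxa,moxart-dma";
 *		reg = <0x90500080 0x40>;
 *		interrupts = <24 0>;
 *		#dma-cells = <1>;
 *	};
 */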
static int moxart_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	void __iomem *dma_base_addr;
	int ret, i;
	unsigned int irq;
	struct moxart_chan *ch;
	struct moxart_dmadev *mdc;

	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
	if (!mdc)
		return -ENOMEM;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(dev, "no IRQ resource\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dma_base_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(dma_base_addr))
		return PTR_ERR(dma_base_addr);

	dma_cap_zero(mdc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);

	moxart_dma_init(&mdc->dma_slave, dev);

	ch = &mdc->slave_chans[0];
	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
		ch->ch_num = i;
		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
		ch->allocated = false;

		ch->vc.desc_free = moxart_dma_desc_free;
		vchan_init(&ch->vc, &mdc->dma_slave);

		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
			__func__, i, ch->ch_num, ch->base);
	}

	platform_set_drvdata(pdev, mdc);

	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
			       "moxart-dma-engine", mdc);
	if (ret) {
		dev_err(dev, "devm_request_irq failed\n");
		return ret;
	}
	mdc->irq = irq;

	ret = dma_async_device_register(&mdc->dma_slave);
	if (ret) {
		dev_err(dev, "dma_async_device_register failed\n");
		return ret;
	}

	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
	if (ret) {
		dev_err(dev, "of_dma_controller_register failed\n");
		dma_async_device_unregister(&mdc->dma_slave);
		return ret;
	}

	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);

	return 0;
}

static int moxart_remove(struct platform_device *pdev)
{
	struct moxart_dmadev *m = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, m->irq, m);

	dma_async_device_unregister(&m->dma_slave);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static const struct of_device_id moxart_dma_match[] = {
	{ .compatible = "moxa,moxart-dma" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_dma_match);

static struct platform_driver moxart_driver = {
	.probe = moxart_probe,
	.remove = moxart_remove,
	.driver = {
		.name = "moxart-dma-engine",
		.of_match_table = moxart_dma_match,
	},
};

static int __init moxart_init(void)
{
	return platform_driver_register(&moxart_driver);
}
subsys_initcall(moxart_init);

static void __exit moxart_exit(void)
{
	platform_driver_unregister(&moxart_driver);
}
module_exit(moxart_exit);

MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
MODULE_DESCRIPTION("MOXART DMA engine driver");
MODULE_LICENSE("GPL v2");