// SPDX-License-Identifier: GPL-2.0
/*
 * BCM2835 DMA engine support
 *
 * Author:      Florian Meier <florian.meier@koalo.de>
 *              Copyright 2013
 *
 * Based on
 *	OMAP DMAengine support by Russell King
 *
 *	BCM2708 DMA Driver
 *	Copyright (C) 2010 Broadcom
 *
 *	Raspberry Pi PCM I2S ALSA Driver
 *	Copyright (c) by Phil Poole 2013
 *
 *	MARVELL MMP Peripheral DMA Driver
 *	Copyright 2012 Marvell International Ltd.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8

/**
 * struct bcm2835_dmadev - BCM2835 DMA controller
 * @ddev: DMA device
 * @base: base address of register map
 * @zero_page: bus address of zero page (to detect transfers copying from
 *	zero page and avoid accessing memory if so)
 */
struct bcm2835_dmadev {
	struct dma_device ddev;
	void __iomem *base;
	dma_addr_t zero_page;
};

struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};
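/*
 * Note: the eight 32-bit words above mirror the hardware control block
 * layout (BCM2835 ARM Peripherals, sec. 4.2.1.1); the two pad words are
 * reserved by the hardware and round the structure up to the full
 * 32 bytes the DMA engine fetches per control block.
 */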

struct bcm2835_cb_entry {
	struct bcm2835_dma_cb *cb;
	dma_addr_t paddr;
};

struct bcm2835_chan {
	struct virt_dma_chan vc;

	struct dma_slave_config cfg;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;
	struct dma_pool *cb_pool;

	void __iomem *chan_base;
	int irq_number;
	unsigned int irq_flags;

	bool is_lite_channel;
};

struct bcm2835_desc {
	struct bcm2835_chan *c;
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	unsigned int frames;
	size_t size;

	bool cyclic;

	struct bcm2835_cb_entry cb_list[];
};
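/*
 * cb_list above is a flexible array member: bcm2835_dma_create_cb_chain()
 * sizes each descriptor with struct_size(d, cb_list, frames), so the
 * per-frame control-block entries live in the same allocation as the
 * descriptor itself.
 */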

#define BCM2835_DMA_CS		0x00
#define BCM2835_DMA_ADDR	0x04
#define BCM2835_DMA_TI		0x08
#define BCM2835_DMA_SOURCE_AD	0x0c
#define BCM2835_DMA_DEST_AD	0x10
#define BCM2835_DMA_LEN		0x14
#define BCM2835_DMA_STRIDE	0x18
#define BCM2835_DMA_NEXTCB	0x1c
#define BCM2835_DMA_DEBUG	0x20

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE	BIT(0)  /* activate the DMA */
#define BCM2835_DMA_END		BIT(1)  /* current CB has ended */
#define BCM2835_DMA_INT		BIT(2)  /* interrupt status */
#define BCM2835_DMA_DREQ	BIT(3)  /* DREQ state */
#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last
					       * AXI-write to ack
					       */
#define BCM2835_DMA_ERR		BIT(8)
#define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */
#define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */
/* current value of TI.BCM2835_DMA_WAIT_RESP */
#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
#define BCM2835_DMA_DIS_DEBUG	BIT(29) /* disable debug pause signal */
#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */

/* Transfer information bits - also bcm2835_cb.info field */
#define BCM2835_DMA_INT_EN	BIT(0)
#define BCM2835_DMA_TDMODE	BIT(1) /* 2D-Mode */
#define BCM2835_DMA_WAIT_RESP	BIT(3) /* wait for AXI-write to be acked */
#define BCM2835_DMA_D_INC	BIT(4)
#define BCM2835_DMA_D_WIDTH	BIT(5) /* 128bit writes if set */
#define BCM2835_DMA_D_DREQ	BIT(6) /* enable DREQ for destination */
#define BCM2835_DMA_D_IGNORE	BIT(7) /* ignore destination writes */
#define BCM2835_DMA_S_INC	BIT(8)
#define BCM2835_DMA_S_WIDTH	BIT(9) /* 128bit writes if set */
#define BCM2835_DMA_S_DREQ	BIT(10) /* enable SREQ for source */
#define BCM2835_DMA_S_IGNORE	BIT(11) /* ignore source reads - read 0 */
#define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12)
#define BCM2835_DMA_PER_MAP(x)	((x & 31) << 16) /* REQ source */
#define BCM2835_DMA_WAIT(x)	((x & 31) << 21) /* add DMA-wait cycles */
#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */

/* debug register bits */
#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR	BIT(0)
#define BCM2835_DMA_DEBUG_FIFO_ERR		BIT(1)
#define BCM2835_DMA_DEBUG_READ_ERR		BIT(2)
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
#define BCM2835_DMA_DEBUG_ID_SHIFT 16
#define BCM2835_DMA_DEBUG_ID_BITS 9
#define BCM2835_DMA_DEBUG_STATE_SHIFT 16
#define BCM2835_DMA_DEBUG_STATE_BITS 9
#define BCM2835_DMA_DEBUG_VERSION_SHIFT 25
#define BCM2835_DMA_DEBUG_VERSION_BITS 3
#define BCM2835_DMA_DEBUG_LITE BIT(28)

/* shared registers for all dma channels */
#define BCM2835_DMA_INT_STATUS	0xfe0
#define BCM2835_DMA_ENABLE	0xff0

#define BCM2835_DMA_DATA_TYPE_S8	1
#define BCM2835_DMA_DATA_TYPE_S16	2
#define BCM2835_DMA_DATA_TYPE_S32	4
#define BCM2835_DMA_DATA_TYPE_S128	16

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

/* the max dma length for different channels */
#define MAX_DMA_LEN	SZ_1G
#define MAX_LITE_DMA_LEN	(SZ_64K - 4)

static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
{
	/* lite and normal channels have different max frame length */
	return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
}

/* how many frames of max_len size do we need to transfer len bytes */
static inline size_t bcm2835_dma_frames_for_length(size_t len,
						   size_t max_len)
{
	return DIV_ROUND_UP(len, max_len);
}

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
		struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
{
	size_t i;

	for (i = 0; i < desc->frames; i++)
		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
			      desc->cb_list[i].paddr);

	kfree(desc);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	bcm2835_dma_free_cb_chain(
		container_of(vd, struct bcm2835_desc, vd));
}

static void bcm2835_dma_create_cb_set_length(
	struct bcm2835_chan *chan,
	struct bcm2835_dma_cb *control_block,
	size_t len,
	size_t period_len,
	size_t *total_len,
	u32 finalextrainfo)
{
	size_t max_len = bcm2835_dma_max_frame_length(chan);

	/* set the length taking lite-channel limitations into account */
	control_block->length = min_t(u32, len, max_len);

	/* finished if we have no period_length */
	if (!period_len)
		return;

	/*
	 * period_len means: that we need to generate
	 * transfers that are terminating at every
	 * multiple of period_len - this is typically
	 * used to set the interrupt flag in info
	 * which is required during cyclic transfers
	 */

	/* have we filled in period_length yet? */
	if (*total_len + control_block->length < period_len) {
		/* update number of bytes in this period so far */
		*total_len += control_block->length;
		return;
	}

	/* calculate the length that remains to reach period_len */
	control_block->length = period_len - *total_len;

	/* reset total_len for the next period */
	*total_len = 0;

	/* add extrainfo bits to info */
	control_block->info |= finalextrainfo;
}
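/*
 * Worked example (illustrative): on a lite channel (max frame 65532
 * bytes) with period_len = 65536, the helper above emits a 65532-byte
 * frame followed by a 4-byte frame per period; only the 4-byte frame
 * gets finalextrainfo (typically BCM2835_DMA_INT_EN), so the interrupt
 * fires exactly on the period boundary.
 */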

static inline size_t bcm2835_dma_count_frames_for_sg(
	struct bcm2835_chan *c,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	size_t frames = 0;
	struct scatterlist *sgent;
	unsigned int i;
	size_t plength = bcm2835_dma_max_frame_length(c);

	for_each_sg(sgl, sgent, sg_len, i)
		frames += bcm2835_dma_frames_for_length(
			sg_dma_len(sgent), plength);

	return frames;
}

/**
 * bcm2835_dma_create_cb_chain - create a control block chain and fill it in
 *
 * @chan:           the @dma_chan for which we run this
 * @direction:      the direction in which we transfer
 * @cyclic:         it is a cyclic transfer
 * @info:           the default info bits to apply per controlblock
 * @finalextrainfo: additional bits in the last controlblock
 *                  (or when period_len is reached in case of cyclic)
 * @frames:         number of controlblocks to allocate
 * @src:            the src address to assign (if the S_INC bit is set
 *                  it will be incremented)
 * @dst:            the dst address to assign (if the D_INC bit is set
 *                  it will be incremented)
 * @buf_len:        the full buffer length (may also be 0)
 * @period_len:     the period length when to apply @finalextrainfo
 *                  in addition to the last transfer
 *                  this will also break some control-blocks early
 * @gfp:            the GFP flag to use for allocation
 */
static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
	struct dma_chan *chan, enum dma_transfer_direction direction,
	bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
	dma_addr_t src, dma_addr_t dst, size_t buf_len,
	size_t period_len, gfp_t gfp)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	size_t len = buf_len, total_len;
	size_t frame;
	struct bcm2835_desc *d;
	struct bcm2835_cb_entry *cb_entry;
	struct bcm2835_dma_cb *control_block;

	if (!frames)
		return NULL;

	/* allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (!d)
		return NULL;

	d->c = c;
	d->dir = direction;
	d->cyclic = cyclic;

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
		cb_entry = &d->cb_list[frame];
		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
					      &cb_entry->paddr);
		if (!cb_entry->cb)
			goto error_cb;

		/* fill in the control block */
		control_block = cb_entry->cb;
		control_block->info = info;
		control_block->src = src;
		control_block->dst = dst;
		control_block->stride = 0;
		control_block->next = 0;

		if (buf_len) {
			/* set up length in control_block */
			bcm2835_dma_create_cb_set_length(
				c, control_block,
				len, period_len, &total_len,
				cyclic ? finalextrainfo : 0);

			/* calculate new remaining length */
			len -= control_block->length;
		}

		/* link this to the last controlblock */
		if (frame)
			d->cb_list[frame - 1].cb->next = cb_entry->paddr;

		/* update src and dst */
		if (src && (info & BCM2835_DMA_S_INC))
			src += control_block->length;
		if (dst && (info & BCM2835_DMA_D_INC))
			dst += control_block->length;

		/* length of total transfer */
		d->size += control_block->length;
	}

	/* the last frame requires extra flags */
	d->cb_list[d->frames - 1].cb->info |= finalextrainfo;

	/* detect a size mismatch */
	if (buf_len && (d->size != buf_len))
		goto error_cb;

	return d;
error_cb:
	bcm2835_dma_free_cb_chain(d);

	return NULL;
}
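/*
 * Note: the chain built above leaves the last control block's next
 * pointer at 0 (end of chain); cyclic callers patch it afterwards to
 * point back at the first control block to form a loop.
 */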

static void bcm2835_dma_fill_cb_chain_with_sg(
	struct dma_chan *chan,
	enum dma_transfer_direction direction,
	struct bcm2835_cb_entry *cb,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	size_t len, max_len;
	unsigned int i;
	dma_addr_t addr;
	struct scatterlist *sgent;

	max_len = bcm2835_dma_max_frame_length(c);
	for_each_sg(sgl, sgent, sg_len, i) {
		for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
		     len > 0;
		     addr += cb->cb->length, len -= cb->cb->length, cb++) {
			if (direction == DMA_DEV_TO_MEM)
				cb->cb->dst = addr;
			else
				cb->cb->src = addr;
			cb->cb->length = min(len, max_len);
		}
	}
}

static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
	void __iomem *chan_base = c->chan_base;
	long int timeout = 10000;

	/*
	 * A zero control block address means the channel is idle.
	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
	 */
	if (!readl(chan_base + BCM2835_DMA_ADDR))
		return;

	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* wait for any outstanding AXI writes to drain */
	while ((readl(chan_base + BCM2835_DMA_CS) &
	       BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();

	/* peripheral might be stuck and fail to signal completion */
	if (!timeout)
		dev_err(c->vc.chan.device->dev,
			"failed to complete outstanding writes\n");

	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}

static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}
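/*
 * Writing the first control block's bus address to ADDR and then setting
 * ACTIVE kicks off the transfer; from there the engine walks the chained
 * next pointers on its own, raising an interrupt wherever INT_EN was set
 * in a control block's info field.
 */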

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	/* check the shared interrupt */
	if (c->irq_flags & IRQF_SHARED) {
		/* check if the interrupt is enabled */
		flags = readl(c->chan_base + BCM2835_DMA_CS);
		/* if not set then we are not the reason for the irq */
		if (!(flags & BCM2835_DMA_INT))
			return IRQ_NONE;
	}

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Clear the INT flag to receive further interrupts. Keep the channel
	 * active in case the descriptor is cyclic or in case the client has
	 * already terminated the descriptor and issued a new one. (May happen
	 * if this IRQ handler is threaded.) If the channel is finished, it
	 * will remain idle despite the ACTIVE flag being set.
	 */
	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
	       c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		if (d->cyclic) {
			/* call the cyclic callback */
			vchan_cyclic_callback(&d->vd);
		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
			vchan_cookie_complete(&c->desc->vd);
			bcm2835_dma_start_desc(c);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct device *dev = c->vc.chan.device->dev;

	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

	/*
	 * Control blocks are 256 bit in length and must start at a 256 bit
	 * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
	 */
	c->cb_pool = dma_pool_create(dev_name(dev), dev,
				     sizeof(struct bcm2835_dma_cb), 32, 0);
	if (!c->cb_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return request_irq(c->irq_number, bcm2835_dma_callback,
			   c->irq_flags, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);
	dma_pool_destroy(c->cb_pool);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}

static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
		size_t this_size = control_block->length;
		dma_addr_t dma;

		if (d->dir == DMA_DEV_TO_MEM)
			dma = control_block->dst;
		else
			dma = control_block->src;

		if (size)
			size += this_size;
		else if (addr >= dma && addr < dma + this_size)
			size += dma + this_size - addr;
	}

	return size;
}
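/*
 * In other words: once the frame containing @addr has been found, the
 * residue is the unfinished remainder of that frame plus the full length
 * of every frame after it.
 */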

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		else
			pos = 0;

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC;
	u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* if there is no buffer, no src or no dst, then we won't do anything */
	if (!src || !dst || !len)
		return NULL;

	/* calculate number of frames */
	frames = bcm2835_dma_frames_for_length(len, max_len);

	/* allocate the CB chain - this also fills in the pointers */
	d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false,
					info, extra, frames,
					src, dst, len, 0, GFP_KERNEL);
	if (!d)
		return NULL;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
	struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src = 0, dst = 0;
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = BCM2835_DMA_INT_EN;
	size_t frames;

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev,
			"%s: bad direction?\n", __func__);
		return NULL;
	}

	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = c->cfg.src_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = c->cfg.dst_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
	}

	/* count frames in sg list */
	frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);

	/* allocate the CB chain */
	d = bcm2835_dma_create_cb_chain(chan, direction, false,
					info, extra,
					frames, src, dst, 0, 0,
					GFP_NOWAIT);
	if (!d)
		return NULL;

	/* fill in frames with scatterlist pointers */
	bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
					  sgl, sg_len);

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src, dst;
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = 0;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!buf_len) {
		dev_err(chan->device->dev,
			"%s: bad buffer length (= 0)\n", __func__);
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		extra |= BCM2835_DMA_INT_EN;
	else
		period_len = buf_len;

	/*
	 * warn if buf_len is not a multiple of period_len - this may lead
	 * to unexpected latencies for interrupts and thus audible clicks
	 */
	if (buf_len % period_len)
		dev_warn_once(chan->device->dev,
			      "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
			      __func__, buf_len, period_len);

	/* Setup DREQ channel */
	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = c->cfg.src_addr;
		dst = buf_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = c->cfg.dst_addr;
		src = buf_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;

		/* non-lite channels can write zeroes w/o accessing memory */
		if (buf_addr == od->zero_page && !c->is_lite_channel)
			info |= BCM2835_DMA_S_IGNORE;
	}

	/* calculate number of frames */
	frames = /* number of periods */
		 DIV_ROUND_UP(buf_len, period_len) *
		 /* number of frames per period */
		 bcm2835_dma_frames_for_length(period_len, max_len);

	/*
	 * allocate the CB chain
	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
	 * implementation calls prep_dma_cyclic with interrupts disabled.
	 */
	d = bcm2835_dma_create_cb_chain(chan, direction, true,
					info, extra,
					frames, src, dst, buf_len,
					period_len, GFP_NOWAIT);
	if (!d)
		return NULL;

	/* wrap around into a loop */
	d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int bcm2835_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	c->cfg = *cfg;

	return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* stop DMA activity */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void bcm2835_dma_synchronize(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
				 int irq, unsigned int irq_flags)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;
	c->irq_flags = irq_flags;

	/* check in DEBUG register if this is a LITE channel */
	if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
		BCM2835_DMA_DEBUG_LITE)
		c->is_lite_channel = true;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
			     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);

static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					  struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}
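/*
 * Illustrative device-tree usage (assuming the binding's single DMA
 * cell carries the DREQ number, which is what spec->args[0] above
 * consumes):
 *
 *	dmas = <&dma 2>;
 *	dma-names = "tx";
 */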

static int bcm2835_dma_probe(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od;
	struct resource *res;
	void __iomem *base;
	int rc;
	int i, j;
	int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
	int irq_flags;
	uint32_t chans_available;
	char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "Unable to set DMA mask\n");
		return rc;
	}

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
	od->ddev.device_config = bcm2835_dma_slave_config;
	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
	od->ddev.device_synchronize = bcm2835_dma_synchronize;
	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			      BIT(DMA_MEM_TO_MEM);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.descriptor_reuse = true;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);

	platform_set_drvdata(pdev, od);

	od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
					   PAGE_SIZE, DMA_TO_DEVICE,
					   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
		dev_err(&pdev->dev, "Failed to map zero page\n");
		return -ENOMEM;
	}

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
				 "brcm,dma-channel-mask",
				 &chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

	/* get irqs for each channel that we support */
	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
		/* skip masked out channels */
		if (!(chans_available & (1 << i))) {
			irq[i] = -1;
			continue;
		}

		/* get the named irq */
		snprintf(chan_name, sizeof(chan_name), "dma%i", i);
		irq[i] = platform_get_irq_byname(pdev, chan_name);
		if (irq[i] >= 0)
			continue;

		/* legacy device tree case handling */
		dev_warn_once(&pdev->dev,
			      "missing interrupt-names property in device tree - legacy interpretation is used\n");
		/*
		 * in case of channel >= 11
		 * use the 11th interrupt and that is shared
		 */
		irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
	}

	/* get irqs for each channel */
	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
		/* skip channels without irq */
		if (irq[i] < 0)
			continue;

		/* check if there are other channels that also use this irq */
		irq_flags = 0;
		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
			if ((i != j) && (irq[j] == irq[i])) {
				irq_flags = IRQF_SHARED;
				break;
			}

		/* initialize the channel */
		rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
		if (rc)
			goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
					bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}

static int bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	bcm2835_dma_free(od);

	return 0;
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

module_platform_driver(bcm2835_dma_driver);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");