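/*
 * dmaengine driver for the Renesas NBPFAXI64* series of DMA controllers
 * (see the "renesas,nbpfaxi64dmac*" compatibles below).
 */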
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/nbpfaxi.h>

#include "dmaengine.h"

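/* Per-channel register block layout and global (0x300+) register definitions */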
#define NBPF_REG_CHAN_OFFSET	0
#define NBPF_REG_CHAN_SIZE	0x40

#define NBPF_CHAN_CUR_TR_BYTE	0x20

#define NBPF_CHAN_STAT	0x24
#define NBPF_CHAN_STAT_EN	1
#define NBPF_CHAN_STAT_TACT	4
#define NBPF_CHAN_STAT_ERR	0x10
#define NBPF_CHAN_STAT_END	0x20
#define NBPF_CHAN_STAT_TC	0x40
#define NBPF_CHAN_STAT_DER	0x400

#define NBPF_CHAN_CTRL	0x28
#define NBPF_CHAN_CTRL_SETEN	1
#define NBPF_CHAN_CTRL_CLREN	2
#define NBPF_CHAN_CTRL_STG	4
#define NBPF_CHAN_CTRL_SWRST	8
#define NBPF_CHAN_CTRL_CLRRQ	0x10
#define NBPF_CHAN_CTRL_CLREND	0x20
#define NBPF_CHAN_CTRL_CLRTC	0x40
#define NBPF_CHAN_CTRL_SETSUS	0x100
#define NBPF_CHAN_CTRL_CLRSUS	0x200

#define NBPF_CHAN_CFG	0x2c
#define NBPF_CHAN_CFG_SEL	7
#define NBPF_CHAN_CFG_REQD	8
#define NBPF_CHAN_CFG_LOEN	0x10
#define NBPF_CHAN_CFG_HIEN	0x20
#define NBPF_CHAN_CFG_LVL	0x40
#define NBPF_CHAN_CFG_AM	0x700
#define NBPF_CHAN_CFG_SDS	0xf000
#define NBPF_CHAN_CFG_DDS	0xf0000
#define NBPF_CHAN_CFG_SAD	0x100000
#define NBPF_CHAN_CFG_DAD	0x200000
#define NBPF_CHAN_CFG_TM	0x400000
#define NBPF_CHAN_CFG_DEM	0x1000000
#define NBPF_CHAN_CFG_TCM	0x2000000
#define NBPF_CHAN_CFG_SBE	0x8000000
#define NBPF_CHAN_CFG_RSEL	0x10000000
#define NBPF_CHAN_CFG_RSW	0x20000000
#define NBPF_CHAN_CFG_REN	0x40000000
#define NBPF_CHAN_CFG_DMS	0x80000000

#define NBPF_CHAN_NXLA	0x38
#define NBPF_CHAN_CRLA	0x3c

#define NBPF_HEADER_LV	1
#define NBPF_HEADER_LE	2
#define NBPF_HEADER_WBD	4
#define NBPF_HEADER_DIM	8

#define NBPF_CTRL	0x300
#define NBPF_CTRL_PR	1
#define NBPF_CTRL_LVINT	2

#define NBPF_DSTAT_ER	0x314
#define NBPF_DSTAT_END	0x318

#define NBPF_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct nbpf_config {
	int num_channels;
	int buffer_size;
};
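
/*
 * struct nbpf_link_reg - hardware link descriptor, fetched by the DMAC.
 * @header carries NBPF_HEADER_* flags, @config a per-stage NBPF_CHAN_CFG_*
 * word, and @next the bus address of the next link descriptor (0 = none).
 */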
struct nbpf_link_reg {
	u32	header;
	u32	src_addr;
	u32	dst_addr;
	u32	transaction_size;
	u32	config;
	u32	interval;
	u32	extension;
	u32	next;
} __packed;

struct nbpf_device;
struct nbpf_channel;
struct nbpf_desc;

struct nbpf_link_desc {
	struct nbpf_link_reg *hwdesc;
	dma_addr_t hwdesc_dma_addr;
	struct nbpf_desc *desc;
	struct list_head node;
};

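/*
 * struct nbpf_desc - software DMA transfer descriptor
 * @async_tx:	dmaengine descriptor object
 * @user_wait:	descriptor completed, waiting for the user's ack
 * @length:	total transfer length in bytes
 * @chan:	owning channel
 * @sg:		list of nbpf_link_desc segments making up this transfer
 * @node:	member of one of the channel's descriptor lists
 */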
struct nbpf_desc {
	struct dma_async_tx_descriptor async_tx;
	bool user_wait;
	size_t length;
	struct nbpf_channel *chan;
	struct list_head sg;
	struct list_head node;
};

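/*
 * Descriptors are allocated a page at a time: each page holds a list node,
 * NBPF_DESCS_PER_PAGE software descriptors and, for each of them,
 * NBPF_SEGMENTS_PER_DESC link descriptors with their hardware counterparts.
 */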
#define NBPF_SEGMENTS_PER_DESC 4
#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
	(sizeof(struct nbpf_desc) +					\
	 NBPF_SEGMENTS_PER_DESC *					\
	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)

struct nbpf_desc_page {
	struct list_head node;
	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
};

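/*
 * struct nbpf_channel - one DMAC channel
 * @dma_chan:	dmaengine channel
 * @tasklet:	completion bottom half
 * @base:	this channel's register block
 * @nbpf:	owning DMAC device
 * @name:	IRQ name
 * @irq:	channel IRQ number
 * @slave_src_addr/width/burst, @slave_dst_addr/width/burst: slave transfer
 *		parameters derived from dma_slave_config
 * @terminal:	DMA request line number from the DT phandle arguments
 * @dmarq_cfg:	cached DMARQ bits for the CFG register
 * @flags:	NBPF_SLAVE_RQ_* flags from the DT phandle arguments
 * @lock:	protects the descriptor lists below
 * @free_links:	unused link descriptors
 * @free:	unused software descriptors
 * @queued:	submitted but not yet issued descriptors
 * @active:	issued descriptors awaiting completion
 * @done:	completed descriptors awaiting the tasklet / user ack
 * @desc_page:	allocated descriptor pages
 * @running:	descriptor currently programmed into the hardware
 * @paused:	channel is suspended
 */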
struct nbpf_channel {
	struct dma_chan dma_chan;
	struct tasklet_struct tasklet;
	void __iomem *base;
	struct nbpf_device *nbpf;
	char name[16];
	int irq;
	dma_addr_t slave_src_addr;
	size_t slave_src_width;
	size_t slave_src_burst;
	dma_addr_t slave_dst_addr;
	size_t slave_dst_width;
	size_t slave_dst_burst;
	unsigned int terminal;
	u32 dmarq_cfg;
	unsigned long flags;
	spinlock_t lock;
	struct list_head free_links;
	struct list_head free;
	struct list_head queued;
	struct list_head active;
	struct list_head done;
	struct list_head desc_page;
	struct nbpf_desc *running;
	bool paused;
};

struct nbpf_device {
	struct dma_device dma_dev;
	void __iomem *base;
	u32 max_burst_mem_read;
	u32 max_burst_mem_write;
	struct clk *clk;
	const struct nbpf_config *config;
	unsigned int eirq;
	struct nbpf_channel chan[];
};

enum nbpf_model {
	NBPF1B4,
	NBPF1B8,
	NBPF1B16,
	NBPF4B4,
	NBPF4B8,
	NBPF4B16,
	NBPF8B4,
	NBPF8B8,
	NBPF8B16,
};

static struct nbpf_config nbpf_cfg[] = {
	[NBPF1B4] = {
		.num_channels = 1,
		.buffer_size = 4,
	},
	[NBPF1B8] = {
		.num_channels = 1,
		.buffer_size = 8,
	},
	[NBPF1B16] = {
		.num_channels = 1,
		.buffer_size = 16,
	},
	[NBPF4B4] = {
		.num_channels = 4,
		.buffer_size = 4,
	},
	[NBPF4B8] = {
		.num_channels = 4,
		.buffer_size = 8,
	},
	[NBPF4B16] = {
		.num_channels = 4,
		.buffer_size = 16,
	},
	[NBPF8B4] = {
		.num_channels = 8,
		.buffer_size = 4,
	},
	[NBPF8B8] = {
		.num_channels = 8,
		.buffer_size = 8,
	},
	[NBPF8B16] = {
		.num_channels = 8,
		.buffer_size = 16,
	},
};

#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)

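/*
 * Register access helpers: nbpf_chan_read()/nbpf_chan_write() access the
 * per-channel register block, nbpf_read()/nbpf_write() the global block;
 * every access is traced with dev_dbg().
 */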
static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{
	u32 data = ioread32(chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
	return data;
}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{
	iowrite32(data, chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
}

static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{
	u32 data = ioread32(nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
	return data;
}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{
	iowrite32(data, nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
}

static void nbpf_chan_halt(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
}

static bool nbpf_status_get(struct nbpf_channel *chan)
{
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

	return status & BIT(chan - chan->nbpf->chan);
}

static void nbpf_status_ack(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
}

static u32 nbpf_error_get(struct nbpf_device *nbpf)
{
	return nbpf_read(nbpf, NBPF_DSTAT_ER);
}

static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
{
	return nbpf->chan + __ffs(error);
}

static void nbpf_error_clear(struct nbpf_channel *chan)
{
	u32 status;
	int i;

	nbpf_chan_halt(chan);

	for (i = 1000; i; i--) {
		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
		if (!(status & NBPF_CHAN_STAT_TACT))
			break;
		cpu_relax();
	}

	if (!i)
		dev_err(chan->dma_chan.device->dev,
			"%s(): abort timeout, channel status 0x%x\n", __func__, status);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
}

static int nbpf_start(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);

	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
	chan->paused = false;

	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));

	return 0;
}

static void nbpf_chan_prepare(struct nbpf_channel *chan)
{
	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
		chan->terminal;
}

static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{
	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
	chan->terminal = 0;
	chan->flags = 0;
}

static void nbpf_chan_configure(struct nbpf_channel *chan)
{
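	/*
	 * Only the DMA request (DMARQ) configuration and NBPF_CHAN_CFG_DMS are
	 * programmed up front; all per-transfer settings are taken by the
	 * hardware from each link descriptor's CFG word (see nbpf_prep_one()).
	 */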
	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}

static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
			enum dma_transfer_direction direction)
{
	int max_burst = nbpf->config->buffer_size * 8;

	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
		switch (direction) {
		case DMA_MEM_TO_MEM:
			max_burst = min_not_zero(nbpf->max_burst_mem_read,
						 nbpf->max_burst_mem_write);
			break;
		case DMA_MEM_TO_DEV:
			if (nbpf->max_burst_mem_read)
				max_burst = nbpf->max_burst_mem_read;
			break;
		case DMA_DEV_TO_MEM:
			if (nbpf->max_burst_mem_write)
				max_burst = nbpf->max_burst_mem_write;
			break;
		case DMA_DEV_TO_DEV:
		default:
			break;
		}
	}

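	/* The maximum supported burst depends on the I/O buffer size */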
	return min_t(int, __ffs(size), ilog2(max_burst));
}

static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
			     enum dma_slave_buswidth width, u32 burst)
{
	size_t size;

	if (!burst)
		burst = 1;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		size = 8 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		size = 4 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		size = 2 * burst;
		break;

	default:
		pr_warn("%s(): invalid bus width %u\n", __func__, width);
		fallthrough;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		size = burst;
	}

	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}

static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
			 enum dma_transfer_direction direction,
			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
{
	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
	struct nbpf_desc *desc = ldesc->desc;
	struct nbpf_channel *chan = desc->chan;
	struct device *dev = chan->dma_chan.device->dev;
	size_t mem_xfer, slave_xfer;
	bool can_burst;

	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
		(last ? NBPF_HEADER_LE : 0);

	hwdesc->src_addr = src;
	hwdesc->dst_addr = dst;
	hwdesc->transaction_size = size;

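	/*
	 * mem_xfer/slave_xfer are the log2 data-size codes produced by
	 * nbpf_xfer_ds()/nbpf_xfer_size(): the memory side is limited by the
	 * controller buffer, the device side by the slave width and burst
	 * from dma_slave_config. Slaves narrower than 8 bytes (code < 3)
	 * cannot burst, so the memory side is clamped to the slave burst.
	 */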
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);

	switch (direction) {
	case DMA_DEV_TO_MEM:
		can_burst = chan->slave_src_width >= 3;
		slave_xfer = min(mem_xfer, can_burst ?
				 chan->slave_src_burst : chan->slave_src_width);

		if (mem_xfer > chan->slave_src_burst && !can_burst)
			mem_xfer = chan->slave_src_burst;

		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
			NBPF_CHAN_CFG_SBE;
		break;

	case DMA_MEM_TO_DEV:
		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
				 chan->slave_dst_burst : chan->slave_dst_width);
		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
		break;

	case DMA_MEM_TO_MEM:
		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
		break;

	default:
		return -EINVAL;
	}

	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
		NBPF_CHAN_CFG_DMS;

	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
		hwdesc->config, size, &src, &dst);

	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
				   DMA_TO_DEVICE);

	return 0;
}

static size_t nbpf_bytes_left(struct nbpf_channel *chan)
{
	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
}

static void nbpf_configure(struct nbpf_device *nbpf)
{
	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
}

static void nbpf_issue_pending(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	unsigned long flags;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);
	if (list_empty(&chan->queued))
		goto unlock;

	list_splice_tail_init(&chan->queued, &chan->active);

	if (!chan->running) {
		struct nbpf_desc *desc = list_first_entry(&chan->active,
							  struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	enum dma_status status = dma_cookie_status(dchan, cookie, state);

	if (state) {
		dma_cookie_t running;
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

		if (cookie == running) {
			state->residue = nbpf_bytes_left(chan);
			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
				state->residue);
		} else if (status == DMA_IN_PROGRESS) {
			struct nbpf_desc *desc;
			bool found = false;

			list_for_each_entry(desc, &chan->active, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}

			if (!found)
				list_for_each_entry(desc, &chan->queued, node)
					if (desc->async_tx.cookie == cookie) {
						found = true;
						break;
					}

			state->residue = found ? desc->length : 0;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

	if (chan->paused)
		status = DMA_PAUSED;

	return status;
}

static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
	struct nbpf_channel *chan = desc->chan;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);

	return cookie;
}

static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
{
	struct dma_chan *dchan = &chan->dma_chan;
	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	struct nbpf_link_desc *ldesc;
	struct nbpf_link_reg *hwdesc;
	struct nbpf_desc *desc;
	LIST_HEAD(head);
	LIST_HEAD(lhead);
	int i;
	struct device *dev = dchan->device->dev;

	if (!dpage)
		return -ENOMEM;

	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));

	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
	     i < ARRAY_SIZE(dpage->ldesc);
	     i++, ldesc++, hwdesc++) {
		ldesc->hwdesc = hwdesc;
		list_add_tail(&ldesc->node, &lhead);
		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);

		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
			hwdesc, &ldesc->hwdesc_dma_addr);
	}

	for (i = 0, desc = dpage->desc;
	     i < ARRAY_SIZE(dpage->desc);
	     i++, desc++) {
		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
		desc->async_tx.tx_submit = nbpf_tx_submit;
		desc->chan = chan;
		INIT_LIST_HEAD(&desc->sg);
		list_add_tail(&desc->node, &head);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&lhead, &chan->free_links);
	list_splice_tail(&head, &chan->free);
	list_add(&dpage->node, &chan->desc_page);
	spin_unlock_irq(&chan->lock);

	return ARRAY_SIZE(dpage->desc);
}

static void nbpf_desc_put(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
		list_move(&ldesc->node, &chan->free_links);

	list_add(&desc->node, &chan->free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void nbpf_scan_acked(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(desc, tmp, &chan->done, node)
		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
			list_move(&desc->node, &head);
			desc->user_wait = false;
		}
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

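/*
 * Allocate a software descriptor with @len link descriptors attached, growing
 * the per-channel descriptor pool page by page when the free lists run dry.
 */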
static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
{
	struct nbpf_desc *desc = NULL;
	struct nbpf_link_desc *ldesc, *prev = NULL;

	nbpf_scan_acked(chan);

	spin_lock_irq(&chan->lock);

	do {
		int i = 0, ret;

		if (list_empty(&chan->free)) {
			spin_unlock_irq(&chan->lock);
			ret = nbpf_desc_page_alloc(chan);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		list_del(&desc->node);

		do {
			if (list_empty(&chan->free_links)) {
				spin_unlock_irq(&chan->lock);
				ret = nbpf_desc_page_alloc(chan);
				if (ret < 0) {
					nbpf_desc_put(desc);
					return NULL;
				}
				spin_lock_irq(&chan->lock);
				continue;
			}

			ldesc = list_first_entry(&chan->free_links,
						 struct nbpf_link_desc, node);
			ldesc->desc = desc;
			if (prev)
				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;

			prev = ldesc;
			list_move_tail(&ldesc->node, &desc->sg);

			i++;
		} while (i < len);
	} while (!desc);

	prev->hwdesc->next = 0;

	spin_unlock_irq(&chan->lock);

	return desc;
}

static void nbpf_chan_idle(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);

	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);

	chan->running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
			__func__, desc, desc->async_tx.cookie);
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

static int nbpf_pause(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	chan->paused = true;
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);

	return 0;
}

static int nbpf_terminate_all(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
	dev_dbg(dchan->device->dev, "Terminating\n");

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);

	return 0;
}

static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	chan->slave_dst_addr = config->dst_addr;
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);

	return 0;
}

static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
		struct scatterlist *src_sg, struct scatterlist *dst_sg,
		size_t len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct nbpf_link_desc *ldesc;
	struct scatterlist *mem_sg;
	struct nbpf_desc *desc;
	bool inc_src, inc_dst;
	size_t data_len = 0;
	int i = 0;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		mem_sg = dst_sg;
		inc_src = false;
		inc_dst = true;
		break;

	case DMA_MEM_TO_DEV:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = false;
		break;

	default:
	case DMA_MEM_TO_MEM:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = true;
	}

	desc = nbpf_desc_get(chan, len);
	if (!desc)
		return NULL;

	desc->async_tx.flags = flags;
	desc->async_tx.cookie = -EBUSY;
	desc->user_wait = false;

	list_for_each_entry(ldesc, &desc->sg, node) {
		int ret = nbpf_prep_one(ldesc, direction,
					sg_dma_address(src_sg),
					sg_dma_address(dst_sg),
					sg_dma_len(mem_sg),
					i == len - 1);
		if (ret < 0) {
			nbpf_desc_put(desc);
			return NULL;
		}
		data_len += sg_dma_len(mem_sg);
		if (inc_src)
			src_sg = sg_next(src_sg);
		if (inc_dst)
			dst_sg = sg_next(dst_sg);
		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
		i++;
	}

	desc->length = data_len;

	return &desc->async_tx;
}

static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = len;
	sg_dma_len(&src_sg) = len;

	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
		__func__, len, &src, &dst);

	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
			    DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist slave_sg;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	sg_init_table(&slave_sg, 1);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);

	case DMA_DEV_TO_MEM:
		sg_dma_address(&slave_sg) = chan->slave_src_addr;
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);

	default:
		return NULL;
	}
}

static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	int ret;

	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->free_links);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->done);

	ret = nbpf_desc_page_alloc(chan);
	if (ret < 0)
		return ret;

	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
		chan->terminal);

	nbpf_chan_configure(chan);

	return ret;
}

static void nbpf_free_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct nbpf_desc_page *dpage, *tmp;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);

	nbpf_chan_prepare_default(chan);

	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
		struct nbpf_link_desc *ldesc;
		int i;

		list_del(&dpage->node);
		for (i = 0, ldesc = dpage->ldesc;
		     i < ARRAY_SIZE(dpage->ldesc);
		     i++, ldesc++)
			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
		free_page((unsigned long)dpage);
	}
}

static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct nbpf_device *nbpf = ofdma->of_dma_data;
	struct dma_chan *dchan;
	struct nbpf_channel *chan;

	if (dma_spec->args_count != 2)
		return NULL;

	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
	if (!dchan)
		return NULL;

	dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
		dma_spec->np);

	chan = nbpf_to_chan(dchan);

	chan->terminal = dma_spec->args[0];
	chan->flags = dma_spec->args[1];

	nbpf_chan_prepare(chan);
	nbpf_chan_configure(chan);

	return dchan;
}

static void nbpf_chan_tasklet(struct tasklet_struct *t)
{
	struct nbpf_channel *chan = from_tasklet(chan, t, tasklet);
	struct nbpf_desc *desc, *tmp;
	struct dmaengine_desc_callback cb;

	while (!list_empty(&chan->done)) {
		bool found = false, must_put, recycling = false;

		spin_lock_irq(&chan->lock);

		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
			if (!desc->user_wait) {
				found = true;
				break;
			} else if (async_tx_test_ack(&desc->async_tx)) {
				list_del(&desc->node);
				spin_unlock_irq(&chan->lock);
				nbpf_desc_put(desc);
				recycling = true;
				break;
			}
		}

		if (recycling)
			continue;

		if (!found) {
			spin_unlock_irq(&chan->lock);
			break;
		}

		dma_cookie_complete(&desc->async_tx);

		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			must_put = true;
		} else {
			desc->user_wait = true;
			must_put = false;
		}

		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		spin_unlock_irq(&chan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);

		if (must_put)
			nbpf_desc_put(desc);
	}
}

static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{
	struct nbpf_channel *chan = dev;
	bool done = nbpf_status_get(chan);
	struct nbpf_desc *desc;
	irqreturn_t ret;
	bool bh = false;

	if (!done)
		return IRQ_NONE;

	nbpf_status_ack(chan);

	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);

	spin_lock(&chan->lock);
	desc = chan->running;
	if (WARN_ON(!desc)) {
		ret = IRQ_NONE;
		goto unlock;
	} else {
		ret = IRQ_HANDLED;
		bh = true;
	}

	list_move_tail(&desc->node, &chan->done);
	chan->running = NULL;

	if (!list_empty(&chan->active)) {
		desc = list_first_entry(&chan->active,
					struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock(&chan->lock);

	if (bh)
		tasklet_schedule(&chan->tasklet);

	return ret;
}

static irqreturn_t nbpf_err_irq(int irq, void *dev)
{
	struct nbpf_device *nbpf = dev;
	u32 error = nbpf_error_get(nbpf);

	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

	if (!error)
		return IRQ_NONE;

	do {
		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);

		nbpf_error_clear(chan);
		nbpf_chan_idle(chan);
		error = nbpf_error_get(nbpf);
	} while (error);

	return IRQ_HANDLED;
}

static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
{
	struct dma_device *dma_dev = &nbpf->dma_dev;
	struct nbpf_channel *chan = nbpf->chan + n;
	int ret;

	chan->nbpf = nbpf;
	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
	INIT_LIST_HEAD(&chan->desc_page);
	spin_lock_init(&chan->lock);
	chan->dma_chan.device = dma_dev;
	dma_cookie_init(&chan->dma_chan);
	nbpf_chan_prepare_default(chan);

	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);

	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

	tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
	ret = devm_request_irq(dma_dev->dev, chan->irq,
			       nbpf_chan_irq, IRQF_SHARED,
			       chan->name, chan);
	if (ret < 0)
		return ret;

	list_add_tail(&chan->dma_chan.device_node,
		      &dma_dev->channels);

	return 0;
}

static const struct of_device_id nbpf_match[] = {
	{.compatible = "renesas,nbpfaxi64dmac1b4",	.data = &nbpf_cfg[NBPF1B4]},
	{.compatible = "renesas,nbpfaxi64dmac1b8",	.data = &nbpf_cfg[NBPF1B8]},
	{.compatible = "renesas,nbpfaxi64dmac1b16",	.data = &nbpf_cfg[NBPF1B16]},
	{.compatible = "renesas,nbpfaxi64dmac4b4",	.data = &nbpf_cfg[NBPF4B4]},
	{.compatible = "renesas,nbpfaxi64dmac4b8",	.data = &nbpf_cfg[NBPF4B8]},
	{.compatible = "renesas,nbpfaxi64dmac4b16",	.data = &nbpf_cfg[NBPF4B16]},
	{.compatible = "renesas,nbpfaxi64dmac8b4",	.data = &nbpf_cfg[NBPF8B4]},
	{.compatible = "renesas,nbpfaxi64dmac8b8",	.data = &nbpf_cfg[NBPF8B8]},
	{.compatible = "renesas,nbpfaxi64dmac8b16",	.data = &nbpf_cfg[NBPF8B16]},
	{}
};
MODULE_DEVICE_TABLE(of, nbpf_match);

static int nbpf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct nbpf_device *nbpf;
	struct dma_device *dma_dev;
	struct resource *iomem;
	const struct nbpf_config *cfg;
	int num_channels;
	int ret, irq, eirq, i;
	int irqbuf[9];
	unsigned int irqs = 0;

	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);

	if (!np)
		return -ENODEV;

	cfg = of_device_get_match_data(dev);
	num_channels = cfg->num_channels;

	nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
			    GFP_KERNEL);
	if (!nbpf)
		return -ENOMEM;

	dma_dev = &nbpf->dma_dev;
	dma_dev->dev = dev;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nbpf->base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(nbpf->base))
		return PTR_ERR(nbpf->base);

	nbpf->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nbpf->clk))
		return PTR_ERR(nbpf->clk);

	of_property_read_u32(np, "max-burst-mem-read",
			     &nbpf->max_burst_mem_read);
	of_property_read_u32(np, "max-burst-mem-write",
			     &nbpf->max_burst_mem_write);

	nbpf->config = cfg;

	for (i = 0; i < ARRAY_SIZE(irqbuf); i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq < 0 && irq != -ENXIO)
			return irq;
		if (irq > 0)
			irqbuf[irqs++] = irq;
	}

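	/*
	 * Valid interrupt configurations: a single IRQ shared by all channels
	 * and errors, one error IRQ plus one IRQ shared by all channels, or
	 * one error IRQ plus one IRQ per channel.
	 */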
	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
		return -ENXIO;

	if (irqs == 1) {
		eirq = irqbuf[0];

		for (i = 0; i < num_channels; i++)
			nbpf->chan[i].irq = irqbuf[0];
	} else {
		eirq = platform_get_irq_byname(pdev, "error");
		if (eirq < 0)
			return eirq;

		if (irqs == num_channels + 1) {
			struct nbpf_channel *chan;

			for (i = 0, chan = nbpf->chan; i <= num_channels;
			     i++, chan++) {
				if (irqbuf[i] == eirq)
					i++;
				chan->irq = irqbuf[i];
			}

			if (chan != nbpf->chan + num_channels)
				return -EINVAL;
		} else {
			if (irqbuf[0] == eirq)
				irq = irqbuf[1];
			else
				irq = irqbuf[0];

			for (i = 0; i < num_channels; i++)
				nbpf->chan[i].irq = irq;
		}
	}

	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
			       IRQF_SHARED, "dma error", nbpf);
	if (ret < 0)
		return ret;
	nbpf->eirq = eirq;

	INIT_LIST_HEAD(&dma_dev->channels);

	for (i = 0; i < num_channels; i++) {
		ret = nbpf_chan_probe(nbpf, i);
		if (ret < 0)
			return ret;
	}

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

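	/* Common and MEMCPY operations */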
	dma_dev->device_alloc_chan_resources = nbpf_alloc_chan_resources;
	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
	dma_dev->device_tx_status = nbpf_tx_status;
	dma_dev->device_issue_pending = nbpf_issue_pending;

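	/* Slave (device) transfer and channel-control operations */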
	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
	dma_dev->device_config = nbpf_config;
	dma_dev->device_pause = nbpf_pause;
	dma_dev->device_terminate_all = nbpf_terminate_all;

	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, nbpf);

	ret = clk_prepare_enable(nbpf->clk);
	if (ret < 0)
		return ret;

	nbpf_configure(nbpf);

	ret = dma_async_device_register(dma_dev);
	if (ret < 0)
		goto e_clk_off;

	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
	if (ret < 0)
		goto e_dma_dev_unreg;

	return 0;

e_dma_dev_unreg:
	dma_async_device_unregister(dma_dev);
e_clk_off:
	clk_disable_unprepare(nbpf->clk);

	return ret;
}

static int nbpf_remove(struct platform_device *pdev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);

	for (i = 0; i < nbpf->config->num_channels; i++) {
		struct nbpf_channel *chan = nbpf->chan + i;

		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&nbpf->dma_dev);
	clk_disable_unprepare(nbpf->clk);

	return 0;
}

static const struct platform_device_id nbpf_ids[] = {
	{"nbpfaxi64dmac1b4",	(kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
	{"nbpfaxi64dmac1b8",	(kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
	{"nbpfaxi64dmac1b16",	(kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
	{"nbpfaxi64dmac4b4",	(kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
	{"nbpfaxi64dmac4b8",	(kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
	{"nbpfaxi64dmac4b16",	(kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
	{"nbpfaxi64dmac8b4",	(kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
	{"nbpfaxi64dmac8b8",	(kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
	{"nbpfaxi64dmac8b16",	(kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
	{},
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);

#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	clk_disable_unprepare(nbpf->clk);

	return 0;
}

static int nbpf_runtime_resume(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	return clk_prepare_enable(nbpf->clk);
}
#endif

static const struct dev_pm_ops nbpf_pm_ops = {
	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};

static struct platform_driver nbpf_driver = {
	.driver = {
		.name = "dma-nbpf",
		.of_match_table = nbpf_match,
		.pm = &nbpf_pm_ops,
	},
	.id_table = nbpf_ids,
	.probe = nbpf_probe,
	.remove = nbpf_remove,
};

module_platform_driver(nbpf_driver);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
MODULE_LICENSE("GPL v2");