// SPDX-License-Identifier: GPL-2.0-only
/*
 * Offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
        XOR_ORION,
        XOR_ARMADA_38X,
        XOR_ARMADA_37XX,
};

enum mv_xor_mode {
        XOR_MODE_IN_REG,
        XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)            \
        container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)              \
        container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
        ((chan)->dmadev.dev)

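/*
 * Initialize a hardware descriptor: mark it owned by the DMA engine,
 * clear its chain pointer and program destination address, byte count
 * and (optionally) the end-of-descriptor interrupt.
 */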
static void mv_desc_init(struct mv_xor_desc_slot *desc,
                         dma_addr_t addr, u32 byte_count,
                         enum dma_ctrl_flags flags)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        hw_desc->status = XOR_DESC_DMA_OWNED;
        hw_desc->phy_next_desc = 0;
        /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
        hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
                                XOR_DESC_EOD_INT_EN : 0;
        hw_desc->phy_dest_addr = addr;
        hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        switch (desc->type) {
        case DMA_XOR:
        case DMA_INTERRUPT:
                hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
                break;
        case DMA_MEMCPY:
                hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
                break;
        default:
                BUG();
                return;
        }
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
                                  u32 next_desc_addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        BUG_ON(hw_desc->phy_next_desc);
        hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
                                 int index, dma_addr_t addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
        if (desc->type == DMA_XOR)
                hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
        return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
                                        u32 next_desc_addr)
{
        writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
        u32 val = readl_relaxed(XOR_INTR_MASK(chan));

        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
        writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
        u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

        intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
        return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
        u32 val;

        val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
        val = ~(val << (chan->idx * 16));
        dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
        writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
        u32 val = 0xFFFF0000 >> (chan->idx * 16);

        writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
                             u32 op_mode)
{
        u32 config = readl_relaxed(XOR_CONFIG(chan));

        config &= ~0x7;
        config |= op_mode;

#if defined(__BIG_ENDIAN)
        config |= XOR_DESCRIPTOR_SWAP;
#else
        config &= ~XOR_DESCRIPTOR_SWAP;
#endif

        writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

        /* writel ensures all descriptors are flushed before activation */
        writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
        u32 state = readl_relaxed(XOR_ACTIVATION(chan));

        state = (state >> 4) & 0x3;

        return (state == 1) ? 1 : 0;
}

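/*
 * mv_chan_start_new_chain - program the engine to start on the
 * descriptor chain headed by @sw_desc.
 * Caller must hold &mv_chan->lock.
 */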
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
                                    struct mv_xor_desc_slot *sw_desc)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
                __func__, __LINE__, sw_desc);

        /* set the hardware chain */
        mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

        mv_chan->pending++;
        mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                                struct mv_xor_chan *mv_chan,
                                dma_cookie_t cookie)
{
        BUG_ON(desc->async_tx.cookie < 0);

        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;

                dma_descriptor_unmap(&desc->async_tx);
                /*
                 * call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
        }

        /* run dependent operations */
        dma_run_dependencies(&desc->async_tx);

        return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 node) {

                if (async_tx_test_ack(&iter->async_tx)) {
                        list_move_tail(&iter->node, &mv_chan->free_slots);
                        if (!list_empty(&iter->sg_tx_list)) {
                                list_splice_tail_init(&iter->sg_tx_list,
                                                      &mv_chan->free_slots);
                        }
                }
        }
        return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
                   struct mv_xor_chan *mv_chan)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
                __func__, __LINE__, desc, desc->async_tx.flags);

        /*
         * The client is allowed to attach dependent operations
         * until "ack" is set.
         */
        if (!async_tx_test_ack(&desc->async_tx)) {
                /* leave the descriptor in the completed list until acked */
                list_move_tail(&desc->node, &mv_chan->completed_slots);
                if (!list_empty(&desc->sg_tx_list)) {
                        list_splice_tail_init(&desc->sg_tx_list,
                                              &mv_chan->completed_slots);
                }
        } else {
                list_move_tail(&desc->node, &mv_chan->free_slots);
                if (!list_empty(&desc->sg_tx_list)) {
                        list_splice_tail_init(&desc->sg_tx_list,
                                              &mv_chan->free_slots);
                }
        }

        return 0;
}

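/* This function must be called with the mv_xor_chan spinlock held */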
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;
        dma_cookie_t cookie = 0;
        int busy = mv_chan_is_busy(mv_chan);
        u32 current_desc = mv_chan_get_current_desc(mv_chan);
        int current_cleaned = 0;
        struct mv_xor_desc *hw_desc;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
        mv_chan_clean_completed_slots(mv_chan);

        /*
         * Walk the software chain from the oldest descriptor and
         * retire everything the hardware has finished.
         */
        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 node) {

                /* clean finished descriptors */
                hw_desc = iter->hw_desc;
                if (hw_desc->status & XOR_DESC_SUCCESS) {
                        cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
                                                                 cookie);

                        /* done processing desc, clean slot */
                        mv_desc_clean_slot(iter, mv_chan);

                        /* break if we did clean the current */
                        if (iter->async_tx.phys == current_desc) {
                                current_cleaned = 1;
                                break;
                        }
                } else {
                        if (iter->async_tx.phys == current_desc) {
                                current_cleaned = 0;
                                break;
                        }
                }
        }

        if ((busy == 0) && !list_empty(&mv_chan->chain)) {
                if (current_cleaned) {
                        /*
                         * current descriptor cleaned and removed, run
                         * from list head
                         */
                        iter = list_entry(mv_chan->chain.next,
                                          struct mv_xor_desc_slot,
                                          node);
                        mv_chan_start_new_chain(mv_chan, iter);
                } else {
                        if (!list_is_last(&iter->node, &mv_chan->chain)) {
                                /*
                                 * descriptors are still waiting after
                                 * current, trigger them
                                 */
                                iter = list_entry(iter->node.next,
                                                  struct mv_xor_desc_slot,
                                                  node);
                                mv_chan_start_new_chain(mv_chan, iter);
                        } else {
                                /*
                                 * some descriptors are still waiting
                                 * to be cleaned
                                 */
                                tasklet_schedule(&mv_chan->irq_tasklet);
                        }
                }
        }

        if (cookie > 0)
                mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(struct tasklet_struct *t)
{
        struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);

        spin_lock(&chan->lock);
        mv_chan_slot_cleanup(chan);
        spin_unlock(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter;

        spin_lock_bh(&mv_chan->lock);

        if (!list_empty(&mv_chan->free_slots)) {
                iter = list_first_entry(&mv_chan->free_slots,
                                        struct mv_xor_desc_slot,
                                        node);

                list_move_tail(&iter->node, &mv_chan->allocated_slots);

                spin_unlock_bh(&mv_chan->lock);

                /* pre-ack descriptor */
                async_tx_ack(&iter->async_tx);
                iter->async_tx.cookie = -EBUSY;

                return iter;
        }

        spin_unlock_bh(&mv_chan->lock);

        /* try to free some slots if the allocation fails */
        tasklet_schedule(&mv_chan->irq_tasklet);

        return NULL;
}

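/*
 * Append the descriptor to the channel's software chain and, if the
 * engine has already consumed the previous tail, (re)start the
 * hardware on the new chain.
 */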
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
        struct mv_xor_desc_slot *old_chain_tail;
        dma_cookie_t cookie;
        int new_hw_chain = 1;

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        spin_lock_bh(&mv_chan->lock);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&mv_chan->chain))
                list_move_tail(&sw_desc->node, &mv_chan->chain);
        else {
                new_hw_chain = 0;

                old_chain_tail = list_entry(mv_chan->chain.prev,
                                            struct mv_xor_desc_slot,
                                            node);
                list_move_tail(&sw_desc->node, &mv_chan->chain);

                dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
                        &old_chain_tail->async_tx.phys);

                /* fix up the hardware chain */
                mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

                /* if the channel is not busy */
                if (!mv_chan_is_busy(mv_chan)) {
                        u32 current_desc = mv_chan_get_current_desc(mv_chan);
                        /*
                         * and the current desc is the end of the chain
                         * before the append, then we need to start the
                         * channel
                         */
                        if (current_desc == old_chain_tail->async_tx.phys)
                                new_hw_chain = 1;
                }
        }

        if (new_hw_chain)
                mv_chan_start_new_chain(mv_chan, sw_desc);

        spin_unlock_bh(&mv_chan->lock);

        return cookie;
}

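/* returns the number of allocated descriptors */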
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
        void *virt_desc;
        dma_addr_t dma_desc;
        int idx;
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *slot = NULL;
        int num_descs_in_pool = MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE;

        /* Allocate descriptor slots */
        idx = mv_chan->slots_allocated;
        while (idx < num_descs_in_pool) {
                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
                if (!slot) {
                        dev_info(mv_chan_to_devp(mv_chan),
                                 "channel only initialized %d descriptor slots",
                                 idx);
                        break;
                }
                virt_desc = mv_chan->dma_desc_pool_virt;
                slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = mv_xor_tx_submit;
                INIT_LIST_HEAD(&slot->node);
                INIT_LIST_HEAD(&slot->sg_tx_list);
                dma_desc = mv_chan->dma_desc_pool;
                slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
                slot->idx = idx++;

                spin_lock_bh(&mv_chan->lock);
                mv_chan->slots_allocated = idx;
                list_add_tail(&slot->node, &mv_chan->free_slots);
                spin_unlock_bh(&mv_chan->lock);
        }

        dev_dbg(mv_chan_to_devp(mv_chan),
                "allocated %d descriptor slots\n",
                mv_chan->slots_allocated);

        return mv_chan->slots_allocated ? : -ENOMEM;
}

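/*
 * Check whether the given address is already covered by one of the
 * engine's MBus windows; if not, query the mbus layer for the
 * target/attribute of the region and program a free I/O window so the
 * XOR engine can reach it. The window ranges are cached so the MMIO
 * registers do not have to be re-read on every transfer.
 */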
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
        struct mv_xor_device *xordev = mv_chan->xordev;
        void __iomem *base = mv_chan->mmr_high_base;
        u32 win_enable;
        u32 size;
        u8 target, attr;
        int ret;
        int i;

        /* Nothing needs to be done for the Armada 3700 */
        if (xordev->xor_type == XOR_ARMADA_37XX)
                return 0;

        /*
         * Loop over the cached windows to check if the address is
         * already covered by one of them.
         */
        for (i = 0; i < WINDOW_COUNT; i++) {
                if (addr >= xordev->win_start[i] &&
                    addr <= xordev->win_end[i]) {
                        /* Window is already configured, go to next one */
                        return 0;
                }
        }

        /*
         * The address is not covered: ask the mbus layer for the
         * target, attribute and size of the region it belongs to.
         */
        ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
        if (ret < 0)
                return 0;

        /*
         * Turn the size into a mask and align the base address so the
         * new window covers the whole region.
         */
        size -= 1;
        addr &= ~size;

        /* Read the current window enable bits */
        win_enable = readl(base + WINDOW_BAR_ENABLE(0));

        /* Pick the first window that is not enabled yet */
        i = ffs(~win_enable) - 1;
        if (i >= WINDOW_COUNT)
                return -ENOMEM;

        writel((addr & 0xffff0000) | (attr << 8) | target,
               base + WINDOW_BASE(i));
        writel(size & 0xffff0000, base + WINDOW_SIZE(i));

        /* Fill the caching variables for later use */
        xordev->win_start[i] = addr;
        xordev->win_end[i] = addr + size;

        win_enable |= (1 << i);
        win_enable |= 3 << (16 + (2 * i));
        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));

        return 0;
}

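/*
 * Prepare an XOR descriptor: program the destination and every source
 * address into a free slot, adding I/O windows on demand for addresses
 * the engine cannot reach yet.
 */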
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                    unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc;
        int ret;

        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
                __func__, src_cnt, len, &dest, flags);

        /* Check if a new window needs to get added for 'dest' */
        ret = mv_xor_add_io_win(mv_chan, dest);
        if (ret)
                return NULL;

        sw_desc = mv_chan_alloc_slot(mv_chan);
        if (sw_desc) {
                sw_desc->type = DMA_XOR;
                sw_desc->async_tx.flags = flags;
                mv_desc_init(sw_desc, dest, len, flags);
                if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
                        mv_desc_set_mode(sw_desc);
                while (src_cnt--) {
                        /* Check if a new window needs to get added for 'src' */
                        ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
                        if (ret)
                                return NULL;
                        mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
                }
        }

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p async_tx %p \n",
                __func__, sw_desc, &sw_desc->async_tx);
        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                       size_t len, unsigned long flags)
{
        /*
         * A MEMCPY operation is identical to an XOR operation with only
         * one source address.
         */
        return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        dma_addr_t src, dest;
        size_t len;

        src = mv_chan->dummy_src_addr;
        dest = mv_chan->dummy_dst_addr;
        len = MV_XOR_MIN_BYTE_COUNT;

        /*
         * An interrupt descriptor is implemented as a minimum-sized
         * transfer between the two per-channel dummy buffers.
         */
        return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *iter, *_iter;
        int in_use_descs = 0;

        spin_lock_bh(&mv_chan->lock);

        mv_chan_slot_cleanup(mv_chan);

        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 node) {
                in_use_descs++;
                list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 node) {
                in_use_descs++;
                list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
                                 node) {
                in_use_descs++;
                list_move_tail(&iter->node, &mv_chan->free_slots);
        }
        list_for_each_entry_safe_reverse(
                iter, _iter, &mv_chan->free_slots, node) {
                list_del(&iter->node);
                kfree(iter);
                mv_chan->slots_allocated--;
        }

        dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
                __func__, mv_chan->slots_allocated);
        spin_unlock_bh(&mv_chan->lock);

        if (in_use_descs)
                dev_err(mv_chan_to_devp(mv_chan),
                        "freeing %d in use descriptors!\n", in_use_descs);
}

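/*
 * mv_xor_status - poll the status of an XOR transaction, running the
 * cleanup path first if the cookie has not completed yet.
 */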
static enum dma_status mv_xor_status(struct dma_chan *chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_bh(&mv_chan->lock);
        mv_chan_slot_cleanup(mv_chan);
        spin_unlock_bh(&mv_chan->lock);

        return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
        u32 val;

        val = readl_relaxed(XOR_CONFIG(chan));
        dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

        val = readl_relaxed(XOR_ACTIVATION(chan));
        dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

        val = readl_relaxed(XOR_INTR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

        val = readl_relaxed(XOR_INTR_MASK(chan));
        dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

        val = readl_relaxed(XOR_ERROR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

        val = readl_relaxed(XOR_ERROR_ADDR(chan));
        dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
                                          u32 intr_cause)
{
        if (intr_cause & XOR_INT_ERR_DECODE) {
                dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
                return;
        }

        dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
                chan->idx, intr_cause);

        mv_chan_dump_regs(chan);
        WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
        struct mv_xor_chan *chan = data;
        u32 intr_cause = mv_chan_get_intr_cause(chan);

        dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

        if (intr_cause & XOR_INTR_ERRORS)
                mv_chan_err_interrupt_handler(chan, intr_cause);

        tasklet_schedule(&chan->irq_tasklet);

        mv_chan_clear_eoc_cause(chan);

        return IRQ_HANDLED;
}

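/*
 * Kick the hardware only once at least MV_XOR_THRESHOLD descriptors
 * have been queued since the last activation.
 */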
static void mv_xor_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

        if (mv_chan->pending >= MV_XOR_THRESHOLD) {
                mv_chan->pending = 0;
                mv_chan_activate(mv_chan);
        }
}

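/*
 * Perform a memcpy transaction to verify the hardware works.
 */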
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
        int i, ret;
        void *src, *dest;
        dma_addr_t src_dma, dest_dma;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;
        int err = 0;

        src = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;

        dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < PAGE_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;

        dma_chan = &mv_chan->dmachan;
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
        if (!unmap) {
                err = -ENOMEM;
                goto free_resources;
        }

        src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
                               offset_in_page(src), PAGE_SIZE,
                               DMA_TO_DEVICE);
        unmap->addr[0] = src_dma;

        ret = dma_mapping_error(dma_chan->device->dev, src_dma);
        if (ret) {
                err = -ENOMEM;
                goto free_resources;
        }
        unmap->to_cnt = 1;

        dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
                                offset_in_page(dest), PAGE_SIZE,
                                DMA_FROM_DEVICE);
        unmap->addr[1] = dest_dma;

        ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
        if (ret) {
                err = -ENOMEM;
                goto free_resources;
        }
        unmap->from_cnt = 1;
        unmap->len = PAGE_SIZE;

        tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
                                    PAGE_SIZE, 0);
        if (!tx) {
                dev_err(dma_chan->device->dev,
                        "Self-test cannot prepare operation, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        cookie = mv_xor_tx_submit(tx);
        if (dma_submit_error(cookie)) {
                dev_err(dma_chan->device->dev,
                        "Self-test submit error, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(1);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        if (memcmp(src, dest, PAGE_SIZE)) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        dmaengine_unmap_put(unmap);
        mv_xor_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

#define MV_XOR_NUM_SRC_TEST 4
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
        int i, src_idx, ret;
        struct page *dest;
        struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        int err = 0;
        int src_count = MV_XOR_NUM_SRC_TEST;

        for (src_idx = 0; src_idx < src_count; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < src_count; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < src_count; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = &mv_chan->dmachan;
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
                                         GFP_KERNEL);
        if (!unmap) {
                err = -ENOMEM;
                goto free_resources;
        }

        /* test xor */
        for (i = 0; i < src_count; i++) {
                unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
                                              0, PAGE_SIZE, DMA_TO_DEVICE);
                dma_srcs[i] = unmap->addr[i];
                ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
                if (ret) {
                        err = -ENOMEM;
                        goto free_resources;
                }
                unmap->to_cnt++;
        }

        unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
                                              DMA_FROM_DEVICE);
        dest_dma = unmap->addr[src_count];
        ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
        if (ret) {
                err = -ENOMEM;
                goto free_resources;
        }
        unmap->from_cnt = 1;
        unmap->len = PAGE_SIZE;

        tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                 src_count, PAGE_SIZE, 0);
        if (!tx) {
                dev_err(dma_chan->device->dev,
                        "Self-test cannot prepare operation, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        cookie = mv_xor_tx_submit(tx);
        if (dma_submit_error(cookie)) {
                dev_err(dma_chan->device->dev,
                        "Self-test submit error, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_err(dma_chan->device->dev,
                                "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
                                i, ptr[i], cmp_word);
                        err = -ENODEV;
                        goto free_resources;
                }
        }

free_resources:
        dmaengine_unmap_put(unmap);
        mv_xor_free_chan_resources(dma_chan);
out:
        src_idx = src_count;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
        struct dma_chan *chan, *_chan;
        struct device *dev = mv_chan->dmadev.dev;

        dma_async_device_unregister(&mv_chan->dmadev);

        dma_free_coherent(dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
        dma_unmap_single(dev, mv_chan->dummy_src_addr,
                         MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
        dma_unmap_single(dev, mv_chan->dummy_dst_addr,
                         MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

        list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
                                 device_node) {
                list_del(&chan->device_node);
        }

        free_irq(mv_chan->irq, mv_chan);

        return 0;
}

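/*
 * Set up one XOR channel: dummy buffers, descriptor pool, dmaengine
 * callbacks, interrupt handling and the hardware self-tests.
 */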
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
                   struct platform_device *pdev,
                   int idx, dma_cap_mask_t cap_mask, int irq)
{
        int ret = 0;
        struct mv_xor_chan *mv_chan;
        struct dma_device *dma_dev;

        mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
        if (!mv_chan)
                return ERR_PTR(-ENOMEM);

        mv_chan->idx = idx;
        mv_chan->irq = irq;
        if (xordev->xor_type == XOR_ORION)
                mv_chan->op_in_desc = XOR_MODE_IN_REG;
        else
                mv_chan->op_in_desc = XOR_MODE_IN_DESC;

        dma_dev = &mv_chan->dmadev;
        dma_dev->dev = &pdev->dev;
        mv_chan->xordev = xordev;

        /*
         * These source and destination dummy buffers are used to implement
         * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
         * Hence, we only need to map the buffers at initialization-time.
         */
        mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
                mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
        mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
                mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

        /* allocate coherent memory for hardware descriptors */
        mv_chan->dma_desc_pool_virt =
                dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
                             GFP_KERNEL);
        if (!mv_chan->dma_desc_pool_virt)
                return ERR_PTR(-ENOMEM);

        /* discover transaction capabilities */
        dma_dev->cap_mask = cap_mask;

        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
        dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
        dma_dev->device_tx_status = mv_xor_status;
        dma_dev->device_issue_pending = mv_xor_issue_pending;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
                dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->max_xor = 8;
                dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
        }

        mv_chan->mmr_base = xordev->xor_base;
        mv_chan->mmr_high_base = xordev->xor_high_base;
        tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);

        /* clear errors before enabling interrupts */
        mv_chan_clear_err_status(mv_chan);

        ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
                          0, dev_name(&pdev->dev), mv_chan);
        if (ret)
                goto err_free_dma;

        mv_chan_unmask_interrupts(mv_chan);

        if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
                mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
        else
                mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

        spin_lock_init(&mv_chan->lock);
        INIT_LIST_HEAD(&mv_chan->chain);
        INIT_LIST_HEAD(&mv_chan->completed_slots);
        INIT_LIST_HEAD(&mv_chan->free_slots);
        INIT_LIST_HEAD(&mv_chan->allocated_slots);
        mv_chan->dmachan.device = dma_dev;
        dma_cookie_init(&mv_chan->dmachan);

        list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                ret = mv_chan_memcpy_self_test(mv_chan);
                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
                if (ret)
                        goto err_free_irq;
        }

        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                ret = mv_chan_xor_self_test(mv_chan);
                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
                if (ret)
                        goto err_free_irq;
        }

        dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
                 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
                 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
                 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_free_irq;

        return mv_chan;

err_free_irq:
        free_irq(mv_chan->irq, mv_chan);
err_free_dma:
        dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
        return ERR_PTR(ret);
}

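/*
 * Program the XOR engine MBus windows to mirror the DRAM layout
 * reported by the mbus driver, and cache the window ranges for
 * mv_xor_add_io_win().
 */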
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
                         const struct mbus_dram_target_info *dram)
{
        void __iomem *base = xordev->xor_high_base;
        u32 win_enable = 0;
        int i;

        for (i = 0; i < 8; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel((cs->base & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

                /* Fill the caching variables for later use */
                xordev->win_start[i] = cs->base;
                xordev->win_end[i] = cs->base + cs->size - 1;

                win_enable |= (1 << i);
                win_enable |= 3 << (16 + (2 * i));
        }

        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
        writel(0, base + WINDOW_OVERRIDE_CTRL(0));
        writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
        void __iomem *base = xordev->xor_high_base;
        u32 win_enable = 0;
        int i;

        for (i = 0; i < 8; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }
        /*
         * On Armada 3700, open a single default window covering the
         * whole 4 GB address space.
         */
        writel(0xffff0000, base + WINDOW_SIZE(0));
        win_enable |= 1;
        win_enable |= 3 << 16;

        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
        writel(0, base + WINDOW_OVERRIDE_CTRL(0));
        writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

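/*
 * Suspend/resume: save and restore the per-channel configuration and
 * interrupt mask registers; the MBus windows are reprogrammed from
 * scratch on resume.
 */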
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct mv_xor_device *xordev = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                struct mv_xor_chan *mv_chan = xordev->channels[i];

                if (!mv_chan)
                        continue;

                mv_chan->saved_config_reg =
                        readl_relaxed(XOR_CONFIG(mv_chan));
                mv_chan->saved_int_mask_reg =
                        readl_relaxed(XOR_INTR_MASK(mv_chan));
        }

        return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
        struct mv_xor_device *xordev = platform_get_drvdata(dev);
        const struct mbus_dram_target_info *dram;
        int i;

        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                struct mv_xor_chan *mv_chan = xordev->channels[i];

                if (!mv_chan)
                        continue;

                writel_relaxed(mv_chan->saved_config_reg,
                               XOR_CONFIG(mv_chan));
                writel_relaxed(mv_chan->saved_int_mask_reg,
                               XOR_INTR_MASK(mv_chan));
        }

        if (xordev->xor_type == XOR_ARMADA_37XX) {
                mv_xor_conf_mbus_windows_a3700(xordev);
                return 0;
        }

        dram = mv_mbus_dram_info();
        if (dram)
                mv_xor_conf_mbus_windows(xordev, dram);

        return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
        { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
        { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
        { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
        {},
};

static unsigned int mv_xor_engine_count;

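/*
 * Probe the shared XOR engine: map its register windows, configure the
 * MBus windows, then register one DMA channel per DT sub-node (or per
 * platform-data entry), bounded by the number of CPUs.
 */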
static int mv_xor_probe(struct platform_device *pdev)
{
        const struct mbus_dram_target_info *dram;
        struct mv_xor_device *xordev;
        struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *res;
        unsigned int max_engines, max_channels;
        int i, ret;

        dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

        xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
        if (!xordev)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (!xordev->xor_base)
                return -EBUSY;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return -ENODEV;

        xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
        if (!xordev->xor_high_base)
                return -EBUSY;

        platform_set_drvdata(pdev, xordev);

        /*
         * We need to know which type of XOR device we use before
         * setting up the MBus windows. In the non-DT case it can only
         * be the legacy Orion variant.
         */
        xordev->xor_type = XOR_ORION;
        if (pdev->dev.of_node) {
                const struct of_device_id *of_id =
                        of_match_device(mv_xor_dt_ids,
                                        &pdev->dev);

                xordev->xor_type = (uintptr_t)of_id->data;
        }

        /*
         * (Re-)program MBus remapping windows according to the SoC.
         */
        if (xordev->xor_type == XOR_ARMADA_37XX) {
                mv_xor_conf_mbus_windows_a3700(xordev);
        } else {
                dram = mv_mbus_dram_info();
                if (dram)
                        mv_xor_conf_mbus_windows(xordev, dram);
        }

        /*
         * Not all platforms can gate the clock, so it is not an error
         * if the clock does not exist.
         */
        xordev->clk = clk_get(&pdev->dev, NULL);
        if (!IS_ERR(xordev->clk))
                clk_prepare_enable(xordev->clk);

        /*
         * Don't create more channels than there are CPUs, to avoid
         * wasting memory on DMA descriptors that can never be used
         * concurrently.
         */
        max_engines = num_present_cpus();
        if (xordev->xor_type == XOR_ARMADA_37XX)
                max_channels = num_present_cpus();
        else
                max_channels = min_t(unsigned int,
                                     MV_XOR_MAX_CHANNELS,
                                     DIV_ROUND_UP(num_present_cpus(), 2));

        if (mv_xor_engine_count >= max_engines)
                return 0;

        if (pdev->dev.of_node) {
                struct device_node *np;
                int i = 0;

                for_each_child_of_node(pdev->dev.of_node, np) {
                        struct mv_xor_chan *chan;
                        dma_cap_mask_t cap_mask;
                        int irq;

                        if (i >= max_channels)
                                continue;

                        dma_cap_zero(cap_mask);
                        dma_cap_set(DMA_MEMCPY, cap_mask);
                        dma_cap_set(DMA_XOR, cap_mask);
                        dma_cap_set(DMA_INTERRUPT, cap_mask);

                        irq = irq_of_parse_and_map(np, 0);
                        if (!irq) {
                                ret = -ENODEV;
                                goto err_channel_add;
                        }

                        chan = mv_xor_channel_add(xordev, pdev, i,
                                                  cap_mask, irq);
                        if (IS_ERR(chan)) {
                                ret = PTR_ERR(chan);
                                irq_dispose_mapping(irq);
                                goto err_channel_add;
                        }

                        xordev->channels[i] = chan;
                        i++;
                }
        } else if (pdata && pdata->channels) {
                for (i = 0; i < max_channels; i++) {
                        struct mv_xor_channel_data *cd;
                        struct mv_xor_chan *chan;
                        int irq;

                        cd = &pdata->channels[i];
                        irq = platform_get_irq(pdev, i);
                        if (irq < 0) {
                                ret = irq;
                                goto err_channel_add;
                        }

                        chan = mv_xor_channel_add(xordev, pdev, i,
                                                  cd->cap_mask, irq);
                        if (IS_ERR(chan)) {
                                ret = PTR_ERR(chan);
                                goto err_channel_add;
                        }

                        xordev->channels[i] = chan;
                }
        }

        return 0;

err_channel_add:
        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
                if (xordev->channels[i]) {
                        mv_xor_channel_remove(xordev->channels[i]);
                        if (pdev->dev.of_node)
                                irq_dispose_mapping(xordev->channels[i]->irq);
                }

        if (!IS_ERR(xordev->clk)) {
                clk_disable_unprepare(xordev->clk);
                clk_put(xordev->clk);
        }

        return ret;
}

static struct platform_driver mv_xor_driver = {
        .probe          = mv_xor_probe,
        .suspend        = mv_xor_suspend,
        .resume         = mv_xor_resume,
        .driver         = {
                .name           = MV_XOR_NAME,
                .of_match_table = mv_xor_dt_ids,
        },
};

builtin_platform_driver(mv_xor_driver);