/*
 * Freescale RAID Engine device driver
 *
 * Author: Harninder Rai <harninder.rai@freescale.com>
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "fsl_raid.h"

#define FSL_RE_MAX_XOR_SRCS	16
#define FSL_RE_MAX_PQ_SRCS	16
#define FSL_RE_MIN_DESCS	256
#define FSL_RE_MAX_DESCS	(4 * FSL_RE_MIN_DESCS)
#define FSL_RE_FRAME_FORMAT	0x1
#define FSL_RE_MAX_DATA_LEN	(1024 * 1024)

#define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)

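/* Add descriptors into per chan software queue - submit_q */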
static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_re_desc *desc;
	struct fsl_re_chan *re_chan;
	dma_cookie_t cookie;
	unsigned long flags;

	desc = to_fsl_re_dma_desc(tx);
	re_chan = container_of(tx->chan, struct fsl_re_chan, chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &re_chan->submit_q);
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);

	return cookie;
}
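/* Copy descriptors from the per chan software queue into the hardware job ring */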
static void fsl_re_issue_pending(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	int avail;
	struct fsl_re_desc *desc, *_desc;
	unsigned long flags;

	re_chan = container_of(chan, struct fsl_re_chan, chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	avail = FSL_RE_SLOT_AVAIL(
			in_be32(&re_chan->jrregs->inbring_slot_avail));

	list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
		if (!avail)
			break;

		list_move_tail(&desc->node, &re_chan->active_q);

		memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
		       &desc->hwdesc, sizeof(struct fsl_re_hw_desc));

		re_chan->inb_count = (re_chan->inb_count + 1) &
						FSL_RE_RING_SIZE_MASK;
		out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
		avail--;
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
}

static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
	dma_cookie_complete(&desc->async_tx);
	dma_descriptor_unmap(&desc->async_tx);
	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}

static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
{
	struct fsl_re_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
		if (async_tx_test_ack(&desc->async_tx))
			list_move_tail(&desc->node, &re_chan->free_q);
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);

	fsl_re_issue_pending(&re_chan->chan);
}

static void fsl_re_dequeue(struct tasklet_struct *t)
{
	struct fsl_re_chan *re_chan = from_tasklet(re_chan, t, irqtask);
	struct fsl_re_desc *desc, *_desc;
	struct fsl_re_hw_desc *hwdesc;
	unsigned long flags;
	unsigned int count, oub_count;
	int found;

	fsl_re_cleanup_descs(re_chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
	while (count--) {
		found = 0;
		hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
		list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
					 node) {
			/* compare the hw dma addr to find the completed */
			if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
			    desc->hwdesc.addr_low == hwdesc->addr_low) {
				found = 1;
				break;
			}
		}

		if (found) {
			fsl_re_desc_done(desc);
			list_move_tail(&desc->node, &re_chan->ack_q);
		} else {
			dev_err(re_chan->dev,
				"found hwdesc not in sw queue, discard it\n");
		}

		oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
		re_chan->oub_count = oub_count;

		out_be32(&re_chan->jrregs->oubring_job_rmvd,
			 FSL_RE_RMVD_JOB(1));
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
}

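/* Per Job Ring interrupt handler */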
static irqreturn_t fsl_re_isr(int irq, void *data)
{
	struct fsl_re_chan *re_chan;
	u32 irqstate, status;

	re_chan = dev_get_drvdata((struct device *)data);

	irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * There's no way in the upper layer (read MD layer) to recover from
	 * error conditions except restarting everything. In the long term we
	 * need to do something more than just report the error.
	 */
	if (irqstate & FSL_RE_ERROR) {
		status = in_be32(&re_chan->jrregs->jr_status);
		dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
			irqstate, status);
	}

	/* Clear interrupt */
	out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);

	tasklet_schedule(&re_chan->irqtask);

	return IRQ_HANDLED;
}

static enum dma_status fsl_re_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Fill one frame of the compound frame descriptor: length, final flag, address */
static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
			   size_t length, dma_addr_t addr, bool final)
{
	u32 efrl = length & FSL_RE_CF_LENGTH_MASK;

	efrl |= final << FSL_RE_CF_FINAL_SHIFT;
	cf[index].efrl32 = efrl;
	cf[index].addr_high = upper_32_bits(addr);
	cf[index].addr_low = lower_32_bits(addr);
}

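/*
 * Initialize a software descriptor: hook up tx_submit and point its
 * hardware job descriptor at the compound frame / CDB memory
 */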
static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
					    struct fsl_re_desc *desc,
					    void *cf, dma_addr_t paddr)
{
	desc->re_chan = re_chan;
	desc->async_tx.tx_submit = fsl_re_tx_submit;
	dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
	INIT_LIST_HEAD(&desc->node);

	desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT;
	desc->hwdesc.lbea32 = upper_32_bits(paddr);
	desc->hwdesc.addr_low = lower_32_bits(paddr);
	desc->cf_addr = cf;
	desc->cf_paddr = paddr;

	desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE);
	desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE;

	return desc;
}

static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
						  unsigned long flags)
{
	struct fsl_re_desc *desc = NULL;
	void *cf;
	dma_addr_t paddr;
	unsigned long lock_flag;

	fsl_re_cleanup_descs(re_chan);

	spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
	if (!list_empty(&re_chan->free_q)) {
		/* take one desc from free_q */
		desc = list_first_entry(&re_chan->free_q,
					struct fsl_re_desc, node);
		list_del(&desc->node);

		desc->async_tx.flags = flags;
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);

	if (!desc) {
		desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
		if (!desc)
			return NULL;

		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
				    &paddr);
		if (!cf) {
			kfree(desc);
			return NULL;
		}

		desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
		desc->async_tx.flags = flags;

		spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
		re_chan->alloc_count++;
		spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
	}

	return desc;
}

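/*
 * Prep function for GenQ: computes Q = src0*coef0 ^ src1*coef1 ^ ... over
 * GF(2^8), or a plain XOR of all sources when no coefficients are given
 */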
static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	struct fsl_re_xor_cdb *xor;
	struct fsl_re_cmpnd_frame *cf;
	u32 cdb;
	unsigned int i, j;
	unsigned int save_src_cnt = src_cnt;
	int cont_q = 0;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	if (scf && (flags & DMA_PREP_CONTINUE)) {
		cont_q = 1;
		src_cnt += 1;
	}

	/* Filling xor CDB */
	cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
	xor = desc->cdb_addr;
	xor->cdb32 = cdb;

	if (scf) {
		/* compute q = src0*coef0^src1*coef1^..., * is GF(2^8) mult */
		for (i = 0; i < save_src_cnt; i++)
			xor->gfm[i] = scf[i];
		if (cont_q)
			xor->gfm[i++] = 1;
	} else {
		/* compute P, that is XOR all srcs */
		for (i = 0; i < src_cnt; i++)
			xor->gfm[i] = 1;
	}

	/* Filling frame 0 of compound frame descriptor with CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);

	/* Fill CFD's 1st frame with destination buffer */
	fill_cfd_frame(cf, 1, len, dest, 0);

	/* Fill CFD's rest of the frames with source buffers */
	for (i = 2, j = 0; j < save_src_cnt; i++, j++)
		fill_cfd_frame(cf, i, len, src[j], 0);

	/* In the continuation case, feed the old Q back in as a source */
	if (cont_q)
		fill_cfd_frame(cf, i++, len, dest, 0);

	/* Setting the final bit in the last source buffer frame in CFD */
	cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;

	return &desc->async_tx;
}

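/*
 * Prep function for P parity calculation. In RAID Engine terminology,
 * XOR calculation is a GenQ calculation done through the GenQ command
 */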
static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags)
{
	/* NULL lets genq take all coefficients as 1 */
	return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
}
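/*
 * Prep function for P/Q parity calculation. In RAID Engine terminology,
 * P/Q calculation is a GenQQ calculation done through the GenQQ command
 */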
static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
		struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	struct fsl_re_pq_cdb *pq;
	struct fsl_re_cmpnd_frame *cf;
	u32 cdb;
	u8 *p;
	int gfmq_len, i, j;
	unsigned int save_src_cnt = src_cnt;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	/*
	 * The RE requires at least two sources. If given only one source,
	 * pass the same source twice. With only one source, generating P
	 * is meaningless; only generate Q.
	 */
	if (src_cnt == 1) {
		struct dma_async_tx_descriptor *tx;
		dma_addr_t dma_src[2];
		unsigned char coef[2];

		dma_src[0] = *src;
		coef[0] = *scf;
		dma_src[1] = *src;
		coef[1] = 0;
		tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len,
					  flags);
		if (tx)
			desc = to_fsl_re_dma_desc(tx);

		return tx;
	}

	/*
	 * During RAID6 array creation, Linux's MD layer requests P and Q
	 * separately in two steps, even though the RAID Engine can compute
	 * both with a single command. To merge well with the MD layer,
	 * hand the Q-only case over to fsl_re_prep_dma_genq().
	 */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt,
				scf, len, flags);

	if (flags & DMA_PREP_CONTINUE)
		src_cnt += 3;

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* Filling GenQQ CDB */
	cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;

	pq = desc->cdb_addr;
	pq->cdb32 = cdb;

	p = pq->gfm_q1;
	/* Init gfm_q1[]: all-ones coefficients generate P by plain XOR */
	for (i = 0; i < src_cnt; i++)
		p[i] = 1;

	/* Align gfm[] to 32bit */
	gfmq_len = ALIGN(src_cnt, 4);

	/*
	 * Init gfm_q2[] with the caller's Q coefficients; scf holds only
	 * save_src_cnt entries, continuation coefficients are set below
	 */
	p += gfmq_len;
	for (i = 0; i < save_src_cnt; i++)
		p[i] = scf[i];

	/* Filling frame 0 of compound frame descriptor with CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0);

	/* Fill CFD's 1st & 2nd frame with dest buffers */
	for (i = 1, j = 0; i < 3; i++, j++)
		fill_cfd_frame(cf, i, len, dest[j], 0);

	/* Fill CFD's rest of the frames with source buffers */
	for (i = 3, j = 0; j < save_src_cnt; i++, j++)
		fill_cfd_frame(cf, i, len, src[j], 0);

	/* PQ computation continuation: feed old P and old Q back in */
	if (flags & DMA_PREP_CONTINUE) {
		if (src_cnt - save_src_cnt == 3) {
			p[save_src_cnt] = 0;
			p[save_src_cnt + 1] = 0;
			p[save_src_cnt + 2] = 1;
			fill_cfd_frame(cf, i++, len, dest[0], 0);
			fill_cfd_frame(cf, i++, len, dest[1], 0);
			fill_cfd_frame(cf, i++, len, dest[1], 0);
		} else {
			dev_err(re_chan->dev, "PQ tx continuation error!\n");
			return NULL;
		}
	}

	/* Setting the final bit in the last source buffer frame in CFD */
	cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;

	return &desc->async_tx;
}

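/* Prep function for memcpy. In RAID Engine, memcpy is done through the MOVE command */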
static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	size_t length;
	struct fsl_re_cmpnd_frame *cf;
	struct fsl_re_move_cdb *move;
	u32 cdb;

	re_chan = container_of(chan, struct fsl_re_chan, chan);

	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* Filling move CDB */
	cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;

	move = desc->cdb_addr;
	move->cdb32 = cdb;

	/* Filling frame 0 of CFD with move CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0);

	length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN);

	/* Fill CFD's 1st frame with dest buffer */
	fill_cfd_frame(cf, 1, length, dest, 0);

	/* Fill CFD's 2nd frame with src buffer, marked final */
	fill_cfd_frame(cf, 2, length, src, 1);

	return &desc->async_tx;
}

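/* Pre-allocate a pool of software descriptors for the channel */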
static int fsl_re_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	void *cf;
	dma_addr_t paddr;
	int i;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	for (i = 0; i < FSL_RE_MIN_DESCS; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL,
				    &paddr);
		if (!cf) {
			kfree(desc);
			break;
		}

		INIT_LIST_HEAD(&desc->node);
		fsl_re_init_desc(re_chan, desc, cf, paddr);

		list_add_tail(&desc->node, &re_chan->free_q);
		re_chan->alloc_count++;
	}
	return re_chan->alloc_count;
}

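/* Free all software descriptors of the channel */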
static void fsl_re_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	while (re_chan->alloc_count--) {
		desc = list_first_entry(&re_chan->free_q,
					struct fsl_re_desc,
					node);

		list_del(&desc->node);
		dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
			      desc->cf_paddr);
		kfree(desc);
	}

	if (!list_empty(&re_chan->free_q))
		dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
}

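/* Probe one job ring: map its registers, hook up its IRQ and program its rings */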
static int fsl_re_chan_probe(struct platform_device *ofdev,
			     struct device_node *np, u8 q, u32 off)
{
	struct device *dev, *chandev;
	struct fsl_re_drv_private *re_priv;
	struct fsl_re_chan *chan;
	struct dma_device *dma_dev;
	u32 ptr;
	u32 status;
	int ret = 0, rc;
	struct platform_device *chan_ofdev;

	dev = &ofdev->dev;
	re_priv = dev_get_drvdata(dev);
	dma_dev = &re_priv->dma_dev;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	/* create platform device for chan node */
	chan_ofdev = of_platform_device_create(np, NULL, dev);
	if (!chan_ofdev) {
		dev_err(dev, "Not able to create ofdev for jr %d\n", q);
		ret = -EINVAL;
		goto err_free;
	}

	/* read reg property from dts */
	rc = of_property_read_u32(np, "reg", &ptr);
	if (rc) {
		dev_err(dev, "Reg property not found in jr %d\n", q);
		ret = -ENODEV;
		goto err_free;
	}

	chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
			off + ptr);

	/* read irq property from dts */
	chan->irq = irq_of_parse_and_map(np, 0);
	if (!chan->irq) {
		dev_err(dev, "No IRQ defined for JR %d\n", q);
		ret = -ENODEV;
		goto err_free;
	}

	snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);

	chandev = &chan_ofdev->dev;
	tasklet_setup(&chan->irqtask, fsl_re_dequeue);

	ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
	if (ret) {
		dev_err(dev, "Unable to register interrupt for JR %d\n", q);
		ret = -EINVAL;
		goto err_free;
	}

	re_priv->re_jrs[q] = chan;
	chan->chan.device = dma_dev;
	chan->chan.private = chan;
	chan->dev = chandev;
	chan->re_dev = re_priv;

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ack_q);
	INIT_LIST_HEAD(&chan->active_q);
	INIT_LIST_HEAD(&chan->submit_q);
	INIT_LIST_HEAD(&chan->free_q);

	chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
		GFP_KERNEL, &chan->inb_phys_addr);
	if (!chan->inb_ring_virt_addr) {
		dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
		ret = -ENOMEM;
		goto err_free;
	}

	chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
		GFP_KERNEL, &chan->oub_phys_addr);
	if (!chan->oub_ring_virt_addr) {
		dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
		ret = -ENOMEM;
		goto err_free_1;
	}

	/* Program the Inbound/Outbound ring base addresses and sizes */
	out_be32(&chan->jrregs->inbring_base_h,
		 chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
	out_be32(&chan->jrregs->oubring_base_h,
		 chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
	out_be32(&chan->jrregs->inbring_base_l,
		 chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
	out_be32(&chan->jrregs->oubring_base_l,
		 chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
	out_be32(&chan->jrregs->inbring_size,
		 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
	out_be32(&chan->jrregs->oubring_size,
		 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);

	/* Read LIODN value programmed by u-boot */
	status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;

	/* Program the CFG reg */
	out_be32(&chan->jrregs->jr_config_1,
		 FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);

	dev_set_drvdata(chandev, chan);

	/* Enable RE/CHAN */
	out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE);

	return 0;

err_free_1:
	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
		      chan->inb_phys_addr);
err_free:
	return ret;
}
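/* Probe the RAID Engine: map registers, advertise DMA capabilities, probe job rings */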
static int fsl_re_probe(struct platform_device *ofdev)
{
	struct fsl_re_drv_private *re_priv;
	struct device_node *np;
	struct device_node *child;
	u32 off;
	u8 ridx = 0;
	struct dma_device *dma_dev;
	struct resource *res;
	int rc;
	struct device *dev = &ofdev->dev;

	re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL);
	if (!re_priv)
		return -ENOMEM;

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* IOMAP the entire RAID Engine region */
	re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!re_priv->re_regs)
		return -EBUSY;

	/* Program the RE mode */
	out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE);

	/* Program Galois Field polynomial */
	out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY);

	dev_info(dev, "version %x, mode %x, gfp %x\n",
		 in_be32(&re_priv->re_regs->re_version_id),
		 in_be32(&re_priv->re_regs->global_config),
		 in_be32(&re_priv->re_regs->galois_field_config));

	dma_dev = &re_priv->dma_dev;
	dma_dev->dev = dev;
	INIT_LIST_HEAD(&dma_dev->channels);
	dma_set_mask(dev, DMA_BIT_MASK(40));

	dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources;
	dma_dev->device_tx_status = fsl_re_tx_status;
	dma_dev->device_issue_pending = fsl_re_issue_pending;

	dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS;
	dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor;
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);

	dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS;
	dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq;
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

	dma_dev->device_free_chan_resources = fsl_re_free_chan_resources;

	re_priv->total_chans = 0;

	re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev,
					FSL_RE_CF_CDB_SIZE,
					FSL_RE_CF_CDB_ALIGN, 0);
	if (!re_priv->cf_desc_pool) {
		dev_err(dev, "No memory for fsl re_cf desc pool\n");
		return -ENOMEM;
	}

	re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev,
			sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
			FSL_RE_FRAME_ALIGN, 0);
	if (!re_priv->hw_desc_pool) {
		dev_err(dev, "No memory for fsl re_hw desc pool\n");
		return -ENOMEM;
	}

	dev_set_drvdata(dev, re_priv);

	/* Parse device tree to find out the total number of JQs present */
	for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
		rc = of_property_read_u32(np, "reg", &off);
		if (rc) {
			dev_err(dev, "Reg property not found in JQ node\n");
			of_node_put(np);
			return -ENODEV;
		}

		/* Find out the job rings present under each JQ */
		for_each_child_of_node(np, child) {
			rc = of_device_is_compatible(child,
					"fsl,raideng-v1.0-job-ring");
			if (rc) {
				fsl_re_chan_probe(ofdev, child, ridx++, off);
				re_priv->total_chans++;
			}
		}
	}

	dma_async_device_register(dma_dev);

	return 0;
}

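/* Free the inbound/outbound job ring memory of a channel */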
static void fsl_re_remove_chan(struct fsl_re_chan *chan)
{
	tasklet_kill(&chan->irqtask);

	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
		      chan->inb_phys_addr);

	dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr,
		      chan->oub_phys_addr);
}

static int fsl_re_remove(struct platform_device *ofdev)
{
	struct fsl_re_drv_private *re_priv;
	struct device *dev;
	int i;

	dev = &ofdev->dev;
	re_priv = dev_get_drvdata(dev);

	/* Cleanup chan related memory areas */
	for (i = 0; i < re_priv->total_chans; i++)
		fsl_re_remove_chan(re_priv->re_jrs[i]);

	/* Unregister the driver */
	dma_async_device_unregister(&re_priv->dma_dev);

	return 0;
}

static const struct of_device_id fsl_re_ids[] = {
	{ .compatible = "fsl,raideng-v1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, fsl_re_ids);

static struct platform_driver fsl_re_driver = {
	.driver = {
		.name = "fsl-raideng",
		.of_match_table = fsl_re_ids,
	},
	.probe = fsl_re_probe,
	.remove = fsl_re_remove,
};

module_platform_driver(fsl_re_driver);

MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Freescale RAID Engine Device Driver");