// SPDX-License-Identifier: GPL-2.0-only
/*
 * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
 * Copyright(c) 2009 Intel Corporation
 *
 * based on raid6recov.c:
 *   Copyright 2002 H. Peter Anvin
 */
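
/*
 * Background on the recovery math used throughout this file (a short
 * summary following H. Peter Anvin's "The mathematics of RAID-6"):
 * over GF(2^8) with generator g = {02}, the array maintains
 *
 *	P = D_0 + D_1 + ... + D_{n-1}
 *	Q = g^0*D_0 + g^1*D_1 + ... + g^{n-1}*D_{n-1}
 *
 * where '+' is bytewise XOR.  If data blocks a < b fail, let Pxy/Qxy
 * be the syndromes computed with D_a = D_b = 0.  Then
 *
 *	P + Pxy = D_a + D_b
 *	Q + Qxy = g^a*D_a + g^b*D_b
 *
 * and solving this pair gives
 *
 *	D_b = (g^(b-a) + 1)^-1 * (P + Pxy) + (g^a + g^b)^-1 * (Q + Qxy)
 *	D_a = D_b + (P + Pxy)
 *
 * which is what the coef[] setup and the async_sum_product() calls
 * below implement via the raid6_gfexi[], raid6_gfinv[] and
 * raid6_gfexp[] lookup tables.
 */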
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, unsigned int d_off,
		struct page **srcs, unsigned int *src_offs, unsigned char *coef,
		size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, srcs, 2, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;
	const u8 *amul, *bmul;
	u8 ax, bx;
	u8 *a, *b, *c;

	if (dma)
		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

	if (unmap) {
		struct device *dev = dma->dev;
		dma_addr_t pq[2];
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0],
					      len, DMA_TO_DEVICE);
		unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1],
					      len, DMA_TO_DEVICE);
		unmap->to_cnt = 2;

		unmap->addr[2] = dma_map_page(dev, dest, d_off,
					      len, DMA_BIDIRECTIONAL);
		unmap->bidi_cnt = 1;
		/* engine only looks at Q, but expects it to follow P */
		pq[1] = unmap->addr[2];

		unmap->len = len;
		tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
					     len, dma_flags);
		if (tx) {
			dma_set_unmap(tx, unmap);
			async_tx_submit(chan, tx, submit);
			dmaengine_unmap_put(unmap);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dmaengine_unmap_put(unmap);
	}

	/* run the operation synchronously */
	async_tx_quiesce(&submit->depend_tx);
	amul = raid6_gfmul[coef[0]];
	bmul = raid6_gfmul[coef[1]];
	a = page_address(srcs[0]) + src_offs[0];
	b = page_address(srcs[1]) + src_offs[1];
	c = page_address(dest) + d_off;

	while (len--) {
		ax = amul[*a++];
		bx = bmul[*b++];
		*c++ = ax ^ bx;
	}

	return NULL;
}
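
/*
 * Note on the helper above: async_sum_product() computes
 * dest = coef[0]*srcs[0] + coef[1]*srcs[1] byte-by-byte in GF(2^8),
 * either on a PQ-capable dma channel or via the raid6_gfmul[] tables.
 * One-byte example: multiplication by g = {02} is (x << 1), reduced by
 * XOR with 0x11d when the high bit of x is set, so for coef[0] = 0x02
 * a source byte of 0x80 contributes raid6_gfmul[0x02][0x80] = 0x1d to
 * the XOR sum.
 */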

static struct dma_async_tx_descriptor *
async_mult(struct page *dest, unsigned int d_off, struct page *src,
		unsigned int s_off, u8 coef, size_t len,
		struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, &src, 1, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;
	const u8 *qmul; /* Q multiplier table */
	u8 *d, *s;

	if (dma)
		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

	if (unmap) {
		dma_addr_t dma_dest[2];
		struct device *dev = dma->dev;
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		unmap->addr[0] = dma_map_page(dev, src, s_off,
					      len, DMA_TO_DEVICE);
		unmap->to_cnt++;
		unmap->addr[1] = dma_map_page(dev, dest, d_off,
					      len, DMA_BIDIRECTIONAL);
		dma_dest[1] = unmap->addr[1];
		unmap->bidi_cnt++;
		unmap->len = len;

		/* this looks odd, but the dmaengine interface expects a
		 * two-entry pq destination array even when P generation
		 * is disabled via DMA_PREP_PQ_DISABLE_P, so only the Q
		 * slot (dma_dest[1]) is initialized here
		 */
		tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
					     1, &coef, len, dma_flags);

		if (tx) {
			dma_set_unmap(tx, unmap);
			dmaengine_unmap_put(unmap);
			async_tx_submit(chan, tx, submit);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dmaengine_unmap_put(unmap);
	}

	/* no channel available, or failed to allocate a descriptor, so
	 * perform the operation synchronously
	 */
	async_tx_quiesce(&submit->depend_tx);
	qmul = raid6_gfmul[coef];
	d = page_address(dest) + d_off;
	s = page_address(src) + s_off;

	while (len--)
		*d++ = qmul[*s++];

	return NULL;
}
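
/*
 * Note on the helper above: there is no dedicated dma multiply opcode,
 * so async_mult() reuses the PQ engine with DMA_PREP_PQ_DISABLE_P and a
 * single source.  The hardware then computes only Q = coef * src, i.e.
 * a plain GF(2^8) scalar multiply into dest, which is why dma_dest[0]
 * (the P slot) is never initialized and why the synchronous fallback is
 * a single table lookup per byte.
 */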

static struct dma_async_tx_descriptor *
__2data_recov_4(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, unsigned int *offs,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *a, *b;
	unsigned int p_off, q_off, a_off, b_off;
	struct page *srcs[2];
	unsigned int src_offs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];

	a = blocks[faila];
	a_off = offs[faila];
	b = blocks[failb];
	b_off = offs[failb];

	/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = p;
	src_offs[0] = p_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = p;
	src_offs[0] = p_off;
	srcs[1] = b;
	src_offs[1] = b_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}

static struct dma_async_tx_descriptor *
__2data_recov_5(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, unsigned int *offs,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *g, *dp, *dq;
	unsigned int p_off, q_off, g_off, dp_off, dq_off;
	struct page *srcs[2];
	unsigned int src_offs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;
	int good_srcs, good, i;

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (blocks[i] == NULL)
			continue;
		if (i == faila || i == failb)
			continue;
		good = i;
		good_srcs++;
	}
	BUG_ON(good_srcs > 1);

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];
	g = blocks[good];
	g_off = offs[good];

	/* Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q
	 */
	dp = blocks[faila];
	dp_off = offs[faila];
	dq = blocks[failb];
	dq_off = offs[failb];

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_memcpy(dp, g, dp_off, g_off, bytes, submit);
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_mult(dq, dq_off, g, g_off,
			raid6_gfexp[good], bytes, submit);

	/* compute P + Pxy */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = p;
	src_offs[1] = p_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	/* compute Q + Qxy */
	srcs[0] = dq;
	src_offs[0] = dq_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}

static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, unsigned int *offs,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *dp, *dq;
	unsigned int p_off, q_off, dp_off, dq_off;
	struct page *srcs[2];
	unsigned int src_offs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q
	 */
	dp = blocks[faila];
	dp_off = offs[faila];
	blocks[faila] = NULL;
	blocks[disks-2] = dp;
	offs[disks-2] = dp_off;
	dq = blocks[failb];
	dq_off = offs[failb];
	blocks[failb] = NULL;
	blocks[disks-1] = dq;
	offs[disks-1] = dq_off;

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);

	/* Restore pointer table */
	blocks[faila] = dp;
	offs[faila] = dp_off;
	blocks[failb] = dq;
	offs[failb] = dq_off;
	blocks[disks-2] = p;
	offs[disks-2] = p_off;
	blocks[disks-1] = q;
	offs[disks-1] = q_off;

	/* compute P + Pxy */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = p;
	src_offs[1] = p_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	/* compute Q + Qxy */
	srcs[0] = dq;
	src_offs[0] = dq_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = dp;
	src_offs[0] = dp_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}
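
/*
 * Minimal usage sketch for the entry point below (illustrative only:
 * NDISKS, the completion callback and the page/offset setup are
 * assumptions of this example, not requirements of the API):
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	addr_conv_t addr_conv[NDISKS];
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback_fn,
 *			  callback_arg, addr_conv);
 *	tx = async_raid6_2data_recov(NDISKS, PAGE_SIZE, faila, failb,
 *				     blocks, offs, &submit);
 *	async_tx_issue_pending_all();
 *
 * blocks[]/offs[] describe all NDISKS members with the failed slots
 * still populated and p/q in the last two positions.
 */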

/**
 * async_raid6_2data_recov - asynchronously calculate two missing data blocks
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: first failed drive index
 * @failb: second failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @offs: array of offset for pages in blocks
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
			struct page **blocks, unsigned int *offs,
			struct async_submit_ctl *submit)
{
	void *scribble = submit->scribble;
	int non_zero_srcs, i;

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

	/* if a dma resource is not available or a scribble buffer is not
	 * available punt to the synchronous path.  In the 'dma not
	 * available' case be sure to use the scribble buffer to
	 * preserve the content of 'blocks' as the caller intended.
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		async_tx_quiesce(&submit->depend_tx);
		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]) + offs[i];

		raid6_2data_recov(disks, bytes, faila, failb, ptrs);

		async_tx_sync_epilog(submit);

		return NULL;
	}

	non_zero_srcs = 0;
	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
		if (blocks[i])
			non_zero_srcs++;
	switch (non_zero_srcs) {
	case 0:
	case 1:
		/* there must be at least 2 sources - the failed devices */
		BUG();

	case 2:
		/* dma devices do not uniformly understand a zero source pq
		 * operation (in contrast to the synchronous case), so
		 * explicitly handle the special case of a 4 disk array with
		 * both data disks missing.
		 */
		return __2data_recov_4(disks, bytes, faila, failb,
				blocks, offs, submit);
	case 3:
		/* dma devices do not uniformly understand a single
		 * source pq operation (in contrast to the synchronous
		 * case), so explicitly handle the special case of a 5 disk
		 * array with 2 of 3 data disks missing.
		 */
		return __2data_recov_5(disks, bytes, faila, failb,
				blocks, offs, submit);
	default:
		return __2data_recov_n(disks, bytes, faila, failb,
				blocks, offs, submit);
	}
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
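
/*
 * For a single failed data block D_a (plus a stale P block), Q alone is
 * sufficient: computing the syndrome Qa with D_a = 0 gives
 *
 *	Q + Qa = g^a * D_a,  i.e.  D_a = g^-a * (Q + Qa)
 *
 * which is the coef = raid6_gfinv[raid6_gfexp[faila]] multiplication in
 * async_raid6_datap_recov() below; P is then rebuilt by XORing the
 * recovered block into the partial parity of the surviving blocks.
 */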

/**
 * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @offs: array of offset for pages in blocks
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
			struct page **blocks, unsigned int *offs,
			struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *dq;
	unsigned int p_off, q_off, dq_off;
	u8 coef;
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;
	int good_srcs, good, i;
	struct page *srcs[2];
	unsigned int src_offs[2];

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

	/* if a dma resource is not available or a scribble buffer is not
	 * available punt to the synchronous path.  In the 'dma not
	 * available' case be sure to use the scribble buffer to
	 * preserve the content of 'blocks' as the caller intended.
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		async_tx_quiesce(&submit->depend_tx);
		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]) + offs[i];

		raid6_datap_recov(disks, bytes, faila, ptrs);

		async_tx_sync_epilog(submit);

		return NULL;
	}

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (i == faila)
			continue;
		if (blocks[i]) {
			good = i;
			good_srcs++;
			if (good_srcs > 1)
				break;
		}
	}
	BUG_ON(good_srcs == 0);

	p = blocks[disks-2];
	p_off = offs[disks-2];
	q = blocks[disks-1];
	q_off = offs[disks-1];

	/* Compute syndrome with zero for the missing data page
	 * Use the dead data page as temporary storage for delta q
	 */
	dq = blocks[faila];
	dq_off = offs[faila];
	blocks[faila] = NULL;
	blocks[disks-1] = dq;
	offs[disks-1] = dq_off;

	/* in the 4-disk case we only need to perform a single source
	 * multiplication with the one good data block.
	 */
	if (good_srcs == 1) {
		struct page *g = blocks[good];
		unsigned int g_off = offs[good];

		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_memcpy(p, g, p_off, g_off, bytes, submit);

		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_mult(dq, dq_off, g, g_off,
				raid6_gfexp[good], bytes, submit);
	} else {
		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);
	}

	/* Restore pointer table */
	blocks[faila] = dq;
	offs[faila] = dq_off;
	blocks[disks-1] = q;
	offs[disks-1] = q_off;

	/* calculate g^{-faila} */
	coef = raid6_gfinv[raid6_gfexp[faila]];

	srcs[0] = dq;
	src_offs[0] = dq_off;
	srcs[1] = q;
	src_offs[1] = q_off;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit);

	srcs[0] = p;
	src_offs[0] = p_off;
	srcs[1] = dq;
	src_offs[1] = dq_off;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit);

	return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);

MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
MODULE_LICENSE("GPL");