#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"

#define MAX_SCF	256

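/*
 * The xor and pq hardware descriptors hold only a limited number of source
 * addresses; additional sources spill into an extended descriptor.  These
 * tables map a source index to the descriptor (base vs. extended) and to
 * the field slot within that descriptor.
 */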
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };

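/* store a source address in the base or extended xor descriptor for @idx */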
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

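/* read back the source address programmed for @idx (used by the debug dumps) */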
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}

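/* store a pq source address and its GF coefficient for @idx */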
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

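/*
 * 16-source variant: higher-numbered sources and their coefficients land in
 * the extended (sed) descriptors rather than the base pq descriptor.
 */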
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			 dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}

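/*
 * Allocate a super extended descriptor entry: the software tracking struct
 * comes from ioat_sed_cache and the hardware block from the per-size DMA
 * pool selected by @hw_pool.
 */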
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(ioat_sed_cache, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(ioat_sed_cache, sed);
		return NULL;
	}

	return sed;
}

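/*
 * Prepare a memcpy transaction: the copy is split into as many hardware
 * descriptors as the channel's transfer cap requires; only the last
 * descriptor carries the interrupt/fence flags and the completion write.
 */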
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat_chan, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat_chan, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

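/*
 * Build an xor or xor-validate operation.  With more than 5 sources an
 * extended descriptor is interleaved with each base descriptor, and a
 * trailing null descriptor performs the completion write so that raid and
 * legacy completion writes stay ordered.
 */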
static struct dma_async_tx_descriptor *
__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		     dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		     size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so reserve one extra null
	 * (legacy) descriptor to keep all completion writes in order
	 */
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t,
					 len, 1 << ioat_chan->xfercap_log);
		int s;

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; xor_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat_chan, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the flags and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries the interrupt bit */
	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat_chan, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

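/*
 * xor validate: the first source also serves as the address the hardware
 * checks the computed xor against, so the remaining sources are passed
 * starting at src[1].
 */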
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success, so clear it here
	 */
	*result = 0;

	return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
				    src_cnt - 1, len, flags);
}

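/* dump the contents of a pq descriptor (and its extended half) for debugging */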
static void
dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
		 struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(ioat_chan);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
		pq->ctl_f.int_en, pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

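/* dump a 16-source pq descriptor, including the sed-resident source slots */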
static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(ioat_chan);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

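/*
 * Build a pq or pq-validate operation with up to 8 sources.  Continuation
 * flags add implied sources, an extended descriptor is interleaved when the
 * base descriptor cannot hold them all, and pre-3.3 hardware (cb32) needs a
 * trailing null descriptor for the completion write.
 */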
static struct dma_async_tx_descriptor *
__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		    const dma_addr_t *dst, const dma_addr_t *src,
		    unsigned int src_cnt, const unsigned char *scf,
		    size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);

	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case)
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so reserve one extra null
	 * (legacy) descriptor on cb32 to keep all completion writes in
	 * order
	 */
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len,
					 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; pq_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* enable descriptor write back error status when supported */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the flags and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat_chan, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries the interrupt bit */
		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat_chan, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

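/*
 * Build a pq or pq-validate operation with 9-16 sources.  Each descriptor
 * borrows a super extended (sed) block for the extra source slots, and the
 * completion write is done without a trailing null descriptor.
 */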
static struct dma_async_tx_descriptor *
__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		      const dma_addr_t *dst, const dma_addr_t *src,
		      unsigned int src_cnt, const unsigned char *scf,
		      size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write
	 */
	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;

	i = 0;

	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len,
					 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(ioat_chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* enable descriptor write back error status when supported */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the flags and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 completion can be signalled without a null descriptor */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat_chan, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

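/* effective source count once the implied continuation sources are added */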
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}

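/*
 * Prepare a pq (raid6 syndrome) operation, routing to the 8- or 16-source
 * implementation based on the effective source count.  A disabled P or Q
 * result is pointed at the other destination so the hardware always has a
 * valid address to write.
 */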
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat_prep_pq16_lock(chan, NULL, dst, single_source,
					      2, single_source_coef, len,
					      flags) :
			__ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					      scf, len, flags) :
			__ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					    scf, len, flags);
	}
}

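/* prepare a pq validate: *pqres must start clear since failures only set bits */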
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success, so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				      flags) :
		__ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}

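/*
 * xor implemented on the pq engine: P (the plain xor of the sources) is
 * written to dst, Q is disabled, and the coefficients are don't-cares that
 * are simply zeroed.
 */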
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[MAX_SCF];
	dma_addr_t pq[2];
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	if (src_cnt > MAX_SCF)
		return NULL;

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				      flags) :
		__ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

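/* xor validate via the pq engine: src[0] holds the expected xor result */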
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[MAX_SCF];
	dma_addr_t pq[2];
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	if (src_cnt > MAX_SCF)
		return NULL;

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success, so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				      scf, len, flags) :
		__ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				    scf, len, flags);
}

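/* prepare a null descriptor whose only effect is a completion interrupt */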
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	if (ioat_check_space_lock(ioat_chan, 1) == 0)
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat_chan, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}