// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
		"set ioat completion timeout [msec] (default 200 [msec])");
static int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
		"set ioat idle timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)

static char *chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

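/**
 * ioat_stop - quiesce a channel's interrupt, timer and cleanup handlers
 * @ioat_chan: channel to stop
 *
 * Clears IOAT_RUN so the interrupt and tasklet stop re-arming each other,
 * flushes any handlers already in flight, then runs one final cleanup pass.
 */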
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event(&ioat_chan->cleanup_task);
}

static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

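/*
 * ioat_issue_pending - dmaengine issue_pending hook: publish any descriptors
 * that have been prepped but not yet written out to the hardware.
 */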
void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

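/*
 * ioat_start_null_desc - submit a NULL descriptor to (re)start the channel
 * without transferring any data.
 */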
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

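/*
 * ioat_quiesce - suspend the channel and wait (up to @tmo jiffies, 0 = forever)
 * for it to leave the active/idle state.  Returns -ETIMEDOUT on timeout.
 */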
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

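/*
 * ioat_reset_sync - issue a channel reset and spin (bounded by @tmo jiffies)
 * until the hardware reports the reset has completed.
 */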
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

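/*
 * ioat_tx_submit_unlock - dmaengine tx_submit hook; assigns a cookie,
 * publishes the prepped descriptors to the ring and drops prep_lock,
 * which was taken in ioat_check_space_lock().
 */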
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_CHUNK;
	idx &= (IOAT_DESCS_PER_CHUNK - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

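/**
 * ioat_alloc_ring - allocate the software descriptor ring and its backing
 *		     hardware descriptors
 * @c: dma channel
 * @order: ring size as a power of two
 * @flags: GFP flags for the allocations
 *
 * The hardware descriptors are carved out of IOAT_CHUNK_SIZE coherent
 * chunks and chained into a circular list.
 */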
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
	ioat_chan->desc_chunks = chunks;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 IOAT_CHUNK_SIZE, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan),
						  IOAT_CHUNK_SIZE,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  IOAT_CHUNK_SIZE,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	/* setup descriptor pre-fetching for v3.4 */
	if (ioat_dma->cap & IOAT_CAP_DPS) {
		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

		if (chunks == 1)
			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
	}

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: zeroed (or not) completion address (updated)
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* microsecond delay by sysfs variable per pending descriptor */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}

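/*
 * ioat_cleanup - reclaim completed descriptors and kick error handling if the
 * channel has halted.  Runs from the cleanup tasklet and from ioat_tx_status().
 */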
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

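/* cleanup tasklet: reclaim descriptors, then rewrite CHANCTRL_RUN if IOAT_RUN is still set */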
void ioat_cleanup_event(struct tasklet_struct *t)
{
	struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	/* set the completion address register again */
	writel(lower_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(upper_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

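/*
 * ioat_eh - channel error handling path, invoked from cleanup when the
 * channel halts.  Classifies the fault recorded at the tail descriptor,
 * completes it, and aborts/resets/restarts the channel as needed.
 */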
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	ioat_abort_descs(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
	ioat_reset_hw(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
	ioat_restart_channel(ioat_chan);

	spin_lock_bh(&ioat_chan->prep_lock);
	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

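/**
 * ioat_timer_event - per-channel watchdog
 * @t: timer backing the channel
 *
 * Handles a halted channel, reclaims missed completions, and if no progress
 * has been made since the last acknowledged completion, reboots the channel.
 */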
void ioat_timer_event(struct timer_list *t)
{
	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			ioat_reboot_chan(ioat_chan);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		goto unlock_out;
	}

	/* handle the missed cleanup case */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
		/* timer restarted in ioat_cleanup_preamble
		 * and IOAT_COMPLETION_ACK cleared
		 */
		__cleanup(ioat_chan, phys_complete);
		goto unlock_out;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		ioat_reboot_chan(ioat_chan);

		goto unlock_out;
	}

	/* handle missed issue pending case */
	if (ioat_ring_pending(ioat_chan)) {
		dev_warn(to_dev(ioat_chan),
			 "Completion timeout with pending descriptors\n");
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}

	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
unlock_out:
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

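/*
 * ioat_tx_status - dmaengine tx_status hook; poke cleanup before re-checking
 * the cookie so callers see fresh completion state.
 */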
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}