#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
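/*
 * Bits defined in qword 0 of the send DMA descriptor.
 */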
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30

static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(struct tasklet_struct *);
static void unmap_desc(struct qib_pportdata *, unsigned);

static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
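/*
 * Complete all the SDMA requests on the active list with status
 * QIB_SDMA_TXREQ_S_ABORTED, unmapping the descriptors of any request
 * that asked us to free them.  Called with the sdma_lock held.
 */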
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(struct tasklet_struct *t)
{
	struct qib_pportdata *ppd = from_tasklet(ppd, t,
						 sdma_sw_clean_up_task);
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
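
	/*
	 * At this point, the following should always be true:
	 *  - we are halted, so no more descriptors are getting retired;
	 *  - we are not running, so no one is submitting new work;
	 *  - only we can send the e40_sw_cleaned, so we can't start
	 *    running again until we say so.  So, the active list and
	 *    descq are ours to play with.
	 */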
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

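	/*
	 * Resync the count of added and removed descriptors; nothing is
	 * left in flight, so everything added has now been removed.
	 */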
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

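	/*
	 * Reset the software copies of head, tail and generation; the
	 * hardware ring is reinitialized on the way back up.
	 */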
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
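/*
 * Bring the SDMA hardware back up: disarm all send buffers in the SDMA
 * range, then let the chip-specific code restart the engine.
 */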
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
			   enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;

	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA descriptor FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
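/*
 * qib_sdma_make_progress - retire completed descriptors, run tx callbacks
 *
 * Called with the sdma_lock held; returns 1 if any progress was made.
 */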
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);
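
	/*
	 * Not every descriptor has a corresponding txreq, so we have to
	 * be able to skip over descriptors until we reach the range of
	 * the next txreq on the active list.
	 */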
	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequeued desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
						 list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}
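/*
 * The SDMA interrupt entry point: takes the sdma_lock and runs the
 * progress engine.
 */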
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd)) {
		qib_sdma_make_progress(ppd);
		if (!list_empty(&ppd->sdma_userpending))
			qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
	}
}

int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	ppd->sdma_intrequest = 0;
	INIT_LIST_HEAD(&ppd->sdma_userpending);

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
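/*
 * Complete a request when sdma is not running; likely only one request,
 * but to simplify the code, always queue it, then process the whole
 * activelist.  We process the entire list to ensure that this particular
 * request does complete.
 */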
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	struct qib_qp_priv *priv = tx->qp->priv;

	atomic_inc(&priv->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}
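/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length).
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */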
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct rvt_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct rvt_sge *sge;
	struct rvt_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;
	struct qib_qp_priv *priv;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len = rvt_get_sge_length(sge, dwords << 2);

		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
			ret = -ENOMEM;
			goto unmap;
		}
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		rvt_update_sge(ss, len, false);
		dwoffset += dw;
		dwords -= dw;
	}

	/* set SDmaLastDesc (and optional flags) on the packet's final descriptor */
	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
	priv = tx->qp->priv;
	atomic_inc(&priv->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	priv = qp->priv;
	qib_put_txreq(tx);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* a mapping failure moves an active RC QP to the error state */
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
			rvt_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	priv = qp->priv;
	spin_lock(&qp->s_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		priv->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}
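/*
 * dump_sdma_state - debugging dump of the software view of the SDMA
 * descriptor ring and the active list.
 */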
void dump_sdma_state(struct qib_pportdata *ppd)
{
	struct qib_sdma_desc *descq;
	struct qib_sdma_txreq *txp, *txpnext;
	__le64 *descqp;
	u64 desc[2];
	u64 addr;
	u16 gen, dwlen, dwoffset;
	u16 head, tail, cnt;

	head = ppd->sdma_descq_head;
	tail = ppd->sdma_descq_tail;
	cnt = qib_sdma_descq_freecnt(ppd);
	descq = ppd->sdma_descq;

	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_head: %u\n", head);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_tail: %u\n", tail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sdma_descq_freecnt: %u\n", cnt);

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };

		descqp = &descq[head].qw[0];
		desc[0] = le64_to_cpu(descqp[0]);
		desc[1] = le64_to_cpu(descqp[1]);
		flags[0] = (desc[0] & 1ULL << 15) ? 'I' : '-';
		flags[1] = (desc[0] & 1ULL << 14) ? 'L' : 'S';
		flags[2] = (desc[0] & 1ULL << 13) ? 'H' : '-';
		flags[3] = (desc[0] & 1ULL << 12) ? 'F' : '-';
		flags[4] = (desc[0] & 1ULL << 11) ? 'L' : '-';
		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
		gen = (desc[0] >> 30) & 3ULL;
		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
		dwoffset = (desc[0] & 0x7ffULL) << 2;
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
			head, flags, addr, gen, dwlen, dwoffset);
		if (++head == ppd->sdma_descq_cnt)
			head = 0;
	}

	/* print dma descriptor indices from the TX requests */
	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
				 list)
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
			txp->start_idx, txp->next_descq_idx);
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
			    enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_process_event(struct qib_pportdata *ppd,
			      enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
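			/*
			 * If down, but running requested (usually the
			 * result of a link coming up), then start up.
			 */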
			ss->go_s99_running = 1;
			fallthrough;	/* and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}