#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");

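/**
 * qib_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Used at user process close, in case the process died while
 * writing to a PIO buffer that is still armed.
 */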
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

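/*
 * Called by a user process when it sees the DISARM_BUFS event bit set;
 * disarms any of the context's send buffers that were flagged for it.
 */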
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;
	unsigned n = 0;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Don't need uctxt_lock here, since user has called in to us.
	 * Clear at start in case more interrupts set bits while we
	 * are disarming.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base
		 * separately, first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
			n++;
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
		}
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}

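/*
 * Return the port that owns send buffer i if the buffer falls in an
 * SDMA send-buffer range, otherwise NULL.
 */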
static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}

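/*
 * Return true if the send buffer is being used by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect.
 */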
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;

			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}

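/*
 * Disarm a set of send buffers.  If the buffer might be actively being
 * written to, mark the buffer to be disarmed later, when it is no longer
 * being written to.
 *
 * This should only be called from the IRQ error handler.
 */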
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the DMA hardware,
		 * reset the DMA engine.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't clear it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			__set_bit(i, dd->pio_need_disarm);
		} else {
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}

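/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * Called whenever our local copy indicates we have run out of send buffers.
 */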
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpu's can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

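/*
 * Debugging code and stats updates if no pio buffers available.
 */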
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but if we lose a stat count in a while, that's OK */
	qib_stats.sps_nopiobufs++;
}

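/*
 * Common code for normal driver send buffer allocation, and reserved
 * allocation.
 *
 * Do appropriate marking as busy, etc.
 * Returns buffer pointer if one is found, otherwise NULL.
 */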
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1;
	if (dd->upd_pio_shadow) {
update_shadow:
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
	/*
	 * While test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	if (dd->last_pio >= first && dd->last_pio <= last)
		i = dd->last_pio + 1;
	if (!first)
		/* adjust to min possible */
		nbufs = last - dd->min_kernel_pio + 1;
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = !first ? dd->min_kernel_pio : first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		if (!first && first != last)
			dd->last_pio = i;
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated)
			/*
			 * First time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			goto update_shadow;
		no_send_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}

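/*
 * Record that the caller is finished writing to the buffer so we don't
 * disarm it while it is being written, and disarm it now if needed.
 */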
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

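/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 * @rcd: the context pointer
 */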
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * The BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 * BITS_PER_LONG is slightly wrong, since it's
			 * always 64 bits per register in chip...
			 * We only work on 64 bit kernels, so that's OK.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
			if ((start >> 1) < dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
			if ((start >> 1) > dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		}
		start += 2;
	}

	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
		dd->last_pio = dd->min_kernel_pio - 1;
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}

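/*
 * Flush all sends that might be in the ready-to-send state, as well as
 * any that are in the process of being sent.  Used whenever we need to
 * be sure the send side is flushed and at least close to empty.  This
 * can be needed when some send error conditions happen, or during chip
 * cleanup of pending sends after a link goes down or is reset.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */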
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Tell PSM to disarm buffers again before trying to reuse them.
	 * We need to be sure the rcd doesn't change out from under us
	 * while we do so, so we hold the two locks sequentially.  We might
	 * needlessly set some need_disarm bits as a result, if the
	 * context is closed after we release the uctxt_lock, but that's
	 * fairly benign, and safer than nesting the locks.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}

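/*
 * Force an update of the in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.  The update is done by asking
 * the chip to blip the avail-update mechanism.
 */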
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel sends when the link goes DOWN, unless IB link
	 * autonegotiation is in progress.
	 */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}

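/*
 * Link went to the INIT state; arm the HoL timer so that sends keep
 * getting flushed until the link comes all the way up.
 */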
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}

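/*
 * Link is up; continue any user processes.  Let the timer keep running,
 * if set; it will nop when it sees the link is up.
 */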
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}

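/*
 * HoL timer callback: while the link is not up, keep flushing sends
 * and re-arming the timer.
 */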
void qib_hol_event(struct timer_list *t)
{
	struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);

	/* If hardware error, etc, skip. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Try to flush sends in case a stuck state machine
		 * interferes with the normal flow.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}