#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "qib.h"
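
/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */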
const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";

DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>");
MODULE_DESCRIPTION("Cornelis IB driver");
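
/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.
 */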
#define QIB_PIO_MAXIBHDR 128
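
/*
 * QIB_MAX_PKT_RECV is the max # of packets processed per receive call.
 */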
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(ibdev,
					      struct qib_devdata, verbs_dev);
	return dd->pcidev;
}
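
/*
 * Return the count of units with at least one port that has a LID
 * and is in the INIT, ARMED, or ACTIVE link state.
 */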
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&qib_dev_table, flags);
	xa_for_each(&qib_dev_table, index, dd) {
		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&qib_dev_table, flags);
	return nunits_active;
}
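
/*
 * Return the total number of units; optionally report the number of
 * present (usable) units in *npresentp and the number of ports that
 * are up in *nupp.
 */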
int qib_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct qib_devdata *dd;
	unsigned long index, flags;
	int pidx;
	struct qib_pportdata *ppd;

	xa_lock_irqsave(&qib_dev_table, flags);
	xa_for_each(&qib_dev_table, index, dd) {
		nunits++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE)))
				nup++;
		}
	}
	xa_unlock_irqrestore(&qib_dev_table, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}
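
/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for IB link state change to occur.
 * Returns 0 if state is reached, otherwise -ETIMEDOUT, or -EBUSY if
 * another caller is already waiting on a state.
 */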
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}

int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
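
		/*
		 * Clear QIBL_LINKV so we can wait for it to be set again
		 * when the link-state change is reported, confirming that
		 * the transition to ARMED has taken effect.
		 */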
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}
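
/*
 * Given an eager buffer index, return the pointer to that buffer
 * within its allocated chunk.
 */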
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
	const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);

	return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}
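
/*
 * Returns 1 if the error was a CRC error, else 0; needed because some
 * chips synthesize CRC error counts from this return value.
 */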
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
			  u32 ctxt, u32 eflags, u32 l, u32 etail,
			  __le32 *rhf_addr, struct qib_message_header *rhdr)
{
	u32 ret = 0;

	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
		ret = 1;
	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct ib_header *hdr = (struct ib_header *)rhdr;
		struct ib_other_headers *ohdr = NULL;
		struct qib_ibport *ibp = &ppd->ibport_data;
		struct qib_devdata *dd = ppd->dd;
		struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
		struct rvt_qp *qp = NULL;
		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
		u32 qp_num;
		u32 opcode;
		u32 psn;
		int diff;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
			lid &= ~((1 << ppd->lmc) - 1);
			if (unlikely(lid != ppd->lid))
				goto drop;
		}

		/* Check for GRH */
		if (lnh == QIB_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == QIB_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		} else
			goto drop;

		/* Get opcode and PSN from packet */
		opcode = be32_to_cpu(ohdr->bth[0]);
		opcode >>= 24;
		psn = be32_to_cpu(ohdr->bth[2]);

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		if (qp_num != QIB_MULTICAST_QPN) {
			int ruc_res;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
				goto unlock;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				ruc_res =
					qib_ruc_check_hdr(
						ibp, hdr,
						lnh == QIB_LRH_GRH,
						qp,
						be32_to_cpu(ohdr->bth[0]));
				if (ruc_res)
					goto unlock;

				/* Only deal with RDMA Writes for now */
				if (opcode <
				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
					diff = qib_cmp24(psn, qp->r_psn);
					if (!qp->r_nak_state && diff >= 0) {
						ibp->rvp.n_rc_seqnak++;
						qp->r_nak_state =
							IB_NAK_PSN_ERROR;
						/* Use the expected PSN. */
						qp->r_ack_psn = qp->r_psn;
						/*
						 * Wait to send the sequence
						 * NAK until all packets
						 * in the receive queue have
						 * been processed.
						 * Otherwise, we end up
						 * propagating congestion.
						 */
						if (list_empty(&qp->rspwait)) {
							qp->r_flags |=
								RVT_R_RSP_NAK;
							rvt_get_qp(qp);
							list_add_tail(
							 &qp->rspwait,
							 &rcd->qp_wait_list);
						}
					} /* Out of sequence NAK */
				} /* QP Request NAKs */
				break;
			case IB_QPT_SMI:
			case IB_QPT_GSI:
			case IB_QPT_UD:
			case IB_QPT_UC:
			default:
				/* For now don't handle any other QP types */
				break;
			}

unlock:
			spin_unlock(&qp->r_lock);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

drop:
	return ret;
}
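
/*
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track crcs for lli)
 * @npkts: number of packets processed
 *
 * Called from interrupt handler for errors or receive interrupt.
 * Returns the number of CRC error packets; the count is adjusted down
 * by subsequent good packets, as is *llic when supplied.
 */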
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct rvt_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
	if (dd->flags & QIB_NODMA_RTAIL) {
		u32 seq = qib_hdrget_seq(rhf_addr);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();	/* prevent speculative reads of dma'ed hdrq */
	}

	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD) {
				ebuf = qib_get_egrbuf(rcd, etail);
				prefetch_range(ebuf, tlen - sizeof(*hdr));
			}
		}
		if (!eflags) {
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
			    qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}
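
		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */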
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (i == QIB_MAX_PKT_RECV)
			last = 1;

		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
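
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */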
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
			updegr = 0;
		}
	}

	rcd->head = l;
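
	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */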
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;
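
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */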
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
	return crcs;
}
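
/**
 * qib_set_mtu - set the MTU
 * @ppd: the per-port data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We check that it's a valid IB
 * MTU (256..4096) and no larger than the cap set by the ibmtu module
 * parameter, but nothing else.
 */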
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	chk = ib_mtu_enum_to_int(qib_ibmtu);
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}

int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}
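
/*
 * The following deal with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And, of course, we don't want to run the timer before we are initialized.
 */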
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
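/* Below is "non-zero" to force override, but both actual LEDs are off */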
#define LED_OVER_BOTH_OFF (8)

static void qib_run_led_override(struct timer_list *t)
{
	struct qib_pportdata *ppd = from_timer(ppd, t,
					       led_override_timer);
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
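
	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */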
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nibble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;
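
	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */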
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}
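
/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the
 * chip (that is, much like a driver unload/reload).  For now, we only
 * allow this if no user contexts are open that use chip resources.
 */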
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd,
			    "Reinitialize unit %u after reset failed with %d\n",
			    unit, ret);
	else
		qib_devinfo(dd->pcidev,
			    "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}