#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>

#include "iw_cxgb4.h"

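/*
 * Read the TPT entry for @stag with cxgb4_read_tpte() and decode its
 * fields (validity, key, state, PDID, permissions, page size, length
 * and VA) into the debug log.
 */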
static void print_tpte(struct c4iw_dev *dev, u32 stag)
{
	int ret;
	struct fw_ri_tpte tpte;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return;
	}
	pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		 stag & 0xffffff00,
		 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}

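/*
 * Log the error CQE at error level, dump its raw contents at debug
 * level, and decode the offending TPT entry when one is implicated.
 */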
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	__be64 *p = (void *)err_cqe;

	dev_err(&dev->rdev.lldi.pdev->dev,
		"AE qpid %d opcode %d status 0x%x "
		"type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
		CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

	pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
		 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
		 be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
		 be64_to_cpu(p[6]), be64_to_cpu(p[7]));
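	/*
	 * Ingress WRITE and READ_RESP errors provide
	 * the offending stag, so parse and log it.
	 */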
	if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
				 CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
		print_tpte(dev, CQE_WRID_STAG(err_cqe));
}

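/*
 * Post an affiliated asynchronous event for an error CQE: move an RTS
 * QP to TERMINATE, call the consumer's event handler, and run the
 * completion handler if the CQ was armed.
 */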
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
			  struct c4iw_qp *qhp,
			  struct t4_cqe *err_cqe,
			  enum ib_event_type ib_event)
{
	struct ib_event event;
	struct c4iw_qp_attributes attrs;
	unsigned long flag;

	dump_err_cqe(dev, err_cqe);

	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
	}

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;
	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

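	/* If the CQ was armed, notify the consumer so it polls the CQ. */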
	if (t4_clear_cq_armed(&chp->cq)) {
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
	}
}

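/*
 * Dispatch an async error CQE from the LLD: look up the QP and the CQ
 * attached to the offending queue, then map the T4 error status onto
 * the matching ib_event_type.
 */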
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	xa_lock_irq(&dev->qps);
	qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
	if (!qhp) {
		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		xa_unlock_irq(&dev->qps);
		goto out;
	}

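	/* The error belongs to either the send or the receive CQ. */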
	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		xa_unlock_irq(&dev->qps);
		goto out;
	}

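	/* Hold references so the QP and CQ outlive the event posting. */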
	c4iw_qp_add_ref(&qhp->ibqp);
	refcount_inc(&chp->refcnt);
	xa_unlock_irq(&dev->qps);
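	/* Bad incoming write */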
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {
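	/* Completion Events */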
	case T4_ERR_SUCCESS:
		pr_err("AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;
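	/* Device Fatal Errors */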
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;
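	/* QP Fatal Errors */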
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	c4iw_cq_rem_ref(chp);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}

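/*
 * Handle a CQ event from the LLD: take a reference on the CQ, clear
 * its armed state, and invoke the consumer's completion handler.
 */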
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
	struct c4iw_cq *chp;
	unsigned long flag;

	xa_lock_irqsave(&dev->cqs, flag);
	chp = xa_load(&dev->cqs, qid);
	if (chp) {
		refcount_inc(&chp->refcnt);
		xa_unlock_irqrestore(&dev->cqs, flag);
		t4_clear_cq_armed(&chp->cq);
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		c4iw_cq_rem_ref(chp);
	} else {
		pr_debug("unknown cqid 0x%x\n", qid);
		xa_unlock_irqrestore(&dev->cqs, flag);
	}
	return 0;
}