#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

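/* Convert a QPN bitmap (page, bit offset) pair back into a QP number. */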
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

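/*
 * Advance to the next candidate offset in a QPN bitmap page: when the
 * device uses a QPN mask, skip offsets whose masked index is not below
 * n (the number of kernel receive queues); otherwise scan for the next
 * zero bit.
 */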
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

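/*
 * Post-send work request parameters, indexed by opcode: the WR structure
 * size and the QP types that support each opcode.
 */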
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_RDMA_READ] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC,
	},

	[IB_WR_ATOMIC_CMP_AND_SWP] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},

	[IB_WR_ATOMIC_FETCH_AND_ADD] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},

	[IB_WR_RDMA_WRITE_WITH_IMM] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_SEND] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

	[IB_WR_SEND_WITH_IMM] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},

};

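/* Allocate and install the next QPN bitmap page. */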
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

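	/*
	 * Publish the new page under the table lock; free it if another
	 * caller installed a page first.
	 */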
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

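/*
 * Allocate the next available QPN, or claim the reserved QP0/QP1 slot
 * when an IB_QPT_SMI/IB_QPT_GSI QP is created on the given port.
 */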
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u32 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		u32 n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
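			/*
			 * The qpn computed here may be bogus when offset has
			 * run past RVT_BITS_PER_PAGE; that is fine, it is
			 * recomputed below before the next scan.
			 */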
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
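		/*
		 * To keep the number of bitmap pages to a minimum, scan all
		 * existing pages before growing the table.
		 */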
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

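/*
 * qib_free_all_qps - return the number of special QPs (QP0/QP1) still
 * in use across the device's ports.
 */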
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

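/* rdmavt hook: a QP is being reset, so clear its send-DMA busy count. */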
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

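/*
 * rdmavt hook: the QP has moved to the error state.  Take it off the
 * I/O wait list and release the send resources it holds, unless the
 * send engine is currently busy with it.
 */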
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

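/* Map an MTU in bytes to the IB MTU enum, defaulting to IB_MTU_2048. */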
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

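/*
 * Validate the requested path MTU and clamp it to what the port
 * hardware supports; returns an IB MTU enum or -EINVAL.
 */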
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

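/* Conversions between byte MTUs and IB path MTU enums for rdmavt. */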
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

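/*
 * Allocate the qib-private portion of a QP: the send header buffer,
 * send work item, DMA wait queue and I/O wait list entry.
 */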
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}

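/* Wait for any outstanding send DMA to complete and drop the pending txreq. */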
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

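/* Take the QP off the I/O wait list if it is queued there. */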
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

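/**
 * qib_check_send_wqe - validate a send work request
 * @qp: the QP the WQE was posted to
 * @wqe: the built send WQE
 * @call_send: updated to indicate whether the send engine should be
 *	       called directly or scheduled
 *
 * Returns 0 on success or -EINVAL if the WQE length is invalid for the
 * QP type.
 */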
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe, bool *call_send)
{
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = rvt_get_swqe_ah(wqe);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;

		*call_send = true;
		break;
	default:
		break;
	}
	return 0;
}

#ifdef CONFIG_DEBUG_FS

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

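/*
 * qib_qp_iter_print - dump one QP's send-side state into a debugfs
 * seq_file.
 */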
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}

#endif