// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

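/* Validate requested QP capabilities against the device limits held in
 * rxe->attr. Called below from rxe_qp_chk_init() at create time and from
 * rxe_qp_chk_attr() when the caps are changed by modify qp.
 */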
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

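/* Validate the create-time attributes for a new QP; called from the
 * create qp verb path before any QP resources are set up.
 */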
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}


static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

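/* The responder's read/atomic resource array is sized by
 * max_dest_rd_atomic and is (re)allocated from rxe_qp_from_attr().
 * free_rd_atomic_resource() only clears the type field; the slot
 * itself stays in the array and is recycled in place.
 */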
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;	/* enum value 1 is IB_MTU_256 */
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->elem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->req.task.state_lock);
	spin_lock_init(&qp->resp.task.state_lock);
	spin_lock_init(&qp->comp.task.state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

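/* Set up the requester side of the QP: the kernel UDP socket used to
 * transmit packets, the send queue and its mmap info shared with
 * user space, and the requester/completer tasks.
 */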
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				      wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

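/* Set up the responder side of the QP: the receive queue and its mmap
 * info (unless an SRQ is attached), and the responder task.
 */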
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					      wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->comp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec: timeout = 4.096us * 2^attr->timeout */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}
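	/* e.g. attr->timeout == 14 gives 4096ns << 14 == ~67.1ms, and
	 * qp_timeout_jiffies is clamped to at least 1 so a nonzero
	 * timeout never rounds down to "no timer".
	 */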

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			qp->comp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			qp->comp.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
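
/* A sketch of how a verbs consumer typically drives the transitions
 * above through ib_modify_qp(); the attributes and mask flags here are
 * the standard ib core ones, not something defined in this file:
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.pkey_index = 0;
 *	attr.port_num = 1;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
 *	err = ib_modify_qp(ibqp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * followed by RTR (with path/AV attributes) and RTS (with PSN and
 * timeout/retry attributes).
 */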

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* See IBA o10-2.2.3
	 * An attempt to destroy a QP while attached to a multicast
	 * group will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		pr_debug("Attempt to destroy QP while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* pool element cleanup callback; defers the actual teardown in
 * rxe_qp_do_cleanup() to process context
 */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}