/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses: GPL-2.0 or the OpenIB.org BSD license.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_rdma_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

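/* GSI ("General Services Interface") support: QP1 traffic (the MADs used
 * by connection management) is not offloaded to the RoCE engine.  The
 * driver builds UD headers in software, transmits the raw frames through
 * the qed light-L2 (LL2) interface, and completes work requests against
 * the GSI QP's software-managed rings below.
 */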
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
	dev->gsi_qp = qp;
}

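/* LL2 TX completion: runs once the hardware has consumed a GSI frame.
 * Frees the DMA-coherent header buffer allocated at build time, advances
 * the software consumer index, and notifies the send CQ's completion
 * handler so the consumer can poll the CQE.
 */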
static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qedr_dev *dev = (struct qedr_dev *)cxt;
	struct qed_roce_ll2_packet *pkt = cookie;
	struct qedr_cq *cq = dev->gsi_sqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
		 cq->ibcq.comp_handler ? "Yes" : "No");

	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
			  pkt->header.baddr);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);
	qedr_inc_sw_gsi_cons(&qp->sq);
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
}

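/* LL2 RX completion: records the length, VLAN, and source MAC (carried in
 * the LL2 opaque data words) for the oldest posted receive buffer, then
 * advances the software consumer index and notifies the recv CQ.
 */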
static void qedr_ll2_complete_rx_packet(void *cxt,
					struct qed_ll2_comp_rx_data *data)
{
	struct qedr_dev *dev = (struct qedr_dev *)cxt;
	struct qedr_cq *cq = dev->gsi_rqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
		-EINVAL : 0;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
	/* note: length stands for data length i.e. GRH is excluded */
	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
		data->length.data_length;
	/* the source MAC arrives packed into the two opaque data words */
	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
		ntohl(data->opaque_data_0);
	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
		ntohs((u16)data->opaque_data_1);

	qedr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
}

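/* RX buffers are posted directly from the consumer's sg_list in
 * qedr_gsi_post_recv(), so the driver owns no RX memory of its own and
 * there is nothing to reclaim when LL2 releases a buffer.
 */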
static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle,
				       void *cookie, dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
}

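/* Tear down the firmware objects backing the GSI CQs.  Once the QP is
 * marked as GSI, completions are generated by the driver itself, so the
 * firmware CQs created by the generic verbs path are no longer needed.
 */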
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
				struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qedr_cq *cq;

	cq = get_qedr_cq(attrs->send_cq);
	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	cq = get_qedr_cq(attrs->recv_cq);
	/* if a dedicated recv_cq was used, delete it too */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
}

static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
					  struct ib_qp_init_attr *attrs)
{
	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_send_wr is too large %d>%d\n",
		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
		return -EINVAL;
	}

	return 0;
}

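/* Hand a fully built GSI packet to LL2: the UD header goes out as the
 * first BD, followed by one fragment BD per payload SGE.
 */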
static int qedr_ll2_post_tx(struct qedr_dev *dev,
			    struct qed_roce_ll2_packet *pkt)
{
	enum qed_ll2_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_pkt_info ll2_tx_pkt;
	int rc;
	int i;

	memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
	    QED_LL2_ROCE : QED_LL2_RROCE;

	if (pkt->roce_mode == ROCE_V2_IPV4)
		ll2_tx_pkt.enable_ip_cksum = 1;

	ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
	ll2_tx_pkt.vlan = 0;
	ll2_tx_pkt.tx_dest = pkt->tx_dest;
	ll2_tx_pkt.qed_roce_flavor = roce_flavor;
	ll2_tx_pkt.first_frag = pkt->header.baddr;
	ll2_tx_pkt.first_frag_len = pkt->header.len;
	ll2_tx_pkt.cookie = pkt;

	/* tx header */
	rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
					     dev->gsi_ll2_handle,
					     &ll2_tx_pkt, 1);
	if (rc) {
		/* TX failed while posting header - release resources */
		dma_free_coherent(&dev->pdev->dev, pkt->header.len,
				  pkt->header.vaddr, pkt->header.baddr);
		kfree(pkt);

		DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
		return rc;
	}

	/* tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = dev->ops->ll2_set_fragment_of_tx_packet(
			dev->rdma_ctx,
			dev->gsi_ll2_handle,
			pkt->payload[i].baddr,
			pkt->payload[i].len);

		if (rc) {
			/* If a fragment fails there is not much we can do:
			 * the header has already been posted, so the buffers
			 * must stay alive until the TX completion runs.
			 */
			DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
			return rc;
		}
	}

	return 0;
}

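/* Quiesce the GSI LL2 connection: drop the MAC filter, terminate and
 * release the connection, and mark the handle unused so a repeated stop
 * is a no-op.
 */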
static int qedr_ll2_stop(struct qedr_dev *dev)
{
	int rc;

	if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	/* remove LL2 MAC address filter */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address, NULL);

	rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc)
		DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);

	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;

	return rc;
}

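/* Bring up the LL2 connection that carries GSI traffic: register the
 * completion callbacks, size the rings from the QP caps, establish the
 * connection, and install a unicast MAC filter for the netdev address.
 */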
static int qedr_ll2_start(struct qedr_dev *dev,
			  struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc;

	/* configure and start LL2 */
	cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
	cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
	cbs.rx_release_cb = qedr_ll2_release_rx_packet;
	cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
	cbs.cookie = dev;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_ROCE;
	data.input.mtu = dev->ndev->mtu;
	data.input.rx_num_desc = attrs->cap.max_recv_wr;
	data.input.rx_drop_ttl0_flg = true;
	data.input.rx_vlan_removal_en = false;
	data.input.tx_num_desc = attrs->cap.max_send_wr;
	data.input.tx_tc = 0;
	data.input.tx_dest = QED_LL2_TX_DEST_NW;
	data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
	data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
	data.input.gsi_enable = 1;
	data.p_connection_handle = &dev->gsi_ll2_handle;
	data.cbs = &cbs;

	rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
	if (rc) {
		DP_ERR(dev,
		       "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
		       rc);
		return rc;
	}

	rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc) {
		DP_ERR(dev,
		       "ll2 start: failed to establish LL2 connection (rc=%d)\n",
		       rc);
		goto err1;
	}

	rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
	if (rc)
		goto err2;

	return 0;

err2:
	dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	return rc;
}

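/* Create the GSI QP (QP1).  The heavy lifting is the LL2 bring-up; the QP
 * itself only needs software rings for tracking work-request IDs, and its
 * CQs are converted to driver-managed GSI CQs.
 */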
int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
		       struct qedr_qp *qp)
{
	int rc;

	rc = qedr_check_gsi_qp_attrs(dev, attrs);
	if (rc)
		return rc;

	rc = qedr_ll2_start(dev, attrs, qp);
	if (rc) {
		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
		return rc;
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		goto err;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		goto err;

	qedr_store_gsi_qp_cq(dev, qp, attrs);
	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	/* the GSI CQs are handled by the driver so remove them from the FW */
	qedr_destroy_gsi_cq(dev, attrs);
	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;

	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

	return 0;

err:
	kfree(qp->rqe_wr_id);

	rc = qedr_ll2_stop(dev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed to stop LL2 in error flow (rc=%d)\n",
		       rc);

	return -ENOMEM;
}

int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	return qedr_ll2_stop(dev);
}

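/* Build the UD header for an outgoing GSI send.  The RoCE flavor is
 * derived from the AH's source GID attributes: no UDP encapsulation means
 * RoCE v1 (GRH over Ethertype 0x8915), a v4-mapped GID means RoCE v2 over
 * IPv4, and anything else means RoCE v2 over IPv6.
 */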
#define QEDR_MAX_UD_HEADER_SIZE (100)
#define QEDR_GSI_QPN (1)
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					const struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	int rc;
	int ip_ver = 0;
	bool has_udp = false;
	int i;

	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
	if (rc)
		return rc;

	/* vlan_id is 0xffff when the GID has no VLAN; valid IDs are < 4096 */
	if (vlan_id < VLAN_CFI_MASK)
		has_vlan = true;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);	/* well-known GSI QKey */
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* the IPv4 checksum is computed by the NIC; see the
		 * enable_ip_cksum setting in qedr_ll2_post_tx()
		 */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;	/* zero UDP checksum is legal for RoCE v2 */
	}
	return 0;
}

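/* Assemble a qed_roce_ll2_packet for one send WR: pack the UD header into
 * a DMA-coherent buffer and reference the payload SGEs in place.  Frames
 * addressed to our own MAC are steered to the loopback path.
 */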
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
					struct qedr_qp *qp,
					const struct ib_send_wr *swr,
					struct qed_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
	struct qed_roce_ll2_packet *packet;
	struct pci_dev *pdev = dev->pdev;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	*p_packet = NULL;

	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc)
		return rc;

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet)
		return -ENOMEM;

	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
						  &packet->header.baddr,
						  GFP_ATOMIC);
	if (!packet->header.vaddr) {
		kfree(packet);
		return -ENOMEM;
	}

	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
		packet->tx_dest = QED_LL2_TX_DEST_LB;
	else
		packet->tx_dest = QED_LL2_TX_DEST_NW;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
	}

	*p_packet = packet;

	return 0;
}

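/* post_send for the GSI QP.  Unlike the fast path there is no doorbell:
 * each WR is translated to an LL2 packet and posted synchronously, so
 * only a single unchained WR per call is supported.
 */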
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		       const struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post send: failed to post tx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev,
		       "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = qedr_ll2_post_tx(dev, pkt);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode,
			 wr->wr_id);
	} else {
		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev,
		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	return rc;

err:
	*bad_wr = wr;
	return rc;
}

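/* post_recv for the GSI QP: each buffer is handed straight to LL2 (only
 * the first SGE of a WR is used) and its wr_id is remembered in the
 * software ring for qedr_gsi_poll_cq().
 */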
int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
		       qp->state);
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
			rc = -EINVAL;
			goto err;
		}

		rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
						  dev->gsi_ll2_handle,
						  wr->sg_list[0].addr,
						  wr->sg_list[0].length,
						  NULL /* cookie */,
						  1 /* notify_fw */);
		if (rc) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
			       rc);
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;
	return rc;
}

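/* Poll the GSI QP's software rings: RQ completions are drained first
 * (with the smac/VLAN metadata captured at RX time), then SQ completions.
 * Returns the number of wc entries filled.
 */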
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	u16 vlan_id;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
			  VLAN_VID_MASK;
		if (vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = vlan_id;
			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
				    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%d, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}