#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

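/* EQ callback for PROTOCOLID_ROCE events. DESTROY_QP_DONE completions
 * release the QP's real icid here; SRQ empty/limit events are forwarded
 * to the upper layer with the SRQ id, and any other affiliated event is
 * passed up with the raw async handle.
 */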
static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				__le16 echo, union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
	union rdma_eqe_data *rdata = &data->rdma_data;

	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in qed_roce_sp_destroy_qp_responder.
		 */
		qed_roce_free_real_icid(p_hwfn, icid);
	} else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
		   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
		u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);

		events.affiliated_event(events.context, fw_event_code,
					&srq_id);
	} else {
		events.affiliated_event(events.context, fw_event_code,
					(void *)&rdata->async_handle);
	}

	return 0;
}

void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the user after
	 * the synchronous part. The asynchronous part may take a little longer.
	 * We delay for a short while if an async destroy QP is still expected.
	 * Beyond the added delay we clear the bitmap anyway.
	 */
	while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
		/* If the HW device is during recovery, all resources are
		 * immediately reset without receiving a per-cid indication
		 * from HW. In this case we don't expect the cid bitmap to be
		 * cleared.
		 */
		if (p_hwfn->cdev->recov_in_prog)
			return;

		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}
}

static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

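/* Map the driver's roce_mode onto the FW ramrod "flavor": plain RoCE v1 or
 * routable RoCE v2 over IPv4/IPv6. MAX_ROCE_FLAVOR marks an invalid mode.
 */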
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	switch (roce_mode) {
	case ROCE_V1:
		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
		return RROCE_IPV4;
	case ROCE_V2_IPV6:
		return RROCE_IPV6;
	default:
		return MAX_ROCE_FLAVOR;
	}
}

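/* Release both cids of a QP pair - the responder cid and the adjacent
 * (cid + 1) requester cid allocated together in qed_roce_alloc_cid().
 */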
static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

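/* Allocate two adjacent cids for a new QP: the lower (even) one serves the
 * responder and the higher (odd) one the requester. Only the responder cid
 * is returned; callers derive the requester cid as cid + 1.
 */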
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icids should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent QPs\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

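/* Derive the traffic class for a QP: if the QP carries a VLAN, map its
 * PCP priority through DCBX; otherwise fall back to TC 0.
 */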
static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u8 pri, tc = 0;

	if (qp->vlan_id) {
		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u tc: %u (vlan priority %s)\n",
		   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");

	return tc;
}

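/* Offload the responder side of a QP to the FW: allocate the IRQ ring,
 * build the create-QP ramrod from the QP attributes, post it, and mark
 * the responder's real cid as in use.
 */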
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	u32 flags = 0;
	int rc;
	u8 tc;

	if (!qp->has_resp)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
		  qed_roce_mode_to_flavor(qp->roce_mode));

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
		  qed_rdma_is_xrc_qp(qp));

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
	p_ramrod->flags = cpu_to_le32(flags);
	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);
	p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod = 0;

	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn, qp->icid -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

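/* Offload the requester side of a QP to the FW. Mirrors the responder
 * flow: allocate the ORQ ring, build and post the create-QP ramrod on
 * cid icid + 1, and mark the requester's real cid as in use.
 */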
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	u16 flags = 0;
	int rc;
	u8 tc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
		  qed_roce_mode_to_flavor(qp->roce_mode));

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
		  qp->signal_all);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
		  qp->retry_cnt);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
		  qed_rdma_is_xrc_qp(qp));

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;
	p_ramrod->flags = cpu_to_le16(flags);

	SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
		  qp->edpm_mode);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
	p_ramrod->cq_cid =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->req_offloaded = true;
	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn,
			      qp->icid + 1 -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}

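/* Post a modify-QP ramrod for the responder. modify_flags selects which
 * attributes the FW should update; move_to_err forces the ERR state and
 * is a no-op if the responder was never offloaded.
 */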
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags = 0;
	int rc;

	if (!qp->has_resp)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  !!move_to_err);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
	p_ramrod->flags = cpu_to_le16(flags);

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

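/* Post a modify-QP ramrod for the requester (cid icid + 1). In addition
 * to move_to_err, the requester can be moved to SQD (send-queue drain).
 */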
static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags = 0;
	int rc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  !!move_to_err);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
		  !!move_to_sqd);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
	p_ramrod->flags = cpu_to_le16(flags);

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
	SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

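/* Tear down the responder. If it was never offloaded only the cid pair is
 * freed; otherwise a destroy ramrod is posted and the CQ producer reported
 * by the FW is returned through cq_prod.
 */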
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	if (!qp->has_resp) {
		*cq_prod = 0;
		return 0;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If the responder was never offloaded to the FW, we need to
		 * free the cids allocated in create_qp, as the FW async event
		 * for this QP will never arrive.
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

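/* Tear down the requester: post a destroy ramrod on cid icid + 1 and free
 * the ORQ ring once the FW is done with it.
 */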
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

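/* Query QP state from the FW. The responder is queried for RQ-PSN and
 * error state, the requester for SQ-PSN, error and SQ-draining state; if
 * neither side is offloaded yet, the cached SW state is returned instead.
 */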
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send a ramrod to the FW since this QP wasn't
		 * offloaded to the FW yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res =
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
	    GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
		      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

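/* Destroy a QP. The QP must be in RESET, ERR or INIT; for a QP in RESET
 * both sides were already destroyed by the modify-QP flow, so only the
 * other states post destroy ramrods here.
 */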
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 cq_prod;
	int rc;

	/* QPs are only destroyed from a quiesced state */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
		if (rc)
			return rc;
	}

	return 0;
}

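/* Drive the FW state machine for a modify-QP verb: depending on the
 * (prev_state, cur_state) pair this creates, modifies or destroys the
 * responder and/or requester. Transitions not listed need no ramrod.
 */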
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
		       struct qed_rdma_qp *qp,
		       enum qed_roce_qp_state prev_state,
		       struct qed_rdma_modify_qp_in_params *params)
{
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR->RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &cq_prod);

		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "no ramrod required for this state transition\n");
	}

	return rc;
}

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as an input can be either. We calculate
	 * the "partner" icid and call it xcid. Only if both are free then the
	 * "cid" map can be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest
	 * DCBx update. Otherwise enable it.
	 */
	p_hwfn->dcbx_no_edpm = !!qed_rdma_allocated_qps(p_hwfn);

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

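/* Register the RoCE async event callback so that EQ events arriving with
 * PROTOCOLID_ROCE are dispatched to qed_roce_async_event().
 */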
int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}

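/* One-time RoCE HW init: zero PRS_REG_ROCE_DEST_QP_MAX_PF, select the RoCE
 * parser search register and enable the RoCE ethertype for light L2.
 * The first RoCE cid must be even because each QP consumes an (even
 * responder, odd requester) cid pair.
 */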
int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}