// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_nvmetcp.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_sp.h"
#include "qed_reg_addr.h"
#include "qed_nvmetcp_fw_funcs.h"

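/* Forward a TCP_ULP firmware async event to the upper-layer callback
 * registered in qed_sp_nvmetcp_func_start().
 */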
static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
                   u16 echo, union event_ring_data *data,
                   u8 fw_return_code)
{
    if (p_hwfn->p_nvmetcp_info->event_cb) {
        struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;

        return p_nvmetcp->event_cb(p_nvmetcp->event_context,
                     fw_event_code, data);
    } else {
        DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");

        return -EINVAL;
    }
}

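/* Post the NVMeTCP function-init ramrod on the slow path queue: ring
 * sizes, the LL2 OOO RX queue, CQ/CMDQ parameters and TCP timers are
 * taken from the PF params, and the async event callback is registered
 * here before the ramrod is posted.
 */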
static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
                     enum spq_mode comp_mode,
                     struct qed_spq_comp_cb *p_comp_addr,
                     void *event_context,
                     nvmetcp_event_cb_t async_event_cb)
{
    struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
    struct qed_nvmetcp_pf_params *p_params = NULL;
    struct scsi_init_func_queues *p_queue = NULL;
    struct nvmetcp_spe_func_init *p_init = NULL;
    struct qed_sp_init_data init_data = {};
    struct qed_spq_entry *p_ent = NULL;
    int rc = 0;
    u16 val;
    u8 i;

    /* Get SPQ entry */
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_addr;
    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
                 PROTOCOLID_TCP_ULP, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.nvmetcp_init;
    p_init = &p_ramrod->nvmetcp_init_spe;
    p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
    p_queue = &p_init->q_params;
    p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
    p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
    p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
    p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
                    p_params->ll2_ooo_queue_id;
    SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
    p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
    p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
    p_init->debug_flags = p_params->debug_mode;
    DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
               p_params->glbl_q_params_addr);
    p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
    p_queue->num_queues = p_params->num_queues;
    val = RESC_START(p_hwfn, QED_CMDQS_CQS);
    p_queue->queue_relative_offset = cpu_to_le16((u16)val);
    p_queue->cq_sb_pi = p_params->gl_rq_pi;

    for (i = 0; i < p_params->num_queues; i++) {
        val = qed_get_igu_sb_id(p_hwfn, i);
        p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
    }

    SET_FIELD(p_queue->q_validity,
          SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
    p_queue->cmdq_num_entries = 0;
    p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
    p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
    p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
    p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
    p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
    SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
          NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
    p_hwfn->p_nvmetcp_info->event_context = event_context;
    p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
    qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
                  qed_nvmetcp_async_event);

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

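/* Post the NVMeTCP function-destroy ramrod and unregister the TCP_ULP
 * async event callback.
 */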
static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
                    enum spq_mode comp_mode,
                    struct qed_spq_comp_cb *p_comp_addr)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_addr;
    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
                 PROTOCOLID_TCP_ULP, &init_data);
    if (rc)
        return rc;

    rc = qed_spq_post(p_hwfn, p_ent, NULL);
    qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);

    return rc;
}

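/* Report the common device info plus the NVMeTCP port id and number of
 * CQs of the affined hw-function.
 */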
static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
                     struct qed_dev_nvmetcp_info *info)
{
    struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
    int rc;

    memset(info, 0, sizeof(*info));
    rc = qed_fill_dev_info(cdev, &info->common);
    info->port_id = MFW_PORT(hwfn);
    info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);

    return rc;
}

static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
                     struct qed_nvmetcp_cb_ops *ops,
                     void *cookie)
{
    cdev->protocol_ops.nvmetcp = ops;
    cdev->ops_cookie = cookie;
}

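/* Stop the NVMeTCP PF function; fails if offloaded connections are still
 * present in the device hash table.
 */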
static int qed_nvmetcp_stop(struct qed_dev *cdev)
{
    int rc;

    if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
        DP_NOTICE(cdev, "nvmetcp already stopped\n");

        return 0;
    }

    if (!hash_empty(cdev->connections)) {
        DP_NOTICE(cdev,
              "Can't stop nvmetcp - not all connections were returned\n");

        return -EINVAL;
    }

    /* Stop the nvmetcp */
    rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
                      NULL);
    cdev->flags &= ~QED_FLAG_STORAGE_STARTED;

    return rc;
}

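/* Start the NVMeTCP PF function and, if a tasks struct is provided,
 * return the TID memory layout to the caller.
 */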
static int qed_nvmetcp_start(struct qed_dev *cdev,
                 struct qed_nvmetcp_tid *tasks,
                 void *event_context,
                 nvmetcp_event_cb_t async_event_cb)
{
    struct qed_tid_mem *tid_info;
    int rc;

    if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
        DP_NOTICE(cdev, "nvmetcp already started\n");

        return 0;
    }

    rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
                       QED_SPQ_MODE_EBLOCK, NULL,
                       event_context, async_event_cb);
    if (rc) {
        DP_NOTICE(cdev, "Failed to start nvmetcp\n");

        return rc;
    }

    cdev->flags |= QED_FLAG_STORAGE_STARTED;
    hash_init(cdev->connections);

    if (!tasks)
        return 0;

    tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
    if (!tid_info) {
        qed_nvmetcp_stop(cdev);

        return -ENOMEM;
    }

    rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
    if (rc) {
        DP_NOTICE(cdev, "Failed to gather task information\n");
        qed_nvmetcp_stop(cdev);
        kfree(tid_info);

        return rc;
    }

    /* Fill task information */
    tasks->size = tid_info->tid_size;
    tasks->num_tids_per_block = tid_info->num_tids_per_block;
    memcpy(tasks->blocks, tid_info->blocks,
           MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
    kfree(tid_info);

    return 0;
}

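/* Find an offloaded connection in the device hash table by its icid
 * handle. Returns NULL if storage is not started or there is no match.
 */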
static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
                             u32 handle)
{
    struct qed_hash_nvmetcp_con *hash_con = NULL;

    if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
        return NULL;

    hash_for_each_possible(cdev->connections, hash_con, node, handle) {
        if (hash_con->con->icid == handle)
            break;
    }

    if (!hash_con || hash_con->con->icid != handle)
        return NULL;

    return hash_con;
}

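/* Post the connection-offload ramrod: transmission and pure-ACK physical
 * queues, SQ/R2TQ/XHQ/UHQ PBL addresses, the CCCID-ITID table and the
 * TCP/IP parameters of the connection.
 */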
static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
                       struct qed_nvmetcp_conn *p_conn,
                       enum spq_mode comp_mode,
                       struct qed_spq_comp_cb *p_comp_addr)
{
    struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
    struct tcp_offload_params_opt2 *p_tcp = NULL;
    struct qed_sp_init_data init_data = { 0 };
    struct qed_spq_entry *p_ent = NULL;
    dma_addr_t r2tq_pbl_addr;
    dma_addr_t xhq_pbl_addr;
    dma_addr_t uhq_pbl_addr;
    u16 physical_q;
    int rc = 0;
    u8 i;

    /* Get SPQ entry */
    init_data.cid = p_conn->icid;
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_addr;
    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
                 PROTOCOLID_TCP_ULP, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;

    /* Transmission PQ is the first of the PF */
    physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
    p_conn->physical_q0 = cpu_to_le16(physical_q);
    p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);

    /* nvmetcp Pure-ACK PQ */
    physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
    p_conn->physical_q1 = cpu_to_le16(physical_q);
    p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
    p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
    DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
    r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
    DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
    xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
    DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
    uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
    DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
    p_ramrod->nvmetcp.flags = p_conn->offl_flags;
    p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
    p_ramrod->nvmetcp.initial_ack = 0;
    DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
               p_conn->nvmetcp_cccid_itid_table_addr);
    p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
         cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
    p_tcp = &p_ramrod->tcp;
    qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
                &p_tcp->remote_mac_addr_mid,
                &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
    qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
                &p_tcp->local_mac_addr_mid,
                &p_tcp->local_mac_addr_lo, p_conn->local_mac);
    p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
    p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
    p_tcp->ip_version = p_conn->ip_version;
    if (p_tcp->ip_version == TCP_IPV6) {
        for (i = 0; i < 4; i++) {
            p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
            p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
        }
    } else {
        p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
        p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
    }

    p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
    p_tcp->ttl = p_conn->ttl;
    p_tcp->tos_or_tc = p_conn->tos_or_tc;
    p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
    p_tcp->local_port = cpu_to_le16(p_conn->local_port);
    p_tcp->mss = cpu_to_le16(p_conn->mss);
    p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
    p_tcp->connect_mode = p_conn->connect_mode;
    p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
    p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
    p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
    p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
    p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

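/* Post the connection-update ramrod with the update flags, sequence and
 * PDU length limits.
 */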
static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
                      struct qed_nvmetcp_conn *p_conn,
                      enum spq_mode comp_mode,
                      struct qed_spq_comp_cb *p_comp_addr)
{
    struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc = -EINVAL;
    u32 dval;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = p_conn->icid;
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_addr;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
                 PROTOCOLID_TCP_ULP, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
    p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
    p_ramrod->flags = p_conn->update_flag;
    p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
    dval = p_conn->max_recv_pdu_length;
    p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
    dval = p_conn->max_send_pdu_length;
    p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
    p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

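/* Post the connection-termination ramrod; the abortive flag selects
 * between graceful and abortive teardown.
 */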
static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
                     struct qed_nvmetcp_conn *p_conn,
                     enum spq_mode comp_mode,
                     struct qed_spq_comp_cb *p_comp_addr)
{
    struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc = -EINVAL;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = p_conn->icid;
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_addr;
    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
                 PROTOCOLID_TCP_ULP, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
    p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
    p_ramrod->abortive = p_conn->abortive_dsconnect;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

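/* Post the clear-SQ ramrod for the given connection. */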
static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
                    struct qed_nvmetcp_conn *p_conn,
                    enum spq_mode comp_mode,
                    struct qed_spq_comp_cb *p_comp_addr)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc = -EINVAL;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = p_conn->icid;
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_addr;
    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
                 PROTOCOLID_TCP_ULP, &init_data);
    if (rc)
        return rc;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

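/* Return the doorbell address for the given CID. */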
static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
    return (u8 __iomem *)p_hwfn->doorbells +
                 qed_db_addr(cid, DQ_DEMS_LEGACY);
}

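/* Reuse a connection from the free list when available, otherwise
 * allocate a new one together with its R2TQ, UHQ and XHQ chains.
 */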
static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
                       struct qed_nvmetcp_conn **p_out_conn)
{
    struct qed_chain_init_params params = {
        .mode       = QED_CHAIN_MODE_PBL,
        .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
        .cnt_type   = QED_CHAIN_CNT_TYPE_U16,
    };
    struct qed_nvmetcp_pf_params *p_params = NULL;
    struct qed_nvmetcp_conn *p_conn = NULL;
    int rc = 0;

    /* Try finding a free connection that can be used */
    spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
    if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
        p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
                      struct qed_nvmetcp_conn, list_entry);
    if (p_conn) {
        list_del(&p_conn->list_entry);
        spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
        *p_out_conn = p_conn;

        return 0;
    }
    spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

    /* Need to allocate a new connection */
    p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
    p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
    if (!p_conn)
        return -ENOMEM;

    params.num_elems = p_params->num_r2tq_pages_in_ring *
               QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
    params.elem_size = sizeof(struct nvmetcp_wqe);
    rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
    if (rc)
        goto nomem_r2tq;

    params.num_elems = p_params->num_uhq_pages_in_ring *
               QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
    params.elem_size = sizeof(struct iscsi_uhqe);
    rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
    if (rc)
        goto nomem_uhq;

    params.elem_size = sizeof(struct iscsi_xhqe);
    rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
    if (rc)
        goto nomem;

    p_conn->free_on_delete = true;
    *p_out_conn = p_conn;

    return 0;

nomem:
    qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
nomem_uhq:
    qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
nomem_r2tq:
    kfree(p_conn);

    return -ENOMEM;
}

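/* Acquire a TCP_ULP CID and bind it to a free or newly allocated
 * connection object.
 */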
static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
                      struct qed_nvmetcp_conn **p_out_conn)
{
    struct qed_nvmetcp_conn *p_conn = NULL;
    int rc = 0;
    u32 icid;

    spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
    rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
    spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

    if (rc)
        return rc;

    rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
    if (rc) {
        spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
        qed_cxt_release_cid(p_hwfn, icid);
        spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

        return rc;
    }

    p_conn->icid = icid;
    p_conn->conn_id = (u16)icid;
    p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
    *p_out_conn = p_conn;

    return rc;
}

static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
                       struct qed_nvmetcp_conn *p_conn)
{
    spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
    list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
    qed_cxt_release_cid(p_hwfn, p_conn->icid);
    spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
}

static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
                    struct qed_nvmetcp_conn *p_conn)
{
    qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
    qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
    qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
    kfree(p_conn);
}

int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
{
    struct qed_nvmetcp_info *p_nvmetcp_info;

    p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
    if (!p_nvmetcp_info)
        return -ENOMEM;

    INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
    p_hwfn->p_nvmetcp_info = p_nvmetcp_info;

    return 0;
}

void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
{
    spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
}

void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
{
    struct qed_nvmetcp_conn *p_conn = NULL;

    if (!p_hwfn->p_nvmetcp_info)
        return;

    while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
        p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
                      struct qed_nvmetcp_conn, list_entry);
        if (p_conn) {
            list_del(&p_conn->list_entry);
            qed_nvmetcp_free_connection(p_hwfn, p_conn);
        }
    }

    kfree(p_hwfn->p_nvmetcp_info);
    p_hwfn->p_nvmetcp_info = NULL;
}

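/* Acquire a connection, add it to the device hash table and return its
 * handle, fw_cid and doorbell address to the caller.
 */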
static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
                    u32 *handle,
                    u32 *fw_cid, void __iomem **p_doorbell)
{
    struct qed_hash_nvmetcp_con *hash_con;
    int rc;

    /* Allocate a hashed connection */
    hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
    if (!hash_con)
        return -ENOMEM;

    /* Acquire the connection */
    rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
                        &hash_con->con);
    if (rc) {
        DP_NOTICE(cdev, "Failed to acquire Connection\n");
        kfree(hash_con);

        return rc;
    }

    /* Add the connection to the hash table */
    *handle = hash_con->con->icid;
    *fw_cid = hash_con->con->fw_cid;
    hash_add(cdev->connections, &hash_con->node, *handle);
    if (p_doorbell)
        *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
                              *handle);

    return 0;
}

static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
{
    struct qed_hash_nvmetcp_con *hash_con;

    hash_con = qed_nvmetcp_get_hash(cdev, handle);
    if (!hash_con) {
        DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
              handle);

        return -EINVAL;
    }

    hlist_del(&hash_con->node);
    qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
    kfree(hash_con);

    return 0;
}

static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
                    struct qed_nvmetcp_params_offload *conn_info)
{
    struct qed_hash_nvmetcp_con *hash_con;
    struct qed_nvmetcp_conn *con;

    hash_con = qed_nvmetcp_get_hash(cdev, handle);
    if (!hash_con) {
        DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
              handle);

        return -EINVAL;
    }

    /* Update the connection with information from the params */
    con = hash_con->con;

    /* FW initializations */
    con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
    con->sq_pbl_addr = conn_info->sq_pbl_addr;
    con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
    con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
    con->default_cq = conn_info->default_cq;
    SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
    SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
    SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);

    /* Networking and TCP stack initializations */
    ether_addr_copy(con->local_mac, conn_info->src.mac);
    ether_addr_copy(con->remote_mac, conn_info->dst.mac);
    memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
    memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
    con->local_port = conn_info->src.port;
    con->remote_port = conn_info->dst.port;
    con->vlan_id = conn_info->vlan_id;

    if (conn_info->timestamp_en)
        SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);

    if (conn_info->delayed_ack_en)
        SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);

    if (conn_info->tcp_keep_alive_en)
        SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);

    if (conn_info->ecn_en)
        SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);

    con->ip_version = conn_info->ip_version;
    con->flow_label = QED_TCP_FLOW_LABEL;
    con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
    con->ka_timeout = conn_info->ka_timeout;
    con->ka_interval = conn_info->ka_interval;
    con->max_rt_time = conn_info->max_rt_time;
    con->ttl = conn_info->ttl;
    con->tos_or_tc = conn_info->tos_or_tc;
    con->mss = conn_info->mss;
    con->cwnd = conn_info->cwnd;
    con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
    con->connect_mode = 0;

    return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
                     QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
                   u32 handle,
                   struct qed_nvmetcp_params_update *conn_info)
{
    struct qed_hash_nvmetcp_con *hash_con;
    struct qed_nvmetcp_conn *con;

    hash_con = qed_nvmetcp_get_hash(cdev, handle);
    if (!hash_con) {
        DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
              handle);

        return -EINVAL;
    }

    /* Update the connection with information from the params */
    con = hash_con->con;
    SET_FIELD(con->update_flag,
          ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
    SET_FIELD(con->update_flag,
          ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
    if (conn_info->hdr_digest_en)
        SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);

    if (conn_info->data_digest_en)
        SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);

    /* Placeholder - initialize pfv, cpda, hpda */

    con->max_seq_size = conn_info->max_io_size;
    con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
    con->max_send_pdu_length = conn_info->max_send_pdu_length;
    con->first_seq_length = conn_info->max_io_size;

    return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
                    QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
{
    struct qed_hash_nvmetcp_con *hash_con;

    hash_con = qed_nvmetcp_get_hash(cdev, handle);
    if (!hash_con) {
        DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
              handle);

        return -EINVAL;
    }

    return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
                        QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
                    u32 handle, u8 abrt_conn)
{
    struct qed_hash_nvmetcp_con *hash_con;

    hash_con = qed_nvmetcp_get_hash(cdev, handle);
    if (!hash_con) {
        DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
              handle);

        return -EINVAL;
    }

    hash_con->con->abortive_dsconnect = abrt_conn;

    return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
                       QED_SPQ_MODE_EBLOCK, NULL);
}

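/* Ops table returned to the NVMe/TCP offload layer by
 * qed_get_nvmetcp_ops().
 */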
static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
    .common = &qed_common_ops_pass,
    .ll2 = &qed_ll2_ops_pass,
    .fill_dev_info = &qed_fill_nvmetcp_dev_info,
    .register_ops = &qed_register_nvmetcp_ops,
    .start = &qed_nvmetcp_start,
    .stop = &qed_nvmetcp_stop,
    .acquire_conn = &qed_nvmetcp_acquire_conn,
    .release_conn = &qed_nvmetcp_release_conn,
    .offload_conn = &qed_nvmetcp_offload_conn,
    .update_conn = &qed_nvmetcp_update_conn,
    .destroy_conn = &qed_nvmetcp_destroy_conn,
    .clear_sq = &qed_nvmetcp_clear_conn_sq,
    .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
    .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
    .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
    .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
    .clear_all_filters = &qed_llh_clear_all_filters,
    .init_read_io = &init_nvmetcp_host_read_task,
    .init_write_io = &init_nvmetcp_host_write_task,
    .init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
    .init_task_cleanup = &init_cleanup_task_nvmetcp
};

const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
{
    return &qed_nvmetcp_ops_pass;
}
EXPORT_SYMBOL(qed_get_nvmetcp_ops);

void qed_put_nvmetcp_ops(void)
{
}
EXPORT_SYMBOL(qed_put_nvmetcp_ops);