// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
                struct qed_spq_entry *p_ent)
{
    /* qed_spq_get_entry() can either get an entry from the free_pool,
     * or, if no entries are left, allocate a new entry and add it to
     * the unlimited_pending list.
     */
    if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
        kfree(p_ent);
    else
        qed_spq_return_entry(p_hwfn, p_ent);
}

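/* Acquire an SPQ entry and initialize its ramrod header (CID, command ID,
 * protocol ID) and completion mode. On an invalid completion mode the
 * entry is released via qed_sp_destroy_request() and -EINVAL is returned.
 */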
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
            struct qed_spq_entry **pp_ent,
            u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
    u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
    struct qed_spq_entry *p_ent = NULL;
    int rc;

    if (!pp_ent)
        return -ENOMEM;

    rc = qed_spq_get_entry(p_hwfn, pp_ent);

    if (rc)
        return rc;

    p_ent = *pp_ent;

    p_ent->elem.hdr.cid     = cpu_to_le32(opaque_cid);
    p_ent->elem.hdr.cmd_id      = cmd;
    p_ent->elem.hdr.protocol_id = protocol;

    p_ent->priority     = QED_SPQ_PRIORITY_NORMAL;
    p_ent->comp_mode    = p_data->comp_mode;
    p_ent->comp_done.done   = 0;

    switch (p_ent->comp_mode) {
    case QED_SPQ_MODE_EBLOCK:
        p_ent->comp_cb.cookie = &p_ent->comp_done;
        break;

    case QED_SPQ_MODE_BLOCK:
        if (!p_data->p_comp_data)
            goto err;

        p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
        break;

    case QED_SPQ_MODE_CB:
        if (!p_data->p_comp_data)
            p_ent->comp_cb.function = NULL;
        else
            p_ent->comp_cb = *p_data->p_comp_data;
        break;

    default:
        DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
              p_ent->comp_mode);
        goto err;
    }

    DP_VERBOSE(p_hwfn,
           QED_MSG_SPQ,
           "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
           opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
           cmd, qed_get_protocol_type_str(protocol), protocol,
           (unsigned long long)(uintptr_t)&p_ent->ramrod,
           D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
               QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
               "MODE_CB"));

    memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

    return 0;

err:
    qed_sp_destroy_request(p_hwfn, p_ent);

    return -EINVAL;
}

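/* Map a qed tunnel classification mode to the firmware's tunnel_clss
 * encoding; unrecognized values fall back to MAC/VLAN classification.
 */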
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
    switch (type) {
    case QED_TUNN_CLSS_MAC_VLAN:
        return TUNNEL_CLSS_MAC_VLAN;
    case QED_TUNN_CLSS_MAC_VNI:
        return TUNNEL_CLSS_MAC_VNI;
    case QED_TUNN_CLSS_INNER_MAC_VLAN:
        return TUNNEL_CLSS_INNER_MAC_VLAN;
    case QED_TUNN_CLSS_INNER_MAC_VNI:
        return TUNNEL_CLSS_INNER_MAC_VNI;
    case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
        return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
    default:
        return TUNNEL_CLSS_MAC_VLAN;
    }
}

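/* Latch the per-tunnel enable state into the device-wide tunnel info.
 * On PF start every mode is taken from p_src unconditionally; on PF
 * update only the modes flagged with b_update_mode are copied.
 */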
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
                struct qed_tunnel_info *p_src, bool b_pf_start)
{
    if (p_src->vxlan.b_update_mode || b_pf_start)
        p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

    if (p_src->l2_gre.b_update_mode || b_pf_start)
        p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

    if (p_src->ip_gre.b_update_mode || b_pf_start)
        p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

    if (p_src->l2_geneve.b_update_mode || b_pf_start)
        p_tun->l2_geneve.b_mode_enabled =
            p_src->l2_geneve.b_mode_enabled;

    if (p_src->ip_geneve.b_update_mode || b_pf_start)
        p_tun->ip_geneve.b_mode_enabled =
            p_src->ip_geneve.b_mode_enabled;
}

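/* Copy the Rx/Tx classification update flags and translate each tunnel's
 * classification mode into its firmware encoding.
 */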
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                  struct qed_tunnel_info *p_src)
{
    int type;

    p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
    p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

    type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
    p_tun->vxlan.tun_cls = type;
    type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
    p_tun->l2_gre.tun_cls = type;
    type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
    p_tun->ip_gre.tun_cls = type;
    type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
    p_tun->l2_geneve.tun_cls = type;
    type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
    p_tun->ip_geneve.tun_cls = type;
}

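/* Record the GENEVE and VXLAN UDP destination ports, but only those the
 * caller has flagged for update.
 */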
static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
                   struct qed_tunnel_info *p_src)
{
    p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
    p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

    if (p_src->geneve_port.b_update_port)
        p_tun->geneve_port.port = p_src->geneve_port.port;

    if (p_src->vxlan_port.b_update_port)
        p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

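/* Fill the per-tunnel-type ramrod fields: the classification mode only;
 * the extended variant additionally fills the UDP port and its set-port
 * flag when an update was requested.
 */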
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                  struct qed_tunn_update_type *tun_type)
{
    *p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                struct qed_tunn_update_type *tun_type,
                u8 *p_update_port,
                __le16 *p_port,
                struct qed_tunn_update_udp_port *p_udp_port)
{
    __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
    if (p_udp_port->b_update_port) {
        *p_update_port = 1;
        *p_port = cpu_to_le16(p_udp_port->port);
    }
}

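/* Refresh the cached tunnel configuration from p_src and build the
 * tunnel section of a PF_UPDATE ramrod from the result.
 */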
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
                  struct qed_tunnel_info *p_src,
                  struct pf_update_tunnel_config *p_tunn_cfg)
{
    struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

    qed_set_pf_update_tunn_mode(p_tun, p_src, false);
    qed_set_tunn_cls_info(p_tun, p_src);
    qed_set_tunn_ports(p_tun, p_src);

    qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                    &p_tun->vxlan,
                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                    &p_tunn_cfg->vxlan_udp_port,
                    &p_tun->vxlan_port);

    qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                    &p_tun->l2_geneve,
                    &p_tunn_cfg->set_geneve_udp_port_flg,
                    &p_tunn_cfg->geneve_udp_port,
                    &p_tun->geneve_port);

    __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                      &p_tun->ip_geneve);

    __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                      &p_tun->l2_gre);

    __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                      &p_tun->ip_gre);

    p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

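/* Program the L2/IP GRE, VXLAN and GENEVE tunnel enable bits in HW. */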
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
                 struct qed_ptt *p_ptt,
                 struct qed_tunnel_info *p_tun)
{
    qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
               p_tun->ip_gre.b_mode_enabled);
    qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

    qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                  p_tun->ip_geneve.b_mode_enabled);
}

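/* Apply any updated VXLAN/GENEVE UDP destination ports to HW, then
 * program the tunnel enable bits.
 */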
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      struct qed_tunnel_info *p_tunn)
{
    if (p_tunn->vxlan_port.b_update_port)
        qed_set_vxlan_dest_port(p_hwfn, p_ptt,
                    p_tunn->vxlan_port.port);

    if (p_tunn->geneve_port.b_update_port)
        qed_set_geneve_dest_port(p_hwfn, p_ptt,
                     p_tunn->geneve_port.port);

    qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

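/* PF-start flavor of the PF-update flow above: take all tunnel modes
 * unconditionally and build the tunnel section of a PF_START ramrod.
 * A NULL p_src leaves the cached configuration untouched.
 */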
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
                 struct qed_tunnel_info *p_src,
                 struct pf_start_tunnel_config *p_tunn_cfg)
{
    struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

    if (!p_src)
        return;

    qed_set_pf_update_tunn_mode(p_tun, p_src, true);
    qed_set_tunn_cls_info(p_tun, p_src);
    qed_set_tunn_ports(p_tun, p_src);

    qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                    &p_tun->vxlan,
                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                    &p_tunn_cfg->vxlan_udp_port,
                    &p_tun->vxlan_port);

    qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                    &p_tun->l2_geneve,
                    &p_tunn_cfg->set_geneve_udp_port_flg,
                    &p_tunn_cfg->geneve_udp_port,
                    &p_tun->geneve_port);

    __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                      &p_tun->ip_geneve);

    __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                      &p_tun->l2_gre);

    __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                      &p_tun->ip_gre);
}

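/* Build and post a COMMON_RAMROD_PF_START ramrod: event-queue and
 * consolidation-queue PBL addresses, multi-function mode, outer-tag
 * (S-tag) configuration, tunnel configuration, PF personality and the
 * SR-IOV VF range. Posted in EBLOCK mode, so the call does not return
 * until the firmware completes the ramrod.
 */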
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
            struct qed_ptt *p_ptt,
            struct qed_tunnel_info *p_tunn,
            bool allow_npar_tx_switch)
{
    struct outer_tag_config_struct *outer_tag_config;
    struct pf_start_ramrod_data *p_ramrod = NULL;
    u16 sb = qed_int_get_sp_sb_id(p_hwfn);
    u8 sb_index = p_hwfn->p_eq->eq_sb_index;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    u8 page_cnt, i;
    int rc;

    /* update initial eq producer */
    qed_eq_prod_update(p_hwfn,
               qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_PF_START,
                 PROTOCOLID_COMMON, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.pf_start;

    p_ramrod->event_ring_sb_id  = cpu_to_le16(sb);
    p_ramrod->event_ring_sb_index   = sb_index;
    p_ramrod->path_id       = QED_PATH_ID(p_hwfn);
    p_ramrod->dont_log_ramrods  = 0;
    p_ramrod->log_type_mask     = cpu_to_le16(0xf);

    if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
        p_ramrod->mf_mode = MF_OVLAN;
    else
        p_ramrod->mf_mode = MF_NPAR;

    outer_tag_config = &p_ramrod->outer_tag_config;
    outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);

    if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
        outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
    } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
        outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
        outer_tag_config->enable_stag_pri_change = 1;
    }

    outer_tag_config->pri_map_valid = 1;
    for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
        outer_tag_config->inner_to_outer_pri_map[i] = i;

    /* enable_stag_pri_change should be set if the port is in BD mode or
     * in UFP with Host Control mode.
     */
    if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
            outer_tag_config->enable_stag_pri_change = 1;
        else
            outer_tag_config->enable_stag_pri_change = 0;

        outer_tag_config->outer_tag.tci |=
            cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
    }

    /* Place EQ address in RAMROD */
    DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
               qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
    page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
    p_ramrod->event_ring_num_pages = page_cnt;

    /* Place consolidation queue address in ramrod */
    DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
               qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
    page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
    p_ramrod->consolid_q_num_pages = page_cnt;

    qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

    if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
        p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

    switch (p_hwfn->hw_info.personality) {
    case QED_PCI_ETH:
        p_ramrod->personality = PERSONALITY_ETH;
        break;
    case QED_PCI_FCOE:
        p_ramrod->personality = PERSONALITY_FCOE;
        break;
    case QED_PCI_ISCSI:
    case QED_PCI_NVMETCP:
        p_ramrod->personality = PERSONALITY_TCP_ULP;
        break;
    case QED_PCI_ETH_ROCE:
    case QED_PCI_ETH_IWARP:
        p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
        break;
    default:
        DP_NOTICE(p_hwfn, "Unknown personality %d\n",
              p_hwfn->hw_info.personality);
        p_ramrod->personality = PERSONALITY_ETH;
    }

    if (p_hwfn->cdev->p_iov_info) {
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

        p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
        p_ramrod->num_vfs = (u8)p_iov->total_vfs;
    }
    p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
    p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

    DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
           "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
           sb, sb_index, outer_tag_config->outer_tag.tci);

    rc = qed_spq_post(p_hwfn, p_ent, NULL);

    if (p_tunn)
        qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                      &p_hwfn->cdev->tunnel);

    return rc;
}

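/* Post a PF_UPDATE ramrod carrying the currently negotiated DCBx results. */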
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_CB;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                 &init_data);
    if (rc)
        return rc;

    qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                      &p_ent->ramrod.pf_update);

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

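/* Post a PF_UPDATE ramrod telling the firmware whether it may change the
 * S-tag priority, based on the current UFP priority type.
 */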
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
        DP_INFO(p_hwfn, "Invalid priority type %d\n",
            p_hwfn->ufp_info.pri_type);
        return -EINVAL;
    }

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_CB;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                 &init_data);
    if (rc)
        return rc;

    p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
    if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
        p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
    else
        p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Build a PF_UPDATE ramrod carrying the new tunnel configuration, post it,
 * and apply the resulting configuration to the hardware.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
                  struct qed_ptt *p_ptt,
                  struct qed_tunnel_info *p_tunn,
                  enum spq_mode comp_mode,
                  struct qed_spq_comp_cb *p_comp_data)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    if (IS_VF(p_hwfn->cdev))
        return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

    if (!p_tunn)
        return -EINVAL;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_data;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                 &init_data);
    if (rc)
        return rc;

    qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
                      &p_ent->ramrod.pf_update.tunnel_config);

    rc = qed_spq_post(p_hwfn, p_ent, NULL);
    if (rc)
        return rc;

    qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

    return rc;
}

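/* Post a COMMON_RAMROD_PF_STOP ramrod in blocking (EBLOCK) mode. */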
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                 &init_data);
    if (rc)
        return rc;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

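/* Post an empty ramrod (COMMON_RAMROD_EMPTY); serves as a heartbeat to
 * confirm the firmware slowpath is still responsive.
 */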
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                 &init_data);
    if (rc)
        return rc;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

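/* Post a PF_UPDATE ramrod refreshing the firmware's outer VLAN (S-tag);
 * in UFP mode the traffic class is OR'ed into the tag's PCP bits.
 */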
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_CB;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                 &init_data);
    if (rc)
        return rc;

    p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
    p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
    if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
        p_ent->ramrod.pf_update.mf_vlan |=
            cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

    return qed_spq_post(p_hwfn, p_ent, NULL);
}