// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

0004 #include "ice_common.h"
0005 #include "ice_sched.h"
0006 #include "ice_adminq_cmd.h"
0007 #include "ice_flow.h"
0008
0009 #define ICE_PF_RESET_WAIT_COUNT 300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
0018 static int ice_set_mac_type(struct ice_hw *hw)
0019 {
0020 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
0021 return -ENODEV;
0022
0023 switch (hw->device_id) {
0024 case ICE_DEV_ID_E810C_BACKPLANE:
0025 case ICE_DEV_ID_E810C_QSFP:
0026 case ICE_DEV_ID_E810C_SFP:
0027 case ICE_DEV_ID_E810_XXV_BACKPLANE:
0028 case ICE_DEV_ID_E810_XXV_QSFP:
0029 case ICE_DEV_ID_E810_XXV_SFP:
0030 hw->mac_type = ICE_MAC_E810;
0031 break;
0032 case ICE_DEV_ID_E823C_10G_BASE_T:
0033 case ICE_DEV_ID_E823C_BACKPLANE:
0034 case ICE_DEV_ID_E823C_QSFP:
0035 case ICE_DEV_ID_E823C_SFP:
0036 case ICE_DEV_ID_E823C_SGMII:
0037 case ICE_DEV_ID_E822C_10G_BASE_T:
0038 case ICE_DEV_ID_E822C_BACKPLANE:
0039 case ICE_DEV_ID_E822C_QSFP:
0040 case ICE_DEV_ID_E822C_SFP:
0041 case ICE_DEV_ID_E822C_SGMII:
0042 case ICE_DEV_ID_E822L_10G_BASE_T:
0043 case ICE_DEV_ID_E822L_BACKPLANE:
0044 case ICE_DEV_ID_E822L_SFP:
0045 case ICE_DEV_ID_E822L_SGMII:
0046 case ICE_DEV_ID_E823L_10G_BASE_T:
0047 case ICE_DEV_ID_E823L_1GBE:
0048 case ICE_DEV_ID_E823L_BACKPLANE:
0049 case ICE_DEV_ID_E823L_QSFP:
0050 case ICE_DEV_ID_E823L_SFP:
0051 hw->mac_type = ICE_MAC_GENERIC;
0052 break;
0053 default:
0054 hw->mac_type = ICE_MAC_UNKNOWN;
0055 break;
0056 }
0057
0058 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
0059 return 0;
0060 }

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
0068 bool ice_is_e810(struct ice_hw *hw)
0069 {
0070 return hw->mac_type == ICE_MAC_E810;
0071 }

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
0079 bool ice_is_e810t(struct ice_hw *hw)
0080 {
0081 switch (hw->device_id) {
0082 case ICE_DEV_ID_E810C_SFP:
0083 if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
0084 hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
0085 return true;
0086 break;
0087 default:
0088 break;
0089 }
0090
0091 return false;
0092 }

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
0101 int ice_clear_pf_cfg(struct ice_hw *hw)
0102 {
0103 struct ice_aq_desc desc;
0104
0105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
0106
0107 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
0108 }

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * instead of user specified buffer. Kindly interpret user specified buffer
 * as "manage_mac_read" response.
 */
0125 static int
0126 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
0127 struct ice_sq_cd *cd)
0128 {
0129 struct ice_aqc_manage_mac_read_resp *resp;
0130 struct ice_aqc_manage_mac_read *cmd;
0131 struct ice_aq_desc desc;
0132 int status;
0133 u16 flags;
0134 u8 i;
0135
0136 cmd = &desc.params.mac_read;
0137
0138 if (buf_size < sizeof(*resp))
0139 return -EINVAL;
0140
0141 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
0142
0143 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
0144 if (status)
0145 return status;
0146
0147 resp = buf;
0148 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
0149
0150 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
0151 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
0152 return -EIO;
0153 }
0154
	/* A single port can report up to two (LAN and WoL) addresses */
0156 for (i = 0; i < cmd->num_addr; i++)
0157 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
0158 ether_addr_copy(hw->port_info->mac.lan_addr,
0159 resp[i].mac_addr);
0160 ether_addr_copy(hw->port_info->mac.perm_addr,
0161 resp[i].mac_addr);
0162 break;
0163 }
0164
0165 return 0;
0166 }

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
0178 int
0179 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
0180 struct ice_aqc_get_phy_caps_data *pcaps,
0181 struct ice_sq_cd *cd)
0182 {
0183 struct ice_aqc_get_phy_caps *cmd;
0184 u16 pcaps_size = sizeof(*pcaps);
0185 struct ice_aq_desc desc;
0186 struct ice_hw *hw;
0187 int status;
0188
0189 cmd = &desc.params.get_phy;
0190
0191 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
0192 return -EINVAL;
0193 hw = pi->hw;
0194
0195 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
0196 !ice_fw_supports_report_dflt_cfg(hw))
0197 return -EINVAL;
0198
0199 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
0200
0201 if (qual_mods)
0202 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
0203
0204 cmd->param0 |= cpu_to_le16(report_mode);
0205 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
0206
0207 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
0208 report_mode);
0209 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
0210 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
0211 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
0212 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
0213 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
0214 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
0215 pcaps->low_power_ctrl_an);
0216 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
0217 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
0218 pcaps->eeer_value);
0219 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
0220 pcaps->link_fec_options);
0221 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
0222 pcaps->module_compliance_enforcement);
0223 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
0224 pcaps->extended_compliance_code);
0225 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
0226 pcaps->module_type[0]);
0227 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
0228 pcaps->module_type[1]);
0229 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
0230 pcaps->module_type[2]);
0231
0232 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
0233 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
0234 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
0235 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
0236 sizeof(pi->phy.link_info.module_type));
0237 }
0238
0239 return status;
0240 }
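
/* A minimal caller sketch (an assumption for illustration, mirroring the
 * pattern ice_init_hw() uses further below): query the currently active PHY
 * configuration for the port. ICE_AQC_REPORT_ACTIVE_CFG is one of the report
 * modes accepted by this command.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	int err;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return -ENOMEM;
 *	err = ice_aq_get_phy_caps(hw->port_info, false,
 *				  ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL);
 *	devm_kfree(ice_hw_to_dev(hw), pcaps);
 */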

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
0254 static int
0255 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
0256 struct ice_sq_cd *cd)
0257 {
0258 struct ice_aqc_get_link_topo *cmd;
0259 struct ice_aq_desc desc;
0260
0261 cmd = &desc.params.get_link_topo;
0262
0263 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
0264
0265 cmd->addr.topo_params.node_type_ctx =
0266 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
0267 ICE_AQC_LINK_TOPO_NODE_CTX_S);
0268
	/* set node type */
0270 cmd->addr.topo_params.node_type_ctx |=
0271 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
0272
0273 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
0274 }

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
0283 static bool ice_is_media_cage_present(struct ice_port_info *pi)
0284 {
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present,
	 * then connection type is backplane or BASE-T.
	 */
0289 return !ice_aq_get_link_topo_handle(pi,
0290 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
0291 NULL);
0292 }

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
0298 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
0299 {
0300 struct ice_link_status *hw_link_info;
0301
0302 if (!pi)
0303 return ICE_MEDIA_UNKNOWN;
0304
0305 hw_link_info = &pi->phy.link_info;
0306 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
0308 return ICE_MEDIA_UNKNOWN;
0309
0310 if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
0316 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
0317 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
0318 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
0319 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
0320 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
0321 return ICE_MEDIA_DA;
0322
0323 switch (hw_link_info->phy_type_low) {
0324 case ICE_PHY_TYPE_LOW_1000BASE_SX:
0325 case ICE_PHY_TYPE_LOW_1000BASE_LX:
0326 case ICE_PHY_TYPE_LOW_10GBASE_SR:
0327 case ICE_PHY_TYPE_LOW_10GBASE_LR:
0328 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
0329 case ICE_PHY_TYPE_LOW_25GBASE_SR:
0330 case ICE_PHY_TYPE_LOW_25GBASE_LR:
0331 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
0332 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
0333 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
0334 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
0335 case ICE_PHY_TYPE_LOW_50GBASE_SR:
0336 case ICE_PHY_TYPE_LOW_50GBASE_FR:
0337 case ICE_PHY_TYPE_LOW_50GBASE_LR:
0338 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
0339 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
0340 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
0341 case ICE_PHY_TYPE_LOW_100GBASE_DR:
0342 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
0343 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
0344 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
0345 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
0346 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
0347 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
0348 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
0349 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
0350 return ICE_MEDIA_FIBER;
0351 case ICE_PHY_TYPE_LOW_100BASE_TX:
0352 case ICE_PHY_TYPE_LOW_1000BASE_T:
0353 case ICE_PHY_TYPE_LOW_2500BASE_T:
0354 case ICE_PHY_TYPE_LOW_5GBASE_T:
0355 case ICE_PHY_TYPE_LOW_10GBASE_T:
0356 case ICE_PHY_TYPE_LOW_25GBASE_T:
0357 return ICE_MEDIA_BASET;
0358 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
0359 case ICE_PHY_TYPE_LOW_25GBASE_CR:
0360 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
0361 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
0362 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
0363 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
0364 case ICE_PHY_TYPE_LOW_50GBASE_CP:
0365 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
0366 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
0367 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
0368 return ICE_MEDIA_DA;
0369 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
0370 case ICE_PHY_TYPE_LOW_40G_XLAUI:
0371 case ICE_PHY_TYPE_LOW_50G_LAUI2:
0372 case ICE_PHY_TYPE_LOW_50G_AUI2:
0373 case ICE_PHY_TYPE_LOW_50G_AUI1:
0374 case ICE_PHY_TYPE_LOW_100G_AUI4:
0375 case ICE_PHY_TYPE_LOW_100G_CAUI4:
0376 if (ice_is_media_cage_present(pi))
0377 return ICE_MEDIA_DA;
0378 fallthrough;
0379 case ICE_PHY_TYPE_LOW_1000BASE_KX:
0380 case ICE_PHY_TYPE_LOW_2500BASE_KX:
0381 case ICE_PHY_TYPE_LOW_2500BASE_X:
0382 case ICE_PHY_TYPE_LOW_5GBASE_KR:
0383 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
0384 case ICE_PHY_TYPE_LOW_25GBASE_KR:
0385 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
0386 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
0387 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
0388 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
0389 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
0390 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
0391 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
0392 return ICE_MEDIA_BACKPLANE;
0393 }
0394 } else {
0395 switch (hw_link_info->phy_type_high) {
0396 case ICE_PHY_TYPE_HIGH_100G_AUI2:
0397 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
0398 if (ice_is_media_cage_present(pi))
0399 return ICE_MEDIA_DA;
0400 fallthrough;
0401 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
0402 return ICE_MEDIA_BACKPLANE;
0403 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
0404 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
0405 return ICE_MEDIA_FIBER;
0406 }
0407 }
0408 return ICE_MEDIA_UNKNOWN;
0409 }

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
0420 int
0421 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
0422 struct ice_link_status *link, struct ice_sq_cd *cd)
0423 {
0424 struct ice_aqc_get_link_status_data link_data = { 0 };
0425 struct ice_aqc_get_link_status *resp;
0426 struct ice_link_status *li_old, *li;
0427 enum ice_media_type *hw_media_type;
0428 struct ice_fc_info *hw_fc_info;
0429 bool tx_pause, rx_pause;
0430 struct ice_aq_desc desc;
0431 struct ice_hw *hw;
0432 u16 cmd_flags;
0433 int status;
0434
0435 if (!pi)
0436 return -EINVAL;
0437 hw = pi->hw;
0438 li_old = &pi->phy.link_info_old;
0439 hw_media_type = &pi->phy.media_type;
0440 li = &pi->phy.link_info;
0441 hw_fc_info = &pi->fc;
0442
0443 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
0444 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
0445 resp = &desc.params.get_link_status;
0446 resp->cmd_flags = cpu_to_le16(cmd_flags);
0447 resp->lport_num = pi->lport;
0448
0449 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
0450
0451 if (status)
0452 return status;
0453
	/* save off old link status information */
0455 *li_old = *li;
0456
	/* update current link status information */
0458 li->link_speed = le16_to_cpu(link_data.link_speed);
0459 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
0460 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
0461 *hw_media_type = ice_get_media_type(pi);
0462 li->link_info = link_data.link_info;
0463 li->link_cfg_err = link_data.link_cfg_err;
0464 li->an_info = link_data.an_info;
0465 li->ext_info = link_data.ext_info;
0466 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
0467 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
0468 li->topo_media_conflict = link_data.topo_media_conflict;
0469 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
0470 ICE_AQ_CFG_PACING_TYPE_M);
0471
	/* update fc info */
0473 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
0474 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
0475 if (tx_pause && rx_pause)
0476 hw_fc_info->current_mode = ICE_FC_FULL;
0477 else if (tx_pause)
0478 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
0479 else if (rx_pause)
0480 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
0481 else
0482 hw_fc_info->current_mode = ICE_FC_NONE;
0483
0484 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
0485
0486 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
0487 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
0488 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
0489 (unsigned long long)li->phy_type_low);
0490 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
0491 (unsigned long long)li->phy_type_high);
0492 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
0493 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
0494 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
0495 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
0496 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
0497 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
0498 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
0499 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
0500 li->max_frame_size);
0501 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
0502
	/* save link status information */
0504 if (link)
0505 *link = *li;
0506
	/* flag cleared so calling functions don't call AQ again */
0508 pi->phy.get_link_info = false;
0509
0510 return 0;
0511 }

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
0521 static void
0522 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
0523 struct ice_aqc_set_mac_cfg *cmd)
0524 {
0525 u16 fc_thres_val, tx_timer_val;
0526 u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
0535 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
0536
	/* Retrieve the transmit timer */
0538 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
0539 tx_timer_val = val &
0540 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
0541 cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
0542
	/* Retrieve the FC threshold */
0544 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
0545 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
0546
0547 cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
0548 }

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
0558 int
0559 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
0560 {
0561 struct ice_aqc_set_mac_cfg *cmd;
0562 struct ice_aq_desc desc;
0563
0564 cmd = &desc.params.set_mac_cfg;
0565
0566 if (max_frame_size == 0)
0567 return -EINVAL;
0568
0569 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
0570
0571 cmd->max_frame_size = cpu_to_le16(max_frame_size);
0572
0573 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
0574
0575 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
0576 }

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
0582 static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
0583 {
0584 struct ice_switch_info *sw;
0585 int status;
0586
0587 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
0588 sizeof(*hw->switch_info), GFP_KERNEL);
0589 sw = hw->switch_info;
0590
0591 if (!sw)
0592 return -ENOMEM;
0593
0594 INIT_LIST_HEAD(&sw->vsi_list_map_head);
0595 sw->prof_res_bm_init = 0;
0596
0597 status = ice_init_def_sw_recp(hw);
0598 if (status) {
0599 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
0600 return status;
0601 }
0602 return 0;
0603 }

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
0609 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
0610 {
0611 struct ice_switch_info *sw = hw->switch_info;
0612 struct ice_vsi_list_map_info *v_pos_map;
0613 struct ice_vsi_list_map_info *v_tmp_map;
0614 struct ice_sw_recipe *recps;
0615 u8 i;
0616
0617 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
0618 list_entry) {
0619 list_del(&v_pos_map->list_entry);
0620 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
0621 }
0622 recps = sw->recp_list;
0623 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
0624 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
0625
0626 recps[i].root_rid = i;
0627 list_for_each_entry_safe(rg_entry, tmprg_entry,
0628 &recps[i].rg_list, l_entry) {
0629 list_del(&rg_entry->l_entry);
0630 devm_kfree(ice_hw_to_dev(hw), rg_entry);
0631 }
0632
0633 if (recps[i].adv_rule) {
0634 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
0635 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
0636
0637 mutex_destroy(&recps[i].filt_rule_lock);
0638 list_for_each_entry_safe(lst_itr, tmp_entry,
0639 &recps[i].filt_rules,
0640 list_entry) {
0641 list_del(&lst_itr->list_entry);
0642 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
0643 devm_kfree(ice_hw_to_dev(hw), lst_itr);
0644 }
0645 } else {
0646 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
0647
0648 mutex_destroy(&recps[i].filt_rule_lock);
0649 list_for_each_entry_safe(lst_itr, tmp_entry,
0650 &recps[i].filt_rules,
0651 list_entry) {
0652 list_del(&lst_itr->list_entry);
0653 devm_kfree(ice_hw_to_dev(hw), lst_itr);
0654 }
0655 }
0656 if (recps[i].root_buf)
0657 devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
0658 }
0659 ice_rm_all_sw_replay_rule_info(hw);
0660 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
0661 devm_kfree(ice_hw_to_dev(hw), sw);
0662 }

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
0668 static int ice_get_fw_log_cfg(struct ice_hw *hw)
0669 {
0670 struct ice_aq_desc desc;
0671 __le16 *config;
0672 int status;
0673 u16 size;
0674
0675 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
0676 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
0677 if (!config)
0678 return -ENOMEM;
0679
0680 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
0681
0682 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
0683 if (!status) {
0684 u16 i;
0685
		/* Save FW logging information into the HW structure */
0687 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
0688 u16 v, m, flgs;
0689
0690 v = le16_to_cpu(config[i]);
0691 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
0692 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
0693
0694 if (m < ICE_AQC_FW_LOG_ID_MAX)
0695 hw->fw_log.evnts[m].cur = flgs;
0696 }
0697 }
0698
0699 devm_kfree(ice_hw_to_dev(hw), config);
0700
0701 return status;
0702 }

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PF's. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers have
 * the option to leave the updating of the "cfg" member alone - in this case,
 * all of the above mentioned "cfg" elements will be set to zero in this
 * function to fully disable FW logging, and re/enabling FW logging would
 * happen with all modules reset to their default value.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the
 * device's initialization phase.
 */
0740 static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
0741 {
0742 struct ice_aqc_fw_logging *cmd;
0743 u16 i, chgs = 0, len = 0;
0744 struct ice_aq_desc desc;
0745 __le16 *data = NULL;
0746 u8 actv_evnts = 0;
0747 void *buf = NULL;
0748 int status = 0;
0749
0750 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
0751 return 0;
0752
	/* Disable FW logging only when the control queue is still responsive */
0754 if (!enable &&
0755 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
0756 return 0;
0757
	/* Get current FW log settings */
0759 status = ice_get_fw_log_cfg(hw);
0760 if (status)
0761 return status;
0762
0763 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
0764 cmd = &desc.params.fw_logging;
0765
	/* Indicate which controls are valid */
0767 if (hw->fw_log.cq_en)
0768 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
0769
0770 if (hw->fw_log.uart_en)
0771 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
0772
0773 if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
0777 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
0778 u16 val;
0779
			/* Keep track of enabled event types */
0781 actv_evnts |= hw->fw_log.evnts[i].cfg;
0782
0783 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
0784 continue;
0785
0786 if (!data) {
0787 data = devm_kcalloc(ice_hw_to_dev(hw),
0788 ICE_AQC_FW_LOG_ID_MAX,
0789 sizeof(*data),
0790 GFP_KERNEL);
0791 if (!data)
0792 return -ENOMEM;
0793 }
0794
0795 val = i << ICE_AQC_FW_LOG_ID_S;
0796 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
0797 data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
0804 if (actv_evnts) {
			/* Leave if there is effectively no change */
0806 if (!chgs)
0807 goto out;
0808
0809 if (hw->fw_log.cq_en)
0810 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
0811
0812 if (hw->fw_log.uart_en)
0813 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
0814
0815 buf = data;
0816 len = sizeof(*data) * chgs;
0817 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
0818 }
0819 }
0820
0821 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
0822 if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
0829 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
0830
0831 hw->fw_log.actv_evnts = actv_evnts;
0832 for (i = 0; i < cnt; i++) {
0833 u16 v, m;
0834
0835 if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * gets re-initialized.
				 */
0842 hw->fw_log.evnts[i].cur = 0;
0843 continue;
0844 }
0845
0846 v = le16_to_cpu(data[i]);
0847 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
0848 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
0849 }
0850 }
0851
0852 out:
0853 if (data)
0854 devm_kfree(ice_hw_to_dev(hw), data);
0855
0856 return status;
0857 }

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
0867 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
0868 {
0869 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
0870 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
0871 le16_to_cpu(desc->datalen));
0872 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
0873 }

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
0882 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
0883 {
0884 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
0885 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
0886 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
0887
0888 switch (max_agg_bw) {
0889 case ICE_MAX_AGG_BW_200G:
0890 case ICE_MAX_AGG_BW_100G:
0891 case ICE_MAX_AGG_BW_50G:
0892 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
0893 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
0894 break;
0895 case ICE_MAX_AGG_BW_25G:
0896 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
0897 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
0898 break;
0899 }
0900 }

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
0906 int ice_init_hw(struct ice_hw *hw)
0907 {
0908 struct ice_aqc_get_phy_caps_data *pcaps;
0909 u16 mac_buf_len;
0910 void *mac_buf;
0911 int status;
0912
	/* Set MAC type based on DeviceID */
0914 status = ice_set_mac_type(hw);
0915 if (status)
0916 return status;
0917
0918 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
0919 PF_FUNC_RID_FUNC_NUM_M) >>
0920 PF_FUNC_RID_FUNC_NUM_S;
0921
0922 status = ice_reset(hw, ICE_RESET_PFR);
0923 if (status)
0924 return status;
0925
0926 ice_get_itr_intrl_gran(hw);
0927
0928 status = ice_create_all_ctrlq(hw);
0929 if (status)
0930 goto err_unroll_cqinit;
0931
	/* Enable FW logging. Not fatal if this fails. */
0933 status = ice_cfg_fw_log(hw, true);
0934 if (status)
0935 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
0936
0937 status = ice_clear_pf_cfg(hw);
0938 if (status)
0939 goto err_unroll_cqinit;
0940
	/* Set bit to enable Flow Director filters */
0942 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
0943 INIT_LIST_HEAD(&hw->fdir_list_head);
0944
0945 ice_clear_pxe_mode(hw);
0946
0947 status = ice_init_nvm(hw);
0948 if (status)
0949 goto err_unroll_cqinit;
0950
0951 status = ice_get_caps(hw);
0952 if (status)
0953 goto err_unroll_cqinit;
0954
0955 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
0956 sizeof(*hw->port_info), GFP_KERNEL);
0957 if (!hw->port_info) {
0958 status = -ENOMEM;
0959 goto err_unroll_cqinit;
0960 }
0961
	/* set the back pointer to HW */
0963 hw->port_info->hw = hw;
0964
	/* Initialize port_info struct with switch configuration data */
0966 status = ice_get_initial_sw_cfg(hw);
0967 if (status)
0968 goto err_unroll_alloc;
0969
0970 hw->evb_veb = true;
0971
	/* Query the allocated resources for Tx scheduler */
0973 status = ice_sched_query_res_alloc(hw);
0974 if (status) {
0975 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
0976 goto err_unroll_alloc;
0977 }
0978 ice_sched_get_psm_clk_freq(hw);
0979
	/* Initialize port_info struct with scheduler data */
0981 status = ice_sched_init_port(hw->port_info);
0982 if (status)
0983 goto err_unroll_sched;
0984
0985 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
0986 if (!pcaps) {
0987 status = -ENOMEM;
0988 goto err_unroll_sched;
0989 }
0990
	/* Initialize port_info struct with PHY capabilities */
0992 status = ice_aq_get_phy_caps(hw->port_info, false,
0993 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
0994 NULL);
0995 devm_kfree(ice_hw_to_dev(hw), pcaps);
0996 if (status)
0997 dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
0998 status);
0999
	/* Initialize port_info struct with link information */
1001 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1002 if (status)
1003 goto err_unroll_sched;
1004
	/* need a valid SW entry point to build a Tx tree */
1006 if (!hw->sw_entry_point_layer) {
1007 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1008 status = -EIO;
1009 goto err_unroll_sched;
1010 }
1011 INIT_LIST_HEAD(&hw->agg_list);
1012
1013 if (!hw->max_burst_size)
1014 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1015
1016 status = ice_init_fltr_mgmt_struct(hw);
1017 if (status)
1018 goto err_unroll_sched;
1019
	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
1022 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
1023 sizeof(struct ice_aqc_manage_mac_read_resp),
1024 GFP_KERNEL);
1025 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1026
1027 if (!mac_buf) {
1028 status = -ENOMEM;
1029 goto err_unroll_fltr_mgmt_struct;
1030 }
1031
1032 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1033 devm_kfree(ice_hw_to_dev(hw), mac_buf);
1034
1035 if (status)
1036 goto err_unroll_fltr_mgmt_struct;
1037
1038 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1039 if (status)
1040 goto err_unroll_fltr_mgmt_struct;
1041
1042 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
1043 if (status)
1044 goto err_unroll_fltr_mgmt_struct;
1045 status = ice_init_hw_tbls(hw);
1046 if (status)
1047 goto err_unroll_fltr_mgmt_struct;
1048 mutex_init(&hw->tnl_lock);
1049 return 0;
1050
1051 err_unroll_fltr_mgmt_struct:
1052 ice_cleanup_fltr_mgmt_struct(hw);
1053 err_unroll_sched:
1054 ice_sched_cleanup_all(hw);
1055 err_unroll_alloc:
1056 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1057 err_unroll_cqinit:
1058 ice_destroy_all_ctrlq(hw);
1059 return status;
1060 }

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * initialization
 */
1070 void ice_deinit_hw(struct ice_hw *hw)
1071 {
1072 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1073 ice_cleanup_fltr_mgmt_struct(hw);
1074
1075 ice_sched_cleanup_all(hw);
1076 ice_sched_clear_agg(hw);
1077 ice_free_seg(hw);
1078 ice_free_hw_tbls(hw);
1079 mutex_destroy(&hw->tnl_lock);
1080
1081 if (hw->port_info) {
1082 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1083 hw->port_info = NULL;
1084 }
1085
	/* Attempt to disable FW logging before shutting down control queues */
1087 ice_cfg_fw_log(hw, false);
1088 ice_destroy_all_ctrlq(hw);
1089
	/* Clear VSI contexts if not already cleared */
1091 ice_clear_all_vsi_ctx(hw);
1092 }

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
1098 int ice_check_reset(struct ice_hw *hw)
1099 {
1100 u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
1106 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1107 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1108
1109 for (cnt = 0; cnt < grst_timeout; cnt++) {
1110 mdelay(100);
1111 reg = rd32(hw, GLGEN_RSTAT);
1112 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1113 break;
1114 }
1115
1116 if (cnt == grst_timeout) {
1117 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1118 return -EIO;
1119 }
1120
1121 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1122 GLNVM_ULD_PCIER_DONE_1_M |\
1123 GLNVM_ULD_CORER_DONE_M |\
1124 GLNVM_ULD_GLOBR_DONE_M |\
1125 GLNVM_ULD_POR_DONE_M |\
1126 GLNVM_ULD_POR_DONE_1_M |\
1127 GLNVM_ULD_PCIER_DONE_2_M)
1128
1129 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
1130 GLNVM_ULD_PE_DONE_M : 0);
1131
	/* Device is Active; check Global Reset processes are done */
1133 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1134 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1135 if (reg == uld_mask) {
1136 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1137 break;
1138 }
1139 mdelay(10);
1140 }
1141
1142 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1143 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1144 reg);
1145 return -EIO;
1146 }
1147
1148 return 0;
1149 }

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, then reset the PF
 */
1158 static int ice_pf_reset(struct ice_hw *hw)
1159 {
1160 u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
1167 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1168 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
1170 if (ice_check_reset(hw))
1171 return -EIO;
1172
1173 return 0;
1174 }
1175
	/* Reset the PF */
1177 reg = rd32(hw, PFGEN_CTRL);
1178
1179 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1180
	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
1185 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1186 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1187 reg = rd32(hw, PFGEN_CTRL);
1188 if (!(reg & PFGEN_CTRL_PFSWR_M))
1189 break;
1190
1191 mdelay(1);
1192 }
1193
1194 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1195 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1196 return -EIO;
1197 }
1198
1199 return 0;
1200 }

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
1214 int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1215 {
1216 u32 val = 0;
1217
1218 switch (req) {
1219 case ICE_RESET_PFR:
1220 return ice_pf_reset(hw);
1221 case ICE_RESET_CORER:
1222 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1223 val = GLGEN_RTRIG_CORER_M;
1224 break;
1225 case ICE_RESET_GLOBR:
1226 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1227 val = GLGEN_RTRIG_GLOBR_M;
1228 break;
1229 default:
1230 return -EINVAL;
1231 }
1232
1233 val |= rd32(hw, GLGEN_RTRIG);
1234 wr32(hw, GLGEN_RTRIG, val);
1235 ice_flush(hw);
1236
	/* wait for the FW to be ready */
1238 return ice_check_reset(hw);
1239 }

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
1249 static int
1250 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1251 {
1252 u8 i;
1253
1254 if (!ice_rxq_ctx)
1255 return -EINVAL;
1256
1257 if (rxq_index > QRX_CTRL_MAX_INDEX)
1258 return -EINVAL;
1259
	/* Copy each dword separately to HW */
1261 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1262 wr32(hw, QRX_CONTEXT(i, rxq_index),
1263 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1264
1265 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1266 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1267 }
1268
1269 return 0;
1270 }
1271
/* LAN Rx Queue Context */
1273 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
1275 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1276 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1277 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1278 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1279 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1280 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1281 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1282 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1283 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1284 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1285 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1286 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1287 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1288 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1289 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1290 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1291 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1292 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1293 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1294 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1295 { 0 }
1296 };
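
/* Reading the table above: ICE_CTX_STORE(ice_rlan_ctx, field, width, lsb)
 * packs <field> into <width> bits starting at absolute bit <lsb> of the
 * dense context buffer. A worked example: "qlen, 13, 89" places the 13-bit
 * queue length at bits 89..101, i.e. spanning bit 1 of byte 11 through
 * bit 5 of byte 12 of ctx_buf.
 */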

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
1308 int
1309 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1310 u32 rxq_index)
1311 {
1312 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1313
1314 if (!rlan_ctx)
1315 return -EINVAL;
1316
1317 rlan_ctx->prefena = 1;
1318
1319 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1320 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1321 }
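
/* Usage sketch (hypothetical; the real caller performs this as part of Rx
 * queue setup elsewhere in the driver): fill a sparse ice_rlan_ctx and let
 * ice_write_rxq_ctx() pack and program it. The ring fields and shift names
 * below are illustrative assumptions.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 *	rlan_ctx.qlen = ring->count;
 *	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
 *	err = ice_write_rxq_ctx(hw, &rlan_ctx, ring->reg_idx);
 */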
1322
/* LAN Tx Queue Context */
1324 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
1326 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1327 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1328 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1329 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1330 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1331 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1332 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1333 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1334 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1335 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1336 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1337 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1338 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1339 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1340 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1341 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1342 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1343 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1344 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1345 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1346 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1347 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1348 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1349 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1350 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1351 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1352 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1353 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1354 { 0 }
1355 };

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
1367 static int
1368 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1369 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1370 {
1371 return ice_sq_send_cmd(hw, ice_get_sbq(hw),
1372 (struct ice_aq_desc *)desc, buf, buf_size, cd);
1373 }

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
1380 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1381 {
1382 struct ice_sbq_cmd_desc desc = {0};
1383 struct ice_sbq_msg_req msg = {0};
1384 u16 msg_len;
1385 int status;
1386
1387 msg_len = sizeof(msg);
1388
1389 msg.dest_dev = in->dest_dev;
1390 msg.opcode = in->opcode;
1391 msg.flags = ICE_SBQ_MSG_FLAGS;
1392 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1393 msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1394 msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1395
1396 if (in->opcode)
1397 msg.data = cpu_to_le32(in->data);
1398 else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
1402 msg_len -= sizeof(msg.data);
1403
1404 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
1405 desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1406 desc.param0.cmd_len = cpu_to_le16(msg_len);
1407 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1408 if (!status && !in->opcode)
1409 in->data = le32_to_cpu
1410 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1411 return status;
1412 }

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
1420 DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the value of the opcode.
 */
1429 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1430 {
1431 switch (opcode) {
1432 case ice_aqc_opc_get_link_topo:
1433 case ice_aqc_opc_lldp_stop:
1434 case ice_aqc_opc_lldp_start:
1435 case ice_aqc_opc_lldp_filter_ctrl:
1436 return true;
1437 }
1438
1439 return false;
1440 }

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
1454 static int
1455 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1456 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1457 struct ice_sq_cd *cd)
1458 {
1459 struct ice_aq_desc desc_cpy;
1460 bool is_cmd_for_retry;
1461 u8 *buf_cpy = NULL;
1462 u8 idx = 0;
1463 u16 opcode;
1464 int status;
1465
1466 opcode = le16_to_cpu(desc->opcode);
1467 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1468 memset(&desc_cpy, 0, sizeof(desc_cpy));
1469
1470 if (is_cmd_for_retry) {
1471 if (buf) {
1472 buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1473 if (!buf_cpy)
1474 return -ENOMEM;
1475 }
1476
1477 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1478 }
1479
1480 do {
1481 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1482
1483 if (!is_cmd_for_retry || !status ||
1484 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1485 break;
1486
1487 if (buf_cpy)
1488 memcpy(buf, buf_cpy, buf_size);
1489
1490 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1491
1492 mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1493
1494 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1495
1496 kfree(buf_cpy);
1497
1498 return status;
1499 }

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
1511 int
1512 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1513 u16 buf_size, struct ice_sq_cd *cd)
1514 {
1515 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1516 bool lock_acquired = false;
1517 int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all
	 * others must block until the package download completes and the
	 * Global Config Lock is released.
	 */
1529 switch (le16_to_cpu(desc->opcode)) {
1530 case ice_aqc_opc_download_pkg:
1531 case ice_aqc_opc_get_pkg_info_list:
1532 case ice_aqc_opc_get_ver:
1533 case ice_aqc_opc_upload_section:
1534 case ice_aqc_opc_update_pkg:
1535 case ice_aqc_opc_set_port_params:
1536 case ice_aqc_opc_get_vlan_mode_parameters:
1537 case ice_aqc_opc_set_vlan_mode_parameters:
1538 case ice_aqc_opc_add_recipe:
1539 case ice_aqc_opc_recipe_to_profile:
1540 case ice_aqc_opc_get_recipe:
1541 case ice_aqc_opc_get_recipe_to_profile:
1542 break;
1543 case ice_aqc_opc_release_res:
1544 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1545 break;
1546 fallthrough;
1547 default:
1548 mutex_lock(&ice_global_cfg_lock_sw);
1549 lock_acquired = true;
1550 break;
1551 }
1552
1553 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1554 if (lock_acquired)
1555 mutex_unlock(&ice_global_cfg_lock_sw);
1556
1557 return status;
1558 }

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
1567 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1568 {
1569 struct ice_aqc_get_ver *resp;
1570 struct ice_aq_desc desc;
1571 int status;
1572
1573 resp = &desc.params.get_ver;
1574
1575 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1576
1577 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1578
1579 if (!status) {
1580 hw->fw_branch = resp->fw_branch;
1581 hw->fw_maj_ver = resp->fw_major;
1582 hw->fw_min_ver = resp->fw_minor;
1583 hw->fw_patch = resp->fw_patch;
1584 hw->fw_build = le32_to_cpu(resp->fw_build);
1585 hw->api_branch = resp->api_branch;
1586 hw->api_maj_ver = resp->api_major;
1587 hw->api_min_ver = resp->api_minor;
1588 hw->api_patch = resp->api_patch;
1589 }
1590
1591 return status;
1592 }

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
1602 int
1603 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1604 struct ice_sq_cd *cd)
1605 {
1606 struct ice_aqc_driver_ver *cmd;
1607 struct ice_aq_desc desc;
1608 u16 len;
1609
1610 cmd = &desc.params.driver_ver;
1611
1612 if (!dv)
1613 return -EINVAL;
1614
1615 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1616
1617 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1618 cmd->major_ver = dv->major_ver;
1619 cmd->minor_ver = dv->minor_ver;
1620 cmd->build_ver = dv->build_ver;
1621 cmd->subbuild_ver = dv->subbuild_ver;
1622
1623 len = 0;
1624 while (len < sizeof(dv->driver_string) &&
1625 isascii(dv->driver_string[len]) && dv->driver_string[len])
1626 len++;
1627
1628 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1629 }

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
1639 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1640 {
1641 struct ice_aqc_q_shutdown *cmd;
1642 struct ice_aq_desc desc;
1643
1644 cmd = &desc.params.q_shutdown;
1645
1646 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1647
1648 if (unloading)
1649 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1650
1651 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1652 }

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) 0 -         acquired lock, and can perform download package
 *  2) -EIO -      did not get lock, driver should fail to load
 *  3) -EALREADY - did not get lock, but another driver has
 *                 successfully downloaded the package; the driver does
 *                 not have to download the package and can continue
 *                 loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and
 * issue a CORER. In this case, the driver will receive a CORER interrupt and
 * will have to determine its cause. The calling thread that is handling this
 * flow will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
1680 static int
1681 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1682 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1683 struct ice_sq_cd *cd)
1684 {
1685 struct ice_aqc_req_res *cmd_resp;
1686 struct ice_aq_desc desc;
1687 int status;
1688
1689 cmd_resp = &desc.params.res_owner;
1690
1691 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1692
1693 cmd_resp->res_id = cpu_to_le16(res);
1694 cmd_resp->access_type = cpu_to_le16(access);
1695 cmd_resp->res_number = cpu_to_le32(sdp_number);
1696 cmd_resp->timeout = cpu_to_le32(*timeout);
1697 *timeout = 0;
1698
1699 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG and the timeout field
	 * indicates the maximum time the current owner of the resource has to
	 * free it.
	 */
1712 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1713 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1714 *timeout = le32_to_cpu(cmd_resp->timeout);
1715 return 0;
1716 } else if (le16_to_cpu(cmd_resp->status) ==
1717 ICE_AQ_RES_GLBL_IN_PROG) {
1718 *timeout = le32_to_cpu(cmd_resp->timeout);
1719 return -EIO;
1720 } else if (le16_to_cpu(cmd_resp->status) ==
1721 ICE_AQ_RES_GLBL_DONE) {
1722 return -EALREADY;
1723 }
1724
		/* invalid FW response, force a timeout immediately */
1726 *timeout = 0;
1727 return -EIO;
1728 }
1729
	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
1734 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1735 *timeout = le32_to_cpu(cmd_resp->timeout);
1736
1737 return status;
1738 }

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
1749 static int
1750 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1751 struct ice_sq_cd *cd)
1752 {
1753 struct ice_aqc_req_res *cmd;
1754 struct ice_aq_desc desc;
1755
1756 cmd = &desc.params.res_owner;
1757
1758 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1759
1760 cmd->res_id = cpu_to_le16(res);
1761 cmd->res_number = cpu_to_le32(sdp_number);
1762
1763 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1764 }

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
1775 int
1776 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1777 enum ice_aq_res_access_type access, u32 timeout)
1778 {
1779 #define ICE_RES_POLLING_DELAY_MS 10
1780 u32 delay = ICE_RES_POLLING_DELAY_MS;
1781 u32 time_left = timeout;
1782 int status;
1783
1784 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1785
	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
1791 if (status == -EALREADY)
1792 goto ice_acquire_res_exit;
1793
1794 if (status)
1795 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1796
	/* If necessary, poll until the current lock owner timeouts */
1798 timeout = time_left;
1799 while (status && timeout && time_left) {
1800 mdelay(delay);
1801 timeout = (timeout > delay) ? timeout - delay : 0;
1802 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1803
1804 if (status == -EALREADY)
			/* lock free, but no work to do */
1806 break;
1807
1808 if (!status)
			/* lock acquired */
1810 break;
1811 }
1812 if (status && status != -EALREADY)
1813 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1814
1815 ice_acquire_res_exit:
1816 if (status == -EALREADY) {
1817 if (access == ICE_RES_WRITE)
1818 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1819 else
1820 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
1821 }
1822 return status;
1823 }

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
1832 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1833 {
1834 u32 total_delay = 0;
1835 int status;
1836
1837 status = ice_aq_release_res(hw, res, 0, NULL);
1838
	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
1842 while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
1843 mdelay(1);
1844 status = ice_aq_release_res(hw, res, 0, NULL);
1845 total_delay++;
1846 }
1847 }
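
/* Typical acquire/release pairing (a sketch, not a caller in this file; the
 * driver's NVM accessors wrap exactly this pattern around shadow RAM reads):
 *
 *	err = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *			      ICE_NVM_TIMEOUT);
 *	if (err)
 *		return err;
 *	... access the shared resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */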

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
1860 int
1861 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1862 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1863 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1864 {
1865 struct ice_aqc_alloc_free_res_cmd *cmd;
1866 struct ice_aq_desc desc;
1867
1868 cmd = &desc.params.sw_res_ctrl;
1869
1870 if (!buf)
1871 return -EINVAL;
1872
1873 if (buf_size < flex_array_size(buf, elem, num_entries))
1874 return -EINVAL;
1875
1876 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1877
1878 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1879
1880 cmd->num_entries = cpu_to_le16(num_entries);
1881
1882 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1883 }

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
1893 int
1894 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1895 {
1896 struct ice_aqc_alloc_free_res_elem *buf;
1897 u16 buf_len;
1898 int status;
1899
1900 buf_len = struct_size(buf, elem, num);
1901 buf = kzalloc(buf_len, GFP_KERNEL);
1902 if (!buf)
1903 return -ENOMEM;
1904
	/* Prepare buffer to allocate resource */
1906 buf->num_elems = cpu_to_le16(num);
1907 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1908 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1909 if (btm)
1910 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1911
1912 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1913 ice_aqc_opc_alloc_res, NULL);
1914 if (status)
1915 goto ice_alloc_res_exit;
1916
1917 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1918
1919 ice_alloc_res_exit:
1920 kfree(buf);
1921 return status;
1922 }
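
/* Hypothetical usage sketch: allocate one dedicated resource of some
 * ICE_AQC_RES_TYPE_* (the counter type below is only an example) scanning
 * from the top of the resource space, then return it via ice_free_hw_res():
 *
 *	u16 ctr_id;
 *
 *	err = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 1,
 *			       false, &ctr_id);
 *	if (err)
 *		return err;
 *	...
 *	ice_free_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 1, &ctr_id);
 */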
1923
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
1931 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1932 {
1933 struct ice_aqc_alloc_free_res_elem *buf;
1934 u16 buf_len;
1935 int status;
1936
1937 buf_len = struct_size(buf, elem, num);
1938 buf = kzalloc(buf_len, GFP_KERNEL);
1939 if (!buf)
1940 return -ENOMEM;
1941
	/* Prepare buffer to free resource */
1943 buf->num_elems = cpu_to_le16(num);
1944 buf->res_type = cpu_to_le16(type);
1945 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1946
1947 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1948 ice_aqc_opc_free_res, NULL);
1949 if (status)
1950 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1951
1952 kfree(buf);
1953 return status;
1954 }

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
1965 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1966 {
1967 u8 funcs;
1968
1969 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1970 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1971 ICE_CAPS_VALID_FUNCS_M);
1972
1973 if (!funcs)
1974 return 0;
1975
1976 return max / funcs;
1977 }
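
/* Worked example: if the valid_functions capability reports 0xFF (eight PFs
 * enabled), then for a device-wide maximum of 768 VSIs,
 * ice_get_num_per_func(hw, 768) returns 768 / 8 = 96 guaranteed VSIs per PF.
 * With no valid functions reported it returns 0 rather than dividing by zero.
 */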

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
1992 static bool
1993 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1994 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1995 {
1996 u32 logical_id = le32_to_cpu(elem->logical_id);
1997 u32 phys_id = le32_to_cpu(elem->phys_id);
1998 u32 number = le32_to_cpu(elem->number);
1999 u16 cap = le16_to_cpu(elem->cap);
2000 bool found = true;
2001
2002 switch (cap) {
2003 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2004 caps->valid_functions = number;
2005 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2006 caps->valid_functions);
2007 break;
2008 case ICE_AQC_CAPS_SRIOV:
2009 caps->sr_iov_1_1 = (number == 1);
2010 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2011 caps->sr_iov_1_1);
2012 break;
2013 case ICE_AQC_CAPS_DCB:
2014 caps->dcb = (number == 1);
2015 caps->active_tc_bitmap = logical_id;
2016 caps->maxtc = phys_id;
2017 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2018 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2019 caps->active_tc_bitmap);
2020 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2021 break;
2022 case ICE_AQC_CAPS_RSS:
2023 caps->rss_table_size = number;
2024 caps->rss_table_entry_width = logical_id;
2025 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2026 caps->rss_table_size);
2027 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2028 caps->rss_table_entry_width);
2029 break;
2030 case ICE_AQC_CAPS_RXQS:
2031 caps->num_rxq = number;
2032 caps->rxq_first_id = phys_id;
2033 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2034 caps->num_rxq);
2035 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2036 caps->rxq_first_id);
2037 break;
2038 case ICE_AQC_CAPS_TXQS:
2039 caps->num_txq = number;
2040 caps->txq_first_id = phys_id;
2041 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2042 caps->num_txq);
2043 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2044 caps->txq_first_id);
2045 break;
2046 case ICE_AQC_CAPS_MSIX:
2047 caps->num_msix_vectors = number;
2048 caps->msix_vector_first_id = phys_id;
2049 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2050 caps->num_msix_vectors);
2051 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2052 caps->msix_vector_first_id);
2053 break;
2054 case ICE_AQC_CAPS_PENDING_NVM_VER:
2055 caps->nvm_update_pending_nvm = true;
2056 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2057 break;
2058 case ICE_AQC_CAPS_PENDING_OROM_VER:
2059 caps->nvm_update_pending_orom = true;
2060 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2061 break;
2062 case ICE_AQC_CAPS_PENDING_NET_VER:
2063 caps->nvm_update_pending_netlist = true;
2064 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2065 break;
2066 case ICE_AQC_CAPS_NVM_MGMT:
2067 caps->nvm_unified_update =
2068 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2069 true : false;
2070 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2071 caps->nvm_unified_update);
2072 break;
2073 case ICE_AQC_CAPS_RDMA:
2074 caps->rdma = (number == 1);
2075 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2076 break;
2077 case ICE_AQC_CAPS_MAX_MTU:
2078 caps->max_mtu = number;
2079 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2080 prefix, caps->max_mtu);
2081 break;
2082 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2083 caps->pcie_reset_avoidance = (number > 0);
2084 ice_debug(hw, ICE_DBG_INIT,
2085 "%s: pcie_reset_avoidance = %d\n", prefix,
2086 caps->pcie_reset_avoidance);
2087 break;
2088 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2089 caps->reset_restrict_support = (number == 1);
2090 ice_debug(hw, ICE_DBG_INIT,
2091 "%s: reset_restrict_support = %d\n", prefix,
2092 caps->reset_restrict_support);
2093 break;
2094 default:
		/* Not one of the recognized common capabilities */
2096 found = false;
2097 }
2098
2099 return found;
2100 }

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
2111 static void
2112 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2113 {
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
2117 if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
2119 caps->maxtc = 4;
2120 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2121 caps->maxtc);
2122 if (caps->rdma) {
2123 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2124 caps->rdma = 0;
2125 }
2126
		/* print message only when processing device capabilities
		 * during initialization.
		 */
2130 if (caps == &hw->dev_caps.common_cap)
2131 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2132 }
2133 }

/**
 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VF.
 */
2143 static void
2144 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2145 struct ice_aqc_list_caps_elem *cap)
2146 {
2147 u32 logical_id = le32_to_cpu(cap->logical_id);
2148 u32 number = le32_to_cpu(cap->number);
2149
2150 func_p->num_allocd_vfs = number;
2151 func_p->vf_base_id = logical_id;
2152 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2153 func_p->num_allocd_vfs);
2154 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2155 func_p->vf_base_id);
2156 }

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
2166 static void
2167 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2168 struct ice_aqc_list_caps_elem *cap)
2169 {
2170 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2171 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2172 le32_to_cpu(cap->number));
2173 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2174 func_p->guar_num_vsi);
2175 }

/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
2185 static void
2186 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2187 struct ice_aqc_list_caps_elem *cap)
2188 {
2189 struct ice_ts_func_info *info = &func_p->ts_func_info;
2190 u32 number = le32_to_cpu(cap->number);
2191
2192 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2193 func_p->common_cap.ieee_1588 = info->ena;
2194
2195 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2196 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2197 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2198 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2199
2200 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2201 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2202
2203 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2204 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2205 } else {
		/* Unknown clock frequency, so assume a (probably incorrect)
		 * default to avoid out-of-bounds look ups of frequency
		 * related information.
		 */
2210 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2211 info->clk_freq);
2212 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2213 }
2214
2215 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2216 func_p->common_cap.ieee_1588);
2217 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2218 info->src_tmr_owned);
2219 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2220 info->tmr_ena);
2221 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2222 info->tmr_index_owned);
2223 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2224 info->tmr_index_assoc);
2225 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2226 info->clk_freq);
2227 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2228 info->clk_src);
2229 }

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
2238 static void
2239 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2240 {
2241 u32 reg_val, val;
2242
2243 reg_val = rd32(hw, GLQF_FD_SIZE);
2244 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2245 GLQF_FD_SIZE_FD_GSIZE_S;
2246 func_p->fd_fltr_guar =
2247 ice_get_num_per_func(hw, val);
2248 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2249 GLQF_FD_SIZE_FD_BSIZE_S;
2250 func_p->fd_fltr_best_effort = val;
2251
2252 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2253 func_p->fd_fltr_guar);
2254 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2255 func_p->fd_fltr_best_effort);
2256 }

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
2272 static void
2273 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2274 void *buf, u32 cap_count)
2275 {
2276 struct ice_aqc_list_caps_elem *cap_resp;
2277 u32 i;
2278
2279 cap_resp = buf;
2280
2281 memset(func_p, 0, sizeof(*func_p));
2282
2283 for (i = 0; i < cap_count; i++) {
2284 u16 cap = le16_to_cpu(cap_resp[i].cap);
2285 bool found;
2286
2287 found = ice_parse_common_caps(hw, &func_p->common_cap,
2288 &cap_resp[i], "func caps");
2289
2290 switch (cap) {
2291 case ICE_AQC_CAPS_VF:
2292 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2293 break;
2294 case ICE_AQC_CAPS_VSI:
2295 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2296 break;
2297 case ICE_AQC_CAPS_1588:
2298 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2299 break;
2300 case ICE_AQC_CAPS_FD:
2301 ice_parse_fdir_func_caps(hw, func_p);
2302 break;
2303 default:
			/* Don't list common capabilities as unknown */
2305 if (!found)
2306 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2307 i, cap);
2308 break;
2309 }
2310 }
2311
2312 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2313 }

/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
 */
2323 static void
2324 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2325 struct ice_aqc_list_caps_elem *cap)
2326 {
2327 u32 number = le32_to_cpu(cap->number);
2328
2329 dev_p->num_funcs = hweight32(number);
2330 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2331 dev_p->num_funcs);
2332 }

/**
 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VF for device capabilities.
 */
2342 static void
2343 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2344 struct ice_aqc_list_caps_elem *cap)
2345 {
2346 u32 number = le32_to_cpu(cap->number);
2347
2348 dev_p->num_vfs_exposed = number;
2349 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2350 dev_p->num_vfs_exposed);
2351 }

/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
2361 static void
2362 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2363 struct ice_aqc_list_caps_elem *cap)
2364 {
2365 u32 number = le32_to_cpu(cap->number);
2366
2367 dev_p->num_vsi_allocd_to_host = number;
2368 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2369 dev_p->num_vsi_allocd_to_host);
2370 }

/**
 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_1588 for device capabilities.
 */
2380 static void
2381 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2382 struct ice_aqc_list_caps_elem *cap)
2383 {
2384 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2385 u32 logical_id = le32_to_cpu(cap->logical_id);
2386 u32 phys_id = le32_to_cpu(cap->phys_id);
2387 u32 number = le32_to_cpu(cap->number);
2388
2389 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2390 dev_p->common_cap.ieee_1588 = info->ena;
2391
2392 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2393 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2394 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2395
2396 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2397 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2398 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2399
2400 info->ena_ports = logical_id;
2401 info->tmr_own_map = phys_id;
2402
2403 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2404 dev_p->common_cap.ieee_1588);
2405 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2406 info->tmr0_owner);
2407 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2408 info->tmr0_owned);
2409 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2410 info->tmr0_ena);
2411 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2412 info->tmr1_owner);
2413 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2414 info->tmr1_owned);
2415 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2416 info->tmr1_ena);
2417 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2418 info->ena_ports);
2419 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2420 info->tmr_own_map);
2421 }

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}

/**
 * ice_aq_list_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a buffer to hold the capabilities
 * @buf_size: size of the buffer
 * @cap_count: if not NULL, set to the number of capabilities reported
 * @opc: capabilities type to discover, device or function
 * @cd: pointer to command details structure or NULL
 *
 * Get the function (0x000A) or device (0x000B) capabilities description
 * from firmware and store it in the buffer.
 */
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (cap_count)
		*cap_count = le32_to_cpu(cmd->count);

	return status;
}
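
/* Illustrative sketch (not part of the driver): a caller would typically
 * allocate the maximum AQ buffer and let firmware report how many
 * capability elements it actually filled in, e.g.:
 *
 *	u32 cap_count = 0;
 *	void *cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 *
 *	if (cbuf && !ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN,
 *				      &cap_count, ice_aqc_opc_list_dev_caps,
 *				      NULL))
 *		;	// cbuf now holds cap_count ice_aqc_list_caps_elem entries
 *	kfree(cbuf);
 *
 * This mirrors what ice_discover_dev_caps() below does.
 */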

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */
int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps
 * structure for later use.
 */
static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the HW structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
int ice_get_caps(struct ice_hw *hw)
{
	int status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}
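
/* Illustrative sketch (not part of the driver): during init, a caller with a
 * populated struct ice_hw would refresh both capability sets with:
 *
 *	int err = ice_get_caps(hw);
 *
 *	if (err)
 *		return err;
 *	// hw->dev_caps and hw->func_caps are now valid
 */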

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * Manage MAC address write (0x0108)
 */
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ether_addr_copy(cmd->mac_addr, mac_addr);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
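
/* Illustrative sketch (not part of the driver): programming a new address as
 * an LAA with wake-on-LAN behavior. "new_mac" and "err" are hypothetical
 * locals, and the flag choice is an assumption for the example:
 *
 *	u8 new_mac[ETH_ALEN];
 *	int err;
 *
 *	ether_addr_copy(new_mac, hw->port_info->mac.lan_addr);
 *	err = ice_aq_manage_mac_write(hw, new_mac,
 *				      ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
 */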

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_port_params - set physical port parameters
 * @pi: pointer to the port info struct
 * @double_vlan: if set double VLAN is enabled
 * @cd: pointer to command details structure or NULL
 *
 * Set Physical port parameters (0x0203)
 */
int
ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_params *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;
	u16 cmd_flags = 0;

	cmd = &desc.params.set_port_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
	if (double_vlan)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
	cmd->cmd_flags = cpu_to_le16(cmd_flags);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in the PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: in the structure of [phy_type_low, phy_type_high], there should
 * be exactly one bit set, as this function converts one PHY type to its
 * speed.
 * If no bit is set, or more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN
 * will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Each bit in [phy_type_low, phy_type_high] represents one PHY type with a
 * fixed link speed. This helper turns on the bits whose speed is included in
 * the link_speeds_bitmap input parameter, which may contain multiple speeds.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* Update phy_type_low[] for each requested speed */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* Update phy_type_high[] for each requested speed */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}
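
/* Illustrative sketch (not part of the driver): translating a user-requested
 * speed set into PHY type bitmaps before a set-PHY-config call. "cfg" is a
 * hypothetical struct ice_aqc_set_phy_cfg_data local:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = cpu_to_le64(phy_low);
 *	cfg.phy_type_high = cpu_to_le64(phy_high);
 */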

/**
 * ice_aq_set_phy_cfg - set PHY configuration
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port
 * (0x0601). One or more of the Set PHY config parameters may be ignored in
 * an MFP mode as the PF may not have the privilege to set some of them.
 */
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (!cfg)
		return -EINVAL;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
int ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	int status;

	if (!pi)
		return -EINVAL;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return -ENOMEM;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);
		/* capture the media type reported for the current topology */
		if (!status)
			memcpy(li->module_type, &pcaps->module_type,
			       sizeof(li->module_type));

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return -EINVAL;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
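
/* Illustrative sketch (not part of the driver): requesting symmetric flow
 * control and letting firmware restart the link. "err" is a hypothetical
 * local:
 *
 *	u8 aq_failures = 0;
 *	int err;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	err = ice_set_fc(pi, &aq_failures, true);
 *	if (err)
 *		;	// aq_failures identifies the failed step:
 *			// ICE_SET_FC_AQ_FAIL_GET / _SET / _UPDATE
 */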

/**
 * ice_phy_caps_equals_cfg - check if capabilities match the PHY config
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY configuration.
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them for comparison.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure.
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero. As a
 * result of this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}
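
/* Illustrative sketch (not part of the driver): polling link state from a
 * service task:
 *
 *	bool link_up = false;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		;	// link is up; pi->phy.link_info holds speed/flags
 */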

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link (0x0605).
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port.
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	int status;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
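
/* Illustrative sketch (not part of the driver): reading the first bytes of a
 * module's SFF EEPROM page 0 at the conventional 0xA0 I2C address. "page0"
 * and "err" are hypothetical locals:
 *
 *	u8 page0[8] = {};
 *	int err;
 *
 *	err = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, page0,
 *				sizeof(page0), false, NULL);
 */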

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	int status;
	u8 *lut;

	if (!params)
		return -EINVAL;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return -EINVAL;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = -EINVAL;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		fallthrough;
	default:
		status = -EINVAL;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
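
/* Illustrative sketch (not part of the driver): programming a PF-scoped
 * 512-entry LUT. "lut" is a hypothetical caller-owned table and "err" a
 * hypothetical local:
 *
 *	struct ice_aq_get_set_rss_lut_params params = {
 *		.vsi_handle = vsi_handle,
 *		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *		.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512,
 *		.lut = lut,
 *	};
 *	int err = ice_aq_set_rss_lut(hw, &params);
 */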

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
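
/* Illustrative sketch (not part of the driver): setting a fresh RSS key on a
 * valid VSI handle. "err" is a hypothetical local:
 *
 *	struct ice_aqc_get_set_rss_keys keys = {};
 *	int err;
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	err = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */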

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE: Prior to calling add Tx LAN queue, the queue(s) should be disabled
 * and the TEID(s), struct ice_aqc_add_tx_qgrp buffer, command and response
 * should be prepared. The buffer size must exactly match the sum of the
 * flexible array sizes of all groups in the list.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the
	 * driver and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
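
/* Illustrative sketch (not part of the driver): how a context descriptor
 * table drives ice_set_ctx(). "my_ctx", "my_ctx_info", "ctx", "dest_buf" and
 * "err" are hypothetical, purely for the example:
 *
 *	struct my_ctx { u8 ena; u16 qlen; };
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		ICE_CTX_STORE(my_ctx, ena, 1, 0),	// 1 bit at LSB 0
 *		ICE_CTX_STORE(my_ctx, qlen, 13, 1),	// 13 bits at LSB 1
 *		{ 0 }
 *	};
 *
 *	struct my_ctx ctx = { .ena = 1, .qlen = 256 };
 *	int err = ice_set_ctx(hw, (u8 *)&ctx, dest_buf, my_ctx_info);
 */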

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue.
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	/* Mark the generic, CIR, and EIR sections of the element valid and
	 * program the default rate-limit profile IDs and weights, so the
	 * queue starts in normal (non strict-priority) scheduling mode with
	 * default bandwidth settings.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB.
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qset.
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA Qsets
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_hw *hw;
	int status = 0;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	qg_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(qg_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list; it
	 * will allow adding rule entries back to filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}

	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;

	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Function is called after replay is completed.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
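
/* Illustrative sketch (not part of the driver): accumulating a 40-bit Rx
 * byte counter across reads, with rollover handled by the helper.
 * "prev_gorc", "cur_gorc", and "loaded" are hypothetical caller state:
 *
 *	ice_stat_update40(hw, GLPRT_GORCL(hw->port_info->lport),
 *			  loaded, &prev_gorc, &cur_gorc);
 */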

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information.
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read I2C register value
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] data offset size, bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		/* copy the read bytes out of the response descriptor */
		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c - write a value to I2C register
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *	    bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to get
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	int status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}
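
/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provide IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */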
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
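
/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */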
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}
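
/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */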
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}
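
/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */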
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for port 0 */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}

		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}

		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
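
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */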
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
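
/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */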
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
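
/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */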
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}
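
/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */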
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
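
/**
 * ice_fw_supports_report_dflt_cfg - check if firmware supports report default
 * configuration
 * @hw: pointer to the hardware structure
 */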
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}