// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */
0005
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007
0008 #include <generated/utsrelease.h>
0009 #include "ice.h"
0010 #include "ice_base.h"
0011 #include "ice_lib.h"
0012 #include "ice_fltr.h"
0013 #include "ice_dcb_lib.h"
0014 #include "ice_dcb_nl.h"
0015 #include "ice_devlink.h"
0016
0017
0018
0019
0020 #define CREATE_TRACE_POINTS
0021 #include "ice_trace.h"
0022 #include "ice_eswitch.h"
0023 #include "ice_tc_lib.h"
0024 #include "ice_vsi_vlan_ops.h"
0025
0026 #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
0027 static const char ice_driver_string[] = DRV_SUMMARY;
0028 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
0029
0030
0031 #define ICE_DDP_PKG_PATH "intel/ice/ddp/"
0032 #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
0033
0034 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
0035 MODULE_DESCRIPTION(DRV_SUMMARY);
0036 MODULE_LICENSE("GPL v2");
0037 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
0038
0039 static int debug = -1;
0040 module_param(debug, int, 0644);
0041 #ifndef CONFIG_DYNAMIC_DEBUG
0042 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
0043 #else
0044 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
0045 #endif
0046
0047 static DEFINE_IDA(ice_aux_ida);
0048 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
0049 EXPORT_SYMBOL(ice_xdp_locking_key);
0050
0051
0052
0053
0054
0055
0056
0057
0058
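/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */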
0059 struct device *ice_hw_to_dev(struct ice_hw *hw)
0060 {
0061 struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
0062
0063 return &pf->pdev->dev;
0064 }
0065
0066 static struct workqueue_struct *ice_wq;
0067 static const struct net_device_ops ice_netdev_safe_mode_ops;
0068 static const struct net_device_ops ice_netdev_ops;
0069
0070 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
0071
0072 static void ice_vsi_release_all(struct ice_pf *pf);
0073
0074 static int ice_rebuild_channels(struct ice_pf *pf);
0075 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
0076
0077 static int
0078 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
0079 void *cb_priv, enum tc_setup_type type, void *type_data,
0080 void *data,
0081 void (*cleanup)(struct flow_block_cb *block_cb));
0082
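/**
 * netif_is_ice - Check if a given netdevice is an ice-backed device
 * @dev: net_device to check
 */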
0083 bool netif_is_ice(struct net_device *dev)
0084 {
0085 return dev && (dev->netdev_ops == &ice_netdev_ops);
0086 }
0087
0088
0089
0090
0091
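/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */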
0092 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
0093 {
0094 u16 head, tail;
0095
0096 head = ring->next_to_clean;
0097 tail = ring->next_to_use;
0098
0099 if (head != tail)
0100 return (head < tail) ?
0101 tail - head : (tail + ring->count - head);
0102 return 0;
0103 }
0104
0105
0106
0107
0108
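/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 *
 * This function reads the Tx queue packet counters of the PF VSI and, if a
 * queue looks stalled, triggers a software interrupt on the queue's vector so
 * the queue gets cleaned again.
 */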
0109 static void ice_check_for_hang_subtask(struct ice_pf *pf)
0110 {
0111 struct ice_vsi *vsi = NULL;
0112 struct ice_hw *hw;
0113 unsigned int i;
0114 int packets;
0115 u32 v;
0116
0117 ice_for_each_vsi(pf, v)
0118 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
0119 vsi = pf->vsi[v];
0120 break;
0121 }
0122
0123 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
0124 return;
0125
0126 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
0127 return;
0128
0129 hw = &vsi->back->hw;
0130
0131 ice_for_each_txq(vsi, i) {
0132 struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
0133
0134 if (!tx_ring)
0135 continue;
0136 if (ice_ring_ch_enabled(tx_ring))
0137 continue;
0138
0139 if (tx_ring->desc) {
0140
0141
0142
0143
0144
0145
0146
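			/* If the packet counter has not changed since the
			 * last check, the queue is likely stalled; force an
			 * interrupt for this queue.
			 *
			 * prev_pkt would be negative if there was no pending
			 * work.
			 */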
0147 packets = tx_ring->stats.pkts & INT_MAX;
0148 if (tx_ring->tx_stats.prev_pkt == packets) {
0149
0150 ice_trigger_sw_intr(hw, tx_ring->q_vector);
0151 continue;
0152 }
0153
0154
0155
0156
0157 smp_rmb();
0158 tx_ring->tx_stats.prev_pkt =
0159 ice_get_tx_pending(tx_ring) ? packets : -1;
0160 }
0161 }
0162 }
0163
0164
0165
0166
0167
0168
0169
0170
0171
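/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set up the initial MAC filters for the PF VSI: forward traffic destined to
 * the port's permanent MAC address and to the broadcast address to the VSI.
 */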
0172 static int ice_init_mac_fltr(struct ice_pf *pf)
0173 {
0174 struct ice_vsi *vsi;
0175 u8 *perm_addr;
0176
0177 vsi = ice_get_main_vsi(pf);
0178 if (!vsi)
0179 return -EINVAL;
0180
0181 perm_addr = vsi->port_info->mac.perm_addr;
0182 return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
0183 }
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
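/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). It only populates the
 * VSI's tmp_sync_list, which is later used to add the filters to hardware.
 */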
0195 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
0196 {
0197 struct ice_netdev_priv *np = netdev_priv(netdev);
0198 struct ice_vsi *vsi = np->vsi;
0199
0200 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
0201 ICE_FWD_TO_VSI))
0202 return -EINVAL;
0203
0204 return 0;
0205 }
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
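/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). It only populates
 * the VSI's tmp_unsync_list, which is later used to remove the filters from
 * hardware. The netdev's own address is never added to the unsync list so
 * that it cannot be deleted.
 */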
0217 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
0218 {
0219 struct ice_netdev_priv *np = netdev_priv(netdev);
0220 struct ice_vsi *vsi = np->vsi;
0221
0222
0223
0224
0225
0226
0227 if (ether_addr_equal(addr, netdev->dev_addr))
0228 return 0;
0229
0230 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
0231 ICE_FWD_TO_VSI))
0232 return -EINVAL;
0233
0234 return 0;
0235 }
0236
0237
0238
0239
0240
0241
0242
0243 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
0244 {
0245 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
0246 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
0247 }
0248
0249
0250
0251
0252
0253
0254
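/**
 * ice_set_promisc - Enable promiscuous mode for a given PF VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */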
0255 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
0256 {
0257 int status;
0258
0259 if (vsi->type != ICE_VSI_PF)
0260 return 0;
0261
0262 if (ice_vsi_has_non_zero_vlans(vsi)) {
0263 promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
0264 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
0265 promisc_m);
0266 } else {
0267 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
0268 promisc_m, 0);
0269 }
0270 if (status && status != -EEXIST)
0271 return status;
0272
0273 return 0;
0274 }
0275
0276
0277
0278
0279
0280
0281
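/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */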
0282 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
0283 {
0284 int status;
0285
0286 if (vsi->type != ICE_VSI_PF)
0287 return 0;
0288
0289 if (ice_vsi_has_non_zero_vlans(vsi)) {
0290 promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
0291 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
0292 promisc_m);
0293 } else {
0294 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
0295 promisc_m, 0);
0296 }
0297
0298 return status;
0299 }
0300
0301
0302
0303
0304
0305 static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
0306 {
0307 struct ice_pf *pf = ice_netdev_to_pf(netdev);
0308
0309 if (!ice_is_switchdev_running(pf))
0310 return NULL;
0311
0312 return &pf->devlink_port;
0313 }
0314
0315
0316
0317
0318
0319
0320
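/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */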
0321 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
0322 {
0323 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
0324 struct device *dev = ice_pf_to_dev(vsi->back);
0325 struct net_device *netdev = vsi->netdev;
0326 bool promisc_forced_on = false;
0327 struct ice_pf *pf = vsi->back;
0328 struct ice_hw *hw = &pf->hw;
0329 u32 changed_flags = 0;
0330 int err;
0331
0332 if (!vsi->netdev)
0333 return -EINVAL;
0334
0335 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
0336 usleep_range(1000, 2000);
0337
0338 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
0339 vsi->current_netdev_flags = vsi->netdev->flags;
0340
0341 INIT_LIST_HEAD(&vsi->tmp_sync_list);
0342 INIT_LIST_HEAD(&vsi->tmp_unsync_list);
0343
0344 if (ice_vsi_fltr_changed(vsi)) {
0345 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
0346 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
0347
0348
0349 netif_addr_lock_bh(netdev);
0350 __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
0351 ice_add_mac_to_unsync_list);
0352 __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
0353 ice_add_mac_to_unsync_list);
0354
0355 netif_addr_unlock_bh(netdev);
0356 }
0357
0358
0359 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
0360 ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
0361 if (err) {
0362 netdev_err(netdev, "Failed to delete MAC filters\n");
0363
0364 if (err == -ENOMEM)
0365 goto out;
0366 }
0367
0368
0369 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
0370 ice_fltr_free_list(dev, &vsi->tmp_sync_list);
0371
0372
0373
0374
0375 if (err && err != -EEXIST) {
0376 netdev_err(netdev, "Failed to add MAC filters\n");
0377
0378
0379
0380
0381 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
0382 !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
0383 vsi->state)) {
0384 promisc_forced_on = true;
0385 netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
0386 vsi->vsi_num);
0387 } else {
0388 goto out;
0389 }
0390 }
0391 err = 0;
0392
0393 if (changed_flags & IFF_ALLMULTI) {
0394 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
0395 err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
0396 if (err) {
0397 vsi->current_netdev_flags &= ~IFF_ALLMULTI;
0398 goto out_promisc;
0399 }
0400 } else {
0401
0402 err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
0403 if (err) {
0404 vsi->current_netdev_flags |= IFF_ALLMULTI;
0405 goto out_promisc;
0406 }
0407 }
0408 }
0409
0410 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
0411 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
0412 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
0413 if (vsi->current_netdev_flags & IFF_PROMISC) {
0414
0415 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
0416 err = ice_set_dflt_vsi(vsi);
0417 if (err && err != -EEXIST) {
0418 netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
0419 err, vsi->vsi_num);
0420 vsi->current_netdev_flags &=
0421 ~IFF_PROMISC;
0422 goto out_promisc;
0423 }
0424 err = 0;
0425 vlan_ops->dis_rx_filtering(vsi);
0426 }
0427 } else {
0428
0429 if (ice_is_vsi_dflt_vsi(vsi)) {
0430 err = ice_clear_dflt_vsi(vsi);
0431 if (err) {
0432 netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
0433 err, vsi->vsi_num);
0434 vsi->current_netdev_flags |=
0435 IFF_PROMISC;
0436 goto out_promisc;
0437 }
0438 if (vsi->netdev->features &
0439 NETIF_F_HW_VLAN_CTAG_FILTER)
0440 vlan_ops->ena_rx_filtering(vsi);
0441 }
0442 }
0443 }
0444 goto exit;
0445
0446 out_promisc:
0447 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
0448 goto exit;
0449 out:
0450
0451 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
0452 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
0453 exit:
0454 clear_bit(ICE_CFG_BUSY, vsi->state);
0455 return err;
0456 }
0457
0458
0459
0460
0461
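/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */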
0462 static void ice_sync_fltr_subtask(struct ice_pf *pf)
0463 {
0464 int v;
0465
0466 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
0467 return;
0468
0469 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
0470
0471 ice_for_each_vsi(pf, v)
0472 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
0473 ice_vsi_sync_fltr(pf->vsi[v])) {
0474
0475 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
0476 break;
0477 }
0478 }
0479
0480
0481
0482
0483
0484
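/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */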
0485 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
0486 {
0487 int node;
0488 int v;
0489
0490 ice_for_each_vsi(pf, v)
0491 if (pf->vsi[v])
0492 ice_dis_vsi(pf->vsi[v], locked);
0493
0494 for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
0495 pf->pf_agg_node[node].num_vsis = 0;
0496
0497 for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
0498 pf->vf_agg_node[node].num_vsis = 0;
0499 }
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
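/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in software structures. There are cases
 * where rules (especially advanced rules) need to be restored, either re-read
 * from hardware or added again, for example after a reset. The recp_created
 * flag would prevent that, so it needs to be cleared up front.
 */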
0510 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
0511 {
0512 struct ice_sw_recipe *recp;
0513 u8 i;
0514
0515 recp = pf->hw.switch_info->recp_list;
0516 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
0517 recp[i].recp_created = false;
0518 }
0519
0520
0521
0522
0523
0524
0525
0526
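/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in preparation for reset.
 */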
0527 static void
0528 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
0529 {
0530 struct ice_hw *hw = &pf->hw;
0531 struct ice_vsi *vsi;
0532 struct ice_vf *vf;
0533 unsigned int bkt;
0534
0535 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
0536
0537
0538 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
0539 return;
0540
0541 ice_unplug_aux_dev(pf);
0542
0543
0544 if (ice_check_sq_alive(hw, &hw->mailboxq))
0545 ice_vc_notify_reset(pf);
0546
0547
0548 mutex_lock(&pf->vfs.table_lock);
0549 ice_for_each_vf(pf, bkt, vf)
0550 ice_set_vf_state_qs_dis(vf);
0551 mutex_unlock(&pf->vfs.table_lock);
0552
0553 if (ice_is_eswitch_mode_switchdev(pf)) {
0554 if (reset_type != ICE_RESET_PFR)
0555 ice_clear_sw_switch_recipes(pf);
0556 }
0557
0558
0559 vsi = ice_get_main_vsi(pf);
0560 if (!vsi)
0561 goto skip;
0562
0563
0564
0565
0566 vsi->orig_rss_size = 0;
0567
0568 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
0569 if (reset_type == ICE_RESET_PFR) {
0570 vsi->old_ena_tc = vsi->all_enatc;
0571 vsi->old_numtc = vsi->all_numtc;
0572 } else {
0573 ice_remove_q_channels(vsi, true);
0574
0575
0576
0577
0578 vsi->old_ena_tc = 0;
0579 vsi->all_enatc = 0;
0580 vsi->old_numtc = 0;
0581 vsi->all_numtc = 0;
0582 vsi->req_txq = 0;
0583 vsi->req_rxq = 0;
0584 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
0585 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
0586 }
0587 }
0588 skip:
0589
0590
0591 ice_clear_hw_tbls(hw);
0592
0593 ice_pf_dis_all_vsi(pf, false);
0594
0595 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
0596 ice_ptp_prepare_for_reset(pf);
0597
0598 if (ice_is_feature_supported(pf, ICE_F_GNSS))
0599 ice_gnss_exit(pf);
0600
0601 if (hw->port_info)
0602 ice_sched_clear_port(hw->port_info);
0603
0604 ice_shutdown_all_ctrlq(hw);
0605
0606 set_bit(ICE_PREPARED_FOR_RESET, pf->state);
0607 }
0608
0609
0610
0611
0612
0613
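/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was invoked
 */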
0614 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
0615 {
0616 struct device *dev = ice_pf_to_dev(pf);
0617 struct ice_hw *hw = &pf->hw;
0618
0619 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
0620
0621 ice_prepare_for_reset(pf, reset_type);
0622
0623
0624 if (ice_reset(hw, reset_type)) {
0625 dev_err(dev, "reset %d failed\n", reset_type);
0626 set_bit(ICE_RESET_FAILED, pf->state);
0627 clear_bit(ICE_RESET_OICR_RECV, pf->state);
0628 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
0629 clear_bit(ICE_PFR_REQ, pf->state);
0630 clear_bit(ICE_CORER_REQ, pf->state);
0631 clear_bit(ICE_GLOBR_REQ, pf->state);
0632 wake_up(&pf->reset_wait_queue);
0633 return;
0634 }
0635
0636
0637
0638
0639
0640 if (reset_type == ICE_RESET_PFR) {
0641 pf->pfr_count++;
0642 ice_rebuild(pf, reset_type);
0643 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
0644 clear_bit(ICE_PFR_REQ, pf->state);
0645 wake_up(&pf->reset_wait_queue);
0646 ice_reset_all_vfs(pf);
0647 }
0648 }
0649
0650
0651
0652
0653
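/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */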
0654 static void ice_reset_subtask(struct ice_pf *pf)
0655 {
0656 enum ice_reset_req reset_type = ICE_RESET_INVAL;
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
0669
0670 if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
0671 reset_type = ICE_RESET_CORER;
0672 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
0673 reset_type = ICE_RESET_GLOBR;
0674 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
0675 reset_type = ICE_RESET_EMPR;
0676
0677 if (reset_type == ICE_RESET_INVAL)
0678 return;
0679 ice_prepare_for_reset(pf, reset_type);
0680
0681
0682 if (ice_check_reset(&pf->hw)) {
0683 set_bit(ICE_RESET_FAILED, pf->state);
0684 } else {
0685
0686 pf->hw.reset_ongoing = false;
0687 ice_rebuild(pf, reset_type);
0688
0689
0690
0691 clear_bit(ICE_RESET_OICR_RECV, pf->state);
0692 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
0693 clear_bit(ICE_PFR_REQ, pf->state);
0694 clear_bit(ICE_CORER_REQ, pf->state);
0695 clear_bit(ICE_GLOBR_REQ, pf->state);
0696 wake_up(&pf->reset_wait_queue);
0697 ice_reset_all_vfs(pf);
0698 }
0699
0700 return;
0701 }
0702
0703
0704 if (test_bit(ICE_PFR_REQ, pf->state))
0705 reset_type = ICE_RESET_PFR;
0706 if (test_bit(ICE_CORER_REQ, pf->state))
0707 reset_type = ICE_RESET_CORER;
0708 if (test_bit(ICE_GLOBR_REQ, pf->state))
0709 reset_type = ICE_RESET_GLOBR;
0710
0711 if (reset_type == ICE_RESET_INVAL)
0712 return;
0713
0714
0715 if (!test_bit(ICE_DOWN, pf->state) &&
0716 !test_bit(ICE_CFG_BUSY, pf->state)) {
0717 ice_do_reset(pf, reset_type);
0718 }
0719 }
0720
0721
0722
0723
0724
0725 static void ice_print_topo_conflict(struct ice_vsi *vsi)
0726 {
0727 switch (vsi->port_info->phy.link_info.topo_media_conflict) {
0728 case ICE_AQ_LINK_TOPO_CONFLICT:
0729 case ICE_AQ_LINK_MEDIA_CONFLICT:
0730 case ICE_AQ_LINK_TOPO_UNREACH_PRT:
0731 case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
0732 case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
0733 netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
0734 break;
0735 case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
0736 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
0737 netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
0738 else
0739 netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
0740 break;
0741 default:
0742 break;
0743 }
0744 }
0745
0746
0747
0748
0749
0750
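/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */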
0751 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
0752 {
0753 struct ice_aqc_get_phy_caps_data *caps;
0754 const char *an_advertised;
0755 const char *fec_req;
0756 const char *speed;
0757 const char *fec;
0758 const char *fc;
0759 const char *an;
0760 int status;
0761
0762 if (!vsi)
0763 return;
0764
0765 if (vsi->current_isup == isup)
0766 return;
0767
0768 vsi->current_isup = isup;
0769
0770 if (!isup) {
0771 netdev_info(vsi->netdev, "NIC Link is Down\n");
0772 return;
0773 }
0774
0775 switch (vsi->port_info->phy.link_info.link_speed) {
0776 case ICE_AQ_LINK_SPEED_100GB:
0777 speed = "100 G";
0778 break;
0779 case ICE_AQ_LINK_SPEED_50GB:
0780 speed = "50 G";
0781 break;
0782 case ICE_AQ_LINK_SPEED_40GB:
0783 speed = "40 G";
0784 break;
0785 case ICE_AQ_LINK_SPEED_25GB:
0786 speed = "25 G";
0787 break;
0788 case ICE_AQ_LINK_SPEED_20GB:
0789 speed = "20 G";
0790 break;
0791 case ICE_AQ_LINK_SPEED_10GB:
0792 speed = "10 G";
0793 break;
0794 case ICE_AQ_LINK_SPEED_5GB:
0795 speed = "5 G";
0796 break;
0797 case ICE_AQ_LINK_SPEED_2500MB:
0798 speed = "2.5 G";
0799 break;
0800 case ICE_AQ_LINK_SPEED_1000MB:
0801 speed = "1 G";
0802 break;
0803 case ICE_AQ_LINK_SPEED_100MB:
0804 speed = "100 M";
0805 break;
0806 default:
0807 speed = "Unknown ";
0808 break;
0809 }
0810
0811 switch (vsi->port_info->fc.current_mode) {
0812 case ICE_FC_FULL:
0813 fc = "Rx/Tx";
0814 break;
0815 case ICE_FC_TX_PAUSE:
0816 fc = "Tx";
0817 break;
0818 case ICE_FC_RX_PAUSE:
0819 fc = "Rx";
0820 break;
0821 case ICE_FC_NONE:
0822 fc = "None";
0823 break;
0824 default:
0825 fc = "Unknown";
0826 break;
0827 }
0828
0829
0830 switch (vsi->port_info->phy.link_info.fec_info) {
0831 case ICE_AQ_LINK_25G_RS_528_FEC_EN:
0832 case ICE_AQ_LINK_25G_RS_544_FEC_EN:
0833 fec = "RS-FEC";
0834 break;
0835 case ICE_AQ_LINK_25G_KR_FEC_EN:
0836 fec = "FC-FEC/BASE-R";
0837 break;
0838 default:
0839 fec = "NONE";
0840 break;
0841 }
0842
0843
0844 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
0845 an = "True";
0846 else
0847 an = "False";
0848
0849
0850 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
0851 if (!caps) {
0852 fec_req = "Unknown";
0853 an_advertised = "Unknown";
0854 goto done;
0855 }
0856
0857 status = ice_aq_get_phy_caps(vsi->port_info, false,
0858 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
0859 if (status)
0860 netdev_info(vsi->netdev, "Get phy capability failed.\n");
0861
0862 an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
0863
0864 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
0865 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
0866 fec_req = "RS-FEC";
0867 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
0868 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
0869 fec_req = "FC-FEC/BASE-R";
0870 else
0871 fec_req = "NONE";
0872
0873 kfree(caps);
0874
0875 done:
0876 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
0877 speed, fec_req, fec, an_advertised, an, fc);
0878 ice_print_topo_conflict(vsi);
0879 }
0880
0881
0882
0883
0884
0885
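/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */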
0886 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
0887 {
0888 if (!vsi)
0889 return;
0890
0891 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
0892 return;
0893
0894 if (vsi->type == ICE_VSI_PF) {
0895 if (link_up == netif_carrier_ok(vsi->netdev))
0896 return;
0897
0898 if (link_up) {
0899 netif_carrier_on(vsi->netdev);
0900 netif_tx_wake_all_queues(vsi->netdev);
0901 } else {
0902 netif_carrier_off(vsi->netdev);
0903 netif_tx_stop_all_queues(vsi->netdev);
0904 }
0905 }
0906 }
0907
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
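/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that link flow control may not operate as
 * expected. Therefore an error here is reported with a debug message and the
 * driver continues with rebuild/reenable.
 */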
0919 static void ice_set_dflt_mib(struct ice_pf *pf)
0920 {
0921 struct device *dev = ice_pf_to_dev(pf);
0922 u8 mib_type, *buf, *lldpmib = NULL;
0923 u16 len, typelen, offset = 0;
0924 struct ice_lldp_org_tlv *tlv;
0925 struct ice_hw *hw = &pf->hw;
0926 u32 ouisubtype;
0927
0928 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
0929 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
0930 if (!lldpmib) {
0931 dev_dbg(dev, "%s Failed to allocate MIB memory\n",
0932 __func__);
0933 return;
0934 }
0935
0936
0937 tlv = (struct ice_lldp_org_tlv *)lldpmib;
0938 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
0939 ICE_IEEE_ETS_TLV_LEN);
0940 tlv->typelen = htons(typelen);
0941 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
0942 ICE_IEEE_SUBTYPE_ETS_CFG);
0943 tlv->ouisubtype = htonl(ouisubtype);
0944
0945 buf = tlv->tlvinfo;
0946 buf[0] = 0;
0947
0948
0949
0950
0951
0952 buf[5] = 0x64;
0953 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
0954 offset += len + 2;
0955 tlv = (struct ice_lldp_org_tlv *)
0956 ((char *)tlv + sizeof(tlv->typelen) + len);
0957
0958
0959 buf = tlv->tlvinfo;
0960 tlv->typelen = htons(typelen);
0961
0962 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
0963 ICE_IEEE_SUBTYPE_ETS_REC);
0964 tlv->ouisubtype = htonl(ouisubtype);
0965
0966
0967
0968
0969
0970
0971 buf[5] = 0x64;
0972 offset += len + 2;
0973 tlv = (struct ice_lldp_org_tlv *)
0974 ((char *)tlv + sizeof(tlv->typelen) + len);
0975
0976
0977 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
0978 ICE_IEEE_PFC_TLV_LEN);
0979 tlv->typelen = htons(typelen);
0980
0981 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
0982 ICE_IEEE_SUBTYPE_PFC_CFG);
0983 tlv->ouisubtype = htonl(ouisubtype);
0984
0985
0986 buf[0] = 0x08;
0987 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
0988 offset += len + 2;
0989
0990 if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
0991 dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
0992
0993 kfree(lldpmib);
0994 }
0995
0996
0997
0998
0999
1000
1001
1002
1003 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1004 {
1005 if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1006 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1007 return;
1008 }
1009
1010 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1011 return;
1012
1013 if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1014 dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1015 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1016 }
1017 }
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1028 {
1029
1030 if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1031 ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1032 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1033 return;
1034 }
1035
1036
1037
1038
1039 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1040 return;
1041
1042 if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1043 dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1044 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1045 } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1046 dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1047 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1048 }
1049 }
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1060 {
1061 ice_check_module_power(pf, link_cfg_err);
1062 ice_check_phy_fw_load(pf, link_cfg_err);
1063 }
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
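/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */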
1074 static int
1075 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1076 u16 link_speed)
1077 {
1078 struct device *dev = ice_pf_to_dev(pf);
1079 struct ice_phy_info *phy_info;
1080 struct ice_vsi *vsi;
1081 u16 old_link_speed;
1082 bool old_link;
1083 int status;
1084
1085 phy_info = &pi->phy;
1086 phy_info->link_info_old = phy_info->link_info;
1087
1088 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1089 old_link_speed = phy_info->link_info_old.link_speed;
1090
1091
1092
1093
1094 status = ice_update_link_info(pi);
1095 if (status)
1096 dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1097 pi->lport, status,
1098 ice_aq_str(pi->hw->adminq.sq_last_status));
1099
1100 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1101
1102
1103
1104
1105 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1106 link_up = true;
1107
1108 vsi = ice_get_main_vsi(pf);
1109 if (!vsi || !vsi->port_info)
1110 return -EINVAL;
1111
1112
1113 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1114 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1115 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1116 ice_set_link(vsi, false);
1117 }
1118
1119
1120 if (link_up == old_link && link_speed == old_link_speed)
1121 return 0;
1122
1123 if (!ice_is_e810(&pf->hw))
1124 ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1125
1126 if (ice_is_dcb_active(pf)) {
1127 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1128 ice_dcb_rebuild(pf);
1129 } else {
1130 if (link_up)
1131 ice_set_dflt_mib(pf);
1132 }
1133 ice_vsi_link_event(vsi, link_up);
1134 ice_print_link_msg(vsi, link_up);
1135
1136 ice_vc_notify_link_state(pf);
1137
1138 return 0;
1139 }
1140
1141
1142
1143
1144
1145 static void ice_watchdog_subtask(struct ice_pf *pf)
1146 {
1147 int i;
1148
1149
1150 if (test_bit(ICE_DOWN, pf->state) ||
1151 test_bit(ICE_CFG_BUSY, pf->state))
1152 return;
1153
1154
1155 if (time_before(jiffies,
1156 pf->serv_tmr_prev + pf->serv_tmr_period))
1157 return;
1158
1159 pf->serv_tmr_prev = jiffies;
1160
1161
1162
1163
1164 ice_update_pf_stats(pf);
1165 ice_for_each_vsi(pf, i)
1166 if (pf->vsi[i] && pf->vsi[i]->netdev)
1167 ice_update_vsi_stats(pf->vsi[i]);
1168 }
1169
1170
1171
1172
1173
1174
1175
1176 static int ice_init_link_events(struct ice_port_info *pi)
1177 {
1178 u16 mask;
1179
1180 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1181 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1182 ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1183
1184 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1185 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1186 pi->lport);
1187 return -EIO;
1188 }
1189
1190 if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1191 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1192 pi->lport);
1193 return -EIO;
1194 }
1195
1196 return 0;
1197 }
1198
1199
1200
1201
1202
1203
1204 static int
1205 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1206 {
1207 struct ice_aqc_get_link_status_data *link_data;
1208 struct ice_port_info *port_info;
1209 int status;
1210
1211 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1212 port_info = pf->hw.port_info;
1213 if (!port_info)
1214 return -EINVAL;
1215
1216 status = ice_link_event(pf, port_info,
1217 !!(link_data->link_info & ICE_AQ_LINK_UP),
1218 le16_to_cpu(link_data->link_speed));
1219 if (status)
1220 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1221 status);
1222
1223 return status;
1224 }
1225
1226 enum ice_aq_task_state {
1227 ICE_AQ_TASK_WAITING = 0,
1228 ICE_AQ_TASK_COMPLETE,
1229 ICE_AQ_TASK_CANCELED,
1230 };
1231
1232 struct ice_aq_task {
1233 struct hlist_node entry;
1234
1235 u16 opcode;
1236 struct ice_rq_event_info *event;
1237 enum ice_aq_task_state state;
1238 };
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
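/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */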
1257 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1258 struct ice_rq_event_info *event)
1259 {
1260 struct device *dev = ice_pf_to_dev(pf);
1261 struct ice_aq_task *task;
1262 unsigned long start;
1263 long ret;
1264 int err;
1265
1266 task = kzalloc(sizeof(*task), GFP_KERNEL);
1267 if (!task)
1268 return -ENOMEM;
1269
1270 INIT_HLIST_NODE(&task->entry);
1271 task->opcode = opcode;
1272 task->event = event;
1273 task->state = ICE_AQ_TASK_WAITING;
1274
1275 spin_lock_bh(&pf->aq_wait_lock);
1276 hlist_add_head(&task->entry, &pf->aq_wait_list);
1277 spin_unlock_bh(&pf->aq_wait_lock);
1278
1279 start = jiffies;
1280
1281 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1282 timeout);
1283 switch (task->state) {
1284 case ICE_AQ_TASK_WAITING:
1285 err = ret < 0 ? ret : -ETIMEDOUT;
1286 break;
1287 case ICE_AQ_TASK_CANCELED:
1288 err = ret < 0 ? ret : -ECANCELED;
1289 break;
1290 case ICE_AQ_TASK_COMPLETE:
1291 err = ret < 0 ? ret : 0;
1292 break;
1293 default:
1294 WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1295 err = -EINVAL;
1296 break;
1297 }
1298
1299 dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1300 jiffies_to_msecs(jiffies - start),
1301 jiffies_to_msecs(timeout),
1302 opcode);
1303
1304 spin_lock_bh(&pf->aq_wait_lock);
1305 hlist_del(&task->entry);
1306 spin_unlock_bh(&pf->aq_wait_lock);
1307 kfree(task);
1308
1309 return err;
1310 }
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
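/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the task's event has a
 * buffer with enough space already allocated. Otherwise, only the descriptor
 * and message length will be copied.
 */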
1330 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1331 struct ice_rq_event_info *event)
1332 {
1333 struct ice_aq_task *task;
1334 bool found = false;
1335
1336 spin_lock_bh(&pf->aq_wait_lock);
1337 hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1338 if (task->state || task->opcode != opcode)
1339 continue;
1340
1341 memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1342 task->event->msg_len = event->msg_len;
1343
1344
1345 if (task->event->msg_buf &&
1346 task->event->buf_len > event->buf_len) {
1347 memcpy(task->event->msg_buf, event->msg_buf,
1348 event->buf_len);
1349 task->event->buf_len = event->buf_len;
1350 }
1351
1352 task->state = ICE_AQ_TASK_COMPLETE;
1353 found = true;
1354 }
1355 spin_unlock_bh(&pf->aq_wait_lock);
1356
1357 if (found)
1358 wake_up(&pf->aq_wait_queue);
1359 }
1360
1361
1362
1363
1364
1365
1366
1367
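/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */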
1368 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1369 {
1370 struct ice_aq_task *task;
1371
1372 spin_lock_bh(&pf->aq_wait_lock);
1373 hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1374 task->state = ICE_AQ_TASK_CANCELED;
1375 spin_unlock_bh(&pf->aq_wait_lock);
1376
1377 wake_up(&pf->aq_wait_queue);
1378 }
1379
1380
1381
1382
1383
1384
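/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */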
1385 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1386 {
1387 struct device *dev = ice_pf_to_dev(pf);
1388 struct ice_rq_event_info event;
1389 struct ice_hw *hw = &pf->hw;
1390 struct ice_ctl_q_info *cq;
1391 u16 pending, i = 0;
1392 const char *qtype;
1393 u32 oldval, val;
1394
1395
1396 if (test_bit(ICE_RESET_FAILED, pf->state))
1397 return 0;
1398
1399 switch (q_type) {
1400 case ICE_CTL_Q_ADMIN:
1401 cq = &hw->adminq;
1402 qtype = "Admin";
1403 break;
1404 case ICE_CTL_Q_SB:
1405 cq = &hw->sbq;
1406 qtype = "Sideband";
1407 break;
1408 case ICE_CTL_Q_MAILBOX:
1409 cq = &hw->mailboxq;
1410 qtype = "Mailbox";
1411
1412
1413
1414 hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1415 break;
1416 default:
1417 dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1418 return 0;
1419 }
1420
1421
1422
1423
1424 val = rd32(hw, cq->rq.len);
1425 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1426 PF_FW_ARQLEN_ARQCRIT_M)) {
1427 oldval = val;
1428 if (val & PF_FW_ARQLEN_ARQVFE_M)
1429 dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1430 qtype);
1431 if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1432 dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1433 qtype);
1434 }
1435 if (val & PF_FW_ARQLEN_ARQCRIT_M)
1436 dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1437 qtype);
1438 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1439 PF_FW_ARQLEN_ARQCRIT_M);
1440 if (oldval != val)
1441 wr32(hw, cq->rq.len, val);
1442 }
1443
1444 val = rd32(hw, cq->sq.len);
1445 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1446 PF_FW_ATQLEN_ATQCRIT_M)) {
1447 oldval = val;
1448 if (val & PF_FW_ATQLEN_ATQVFE_M)
1449 dev_dbg(dev, "%s Send Queue VF Error detected\n",
1450 qtype);
1451 if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1452 dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1453 qtype);
1454 }
1455 if (val & PF_FW_ATQLEN_ATQCRIT_M)
1456 dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1457 qtype);
1458 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1459 PF_FW_ATQLEN_ATQCRIT_M);
1460 if (oldval != val)
1461 wr32(hw, cq->sq.len, val);
1462 }
1463
1464 event.buf_len = cq->rq_buf_size;
1465 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1466 if (!event.msg_buf)
1467 return 0;
1468
1469 do {
1470 u16 opcode;
1471 int ret;
1472
1473 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1474 if (ret == -EALREADY)
1475 break;
1476 if (ret) {
1477 dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1478 ret);
1479 break;
1480 }
1481
1482 opcode = le16_to_cpu(event.desc.opcode);
1483
1484
1485 ice_aq_check_events(pf, opcode, &event);
1486
1487 switch (opcode) {
1488 case ice_aqc_opc_get_link_status:
1489 if (ice_handle_link_event(pf, &event))
1490 dev_err(dev, "Could not handle link event\n");
1491 break;
1492 case ice_aqc_opc_event_lan_overflow:
1493 ice_vf_lan_overflow_event(pf, &event);
1494 break;
1495 case ice_mbx_opc_send_msg_to_pf:
1496 if (!ice_is_malicious_vf(pf, &event, i, pending))
1497 ice_vc_process_vf_msg(pf, &event);
1498 break;
1499 case ice_aqc_opc_fw_logging:
1500 ice_output_fw_log(hw, &event.desc, event.msg_buf);
1501 break;
1502 case ice_aqc_opc_lldp_set_mib_change:
1503 ice_dcb_process_lldp_set_mib_change(pf, &event);
1504 break;
1505 default:
1506 dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1507 qtype, opcode);
1508 break;
1509 }
1510 } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1511
1512 kfree(event.msg_buf);
1513
1514 return pending && (i == ICE_DFLT_IRQ_WORK);
1515 }
1516
1517
1518
1519
1520
1521
1522
1523
1524 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1525 {
1526 u16 ntu;
1527
1528 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1529 return cq->rq.next_to_clean != ntu;
1530 }
1531
1532
1533
1534
1535
1536 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1537 {
1538 struct ice_hw *hw = &pf->hw;
1539
1540 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1541 return;
1542
1543 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1544 return;
1545
1546 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1547
1548
1549
1550
1551
1552
1553 if (ice_ctrlq_pending(hw, &hw->adminq))
1554 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1555
1556 ice_flush(hw);
1557 }
1558
1559
1560
1561
1562
1563 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1564 {
1565 struct ice_hw *hw = &pf->hw;
1566
1567 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1568 return;
1569
1570 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1571 return;
1572
1573 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1574
1575 if (ice_ctrlq_pending(hw, &hw->mailboxq))
1576 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1577
1578 ice_flush(hw);
1579 }
1580
1581
1582
1583
1584
1585 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1586 {
1587 struct ice_hw *hw = &pf->hw;
1588
1589
1590 if (!ice_is_sbq_supported(hw)) {
1591 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1592 return;
1593 }
1594
1595 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1596 return;
1597
1598 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1599 return;
1600
1601 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1602
1603 if (ice_ctrlq_pending(hw, &hw->sbq))
1604 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1605
1606 ice_flush(hw);
1607 }
1608
1609
1610
1611
1612
1613
1614
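/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */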
1615 void ice_service_task_schedule(struct ice_pf *pf)
1616 {
1617 if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1618 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1619 !test_bit(ICE_NEEDS_RESTART, pf->state))
1620 queue_work(ice_wq, &pf->serv_task);
1621 }
1622
1623
1624
1625
1626
1627 static void ice_service_task_complete(struct ice_pf *pf)
1628 {
1629 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1630
1631
1632 smp_mb__before_atomic();
1633 clear_bit(ICE_SERVICE_SCHED, pf->state);
1634 }
1635
1636
1637
1638
1639
1640
1641
1642
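/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */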
1643 static int ice_service_task_stop(struct ice_pf *pf)
1644 {
1645 int ret;
1646
1647 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1648
1649 if (pf->serv_tmr.function)
1650 del_timer_sync(&pf->serv_tmr);
1651 if (pf->serv_task.func)
1652 cancel_work_sync(&pf->serv_task);
1653
1654 clear_bit(ICE_SERVICE_SCHED, pf->state);
1655 return ret;
1656 }
1657
1658
1659
1660
1661
1662
1663
1664 static void ice_service_task_restart(struct ice_pf *pf)
1665 {
1666 clear_bit(ICE_SERVICE_DIS, pf->state);
1667 ice_service_task_schedule(pf);
1668 }
1669
1670
1671
1672
1673
1674 static void ice_service_timer(struct timer_list *t)
1675 {
1676 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1677
1678 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1679 ice_service_task_schedule(pf);
1680 }
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
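/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from the service task. The OICR interrupt handler indicates an MDD
 * event by setting ICE_MDD_EVENT_PENDING. PF and VF log messages are gated by
 * netif_msg_[rx|tx]_err. Since VF Rx MDD events disable the queue, the PF may
 * be configured to reset the offending VF via the ICE_FLAG_MDD_AUTO_RESET_VF
 * flag.
 */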
1692 static void ice_handle_mdd_event(struct ice_pf *pf)
1693 {
1694 struct device *dev = ice_pf_to_dev(pf);
1695 struct ice_hw *hw = &pf->hw;
1696 struct ice_vf *vf;
1697 unsigned int bkt;
1698 u32 reg;
1699
1700 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1701
1702
1703
1704 ice_print_vfs_mdd_events(pf);
1705 return;
1706 }
1707
1708
1709 reg = rd32(hw, GL_MDET_TX_PQM);
1710 if (reg & GL_MDET_TX_PQM_VALID_M) {
1711 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1712 GL_MDET_TX_PQM_PF_NUM_S;
1713 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1714 GL_MDET_TX_PQM_VF_NUM_S;
1715 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1716 GL_MDET_TX_PQM_MAL_TYPE_S;
1717 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1718 GL_MDET_TX_PQM_QNUM_S);
1719
1720 if (netif_msg_tx_err(pf))
1721 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1722 event, queue, pf_num, vf_num);
1723 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1724 }
1725
1726 reg = rd32(hw, GL_MDET_TX_TCLAN);
1727 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1728 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1729 GL_MDET_TX_TCLAN_PF_NUM_S;
1730 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1731 GL_MDET_TX_TCLAN_VF_NUM_S;
1732 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1733 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1734 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1735 GL_MDET_TX_TCLAN_QNUM_S);
1736
1737 if (netif_msg_tx_err(pf))
1738 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1739 event, queue, pf_num, vf_num);
1740 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1741 }
1742
1743 reg = rd32(hw, GL_MDET_RX);
1744 if (reg & GL_MDET_RX_VALID_M) {
1745 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1746 GL_MDET_RX_PF_NUM_S;
1747 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1748 GL_MDET_RX_VF_NUM_S;
1749 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1750 GL_MDET_RX_MAL_TYPE_S;
1751 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1752 GL_MDET_RX_QNUM_S);
1753
1754 if (netif_msg_rx_err(pf))
1755 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1756 event, queue, pf_num, vf_num);
1757 wr32(hw, GL_MDET_RX, 0xffffffff);
1758 }
1759
1760
1761 reg = rd32(hw, PF_MDET_TX_PQM);
1762 if (reg & PF_MDET_TX_PQM_VALID_M) {
1763 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1764 if (netif_msg_tx_err(pf))
1765 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1766 }
1767
1768 reg = rd32(hw, PF_MDET_TX_TCLAN);
1769 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1770 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1771 if (netif_msg_tx_err(pf))
1772 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1773 }
1774
1775 reg = rd32(hw, PF_MDET_RX);
1776 if (reg & PF_MDET_RX_VALID_M) {
1777 wr32(hw, PF_MDET_RX, 0xFFFF);
1778 if (netif_msg_rx_err(pf))
1779 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1780 }
1781
1782
1783
1784
1785 mutex_lock(&pf->vfs.table_lock);
1786 ice_for_each_vf(pf, bkt, vf) {
1787 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1788 if (reg & VP_MDET_TX_PQM_VALID_M) {
1789 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1790 vf->mdd_tx_events.count++;
1791 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1792 if (netif_msg_tx_err(pf))
1793 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1794 vf->vf_id);
1795 }
1796
1797 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1798 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1799 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1800 vf->mdd_tx_events.count++;
1801 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1802 if (netif_msg_tx_err(pf))
1803 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1804 vf->vf_id);
1805 }
1806
1807 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1808 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1809 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1810 vf->mdd_tx_events.count++;
1811 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1812 if (netif_msg_tx_err(pf))
1813 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1814 vf->vf_id);
1815 }
1816
1817 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1818 if (reg & VP_MDET_RX_VALID_M) {
1819 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1820 vf->mdd_rx_events.count++;
1821 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1822 if (netif_msg_rx_err(pf))
1823 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1824 vf->vf_id);
1825
1826
1827
1828
1829
1830 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1831
1832
1833
1834 ice_print_vf_rx_mdd_event(vf);
1835 ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1836 }
1837 }
1838 }
1839 mutex_unlock(&pf->vfs.table_lock);
1840
1841 ice_print_vfs_mdd_events(pf);
1842 }
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
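/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities.
 *
 * Returns 0 on success, negative on failure
 */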
1856 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1857 {
1858 struct ice_aqc_get_phy_caps_data *pcaps;
1859 struct ice_aqc_set_phy_cfg_data *cfg;
1860 struct ice_port_info *pi;
1861 struct device *dev;
1862 int retcode;
1863
1864 if (!vsi || !vsi->port_info || !vsi->back)
1865 return -EINVAL;
1866 if (vsi->type != ICE_VSI_PF)
1867 return 0;
1868
1869 dev = ice_pf_to_dev(vsi->back);
1870
1871 pi = vsi->port_info;
1872
1873 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1874 if (!pcaps)
1875 return -ENOMEM;
1876
1877 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1878 NULL);
1879 if (retcode) {
1880 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1881 vsi->vsi_num, retcode);
1882 retcode = -EIO;
1883 goto out;
1884 }
1885
1886
1887 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1888 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1889 goto out;
1890
1891
1892
1893
1894
1895 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1896 if (!cfg) {
1897 retcode = -ENOMEM;
1898 goto out;
1899 }
1900
1901 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1902 if (link_up)
1903 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1904 else
1905 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1906
1907 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1908 if (retcode) {
1909 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1910 vsi->vsi_num, retcode);
1911 retcode = -EIO;
1912 }
1913
1914 kfree(cfg);
1915 out:
1916 kfree(pcaps);
1917 return retcode;
1918 }
1919
1920
1921
1922
1923
1924
1925
1926 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1927 {
1928 struct ice_aqc_get_phy_caps_data *pcaps;
1929 struct ice_pf *pf = pi->hw->back;
1930 int err;
1931
1932 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1933 if (!pcaps)
1934 return -ENOMEM;
1935
1936 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1937 pcaps, NULL);
1938
1939 if (err) {
1940 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1941 goto out;
1942 }
1943
1944 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1945 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1946
1947 out:
1948 kfree(pcaps);
1949 return err;
1950 }
1951
1952
1953
1954
1955
1956
1957
1958 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1959 {
1960 struct ice_link_default_override_tlv *ldo;
1961 struct ice_pf *pf = pi->hw->back;
1962
1963 ldo = &pf->link_dflt_override;
1964 if (ice_get_link_default_override(ldo, pi))
1965 return;
1966
1967 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1968 return;
1969
1970
1971
1972
1973 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1974 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1975 }
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
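/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override has been
 * initialized and the PHY has not yet been configured with it. The state is
 * set here and cleared in ice_configure_phy the first time the PHY is
 * configured.
 */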
1994 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1995 {
1996 struct ice_link_default_override_tlv *ldo;
1997 struct ice_aqc_set_phy_cfg_data *cfg;
1998 struct ice_phy_info *phy = &pi->phy;
1999 struct ice_pf *pf = pi->hw->back;
2000
2001 ldo = &pf->link_dflt_override;
2002
2003
2004
2005
2006 cfg = &phy->curr_user_phy_cfg;
2007
2008 if (ldo->phy_type_low || ldo->phy_type_high) {
2009 cfg->phy_type_low = pf->nvm_phy_type_lo &
2010 cpu_to_le64(ldo->phy_type_low);
2011 cfg->phy_type_high = pf->nvm_phy_type_hi &
2012 cpu_to_le64(ldo->phy_type_high);
2013 }
2014 cfg->link_fec_opt = ldo->fec_options;
2015 phy->curr_user_fec_req = ICE_FEC_AUTO;
2016
2017 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2018 }
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
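/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * settings. Returns 0 on success, negative on failure
 */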
2034 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2035 {
2036 struct ice_aqc_get_phy_caps_data *pcaps;
2037 struct ice_phy_info *phy = &pi->phy;
2038 struct ice_pf *pf = pi->hw->back;
2039 int err;
2040
2041 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2042 return -EIO;
2043
2044 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2045 if (!pcaps)
2046 return -ENOMEM;
2047
2048 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2049 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2050 pcaps, NULL);
2051 else
2052 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2053 pcaps, NULL);
2054 if (err) {
2055 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2056 goto err_out;
2057 }
2058
2059 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2060
2061
2062 if (ice_fw_supports_link_override(pi->hw) &&
2063 !(pcaps->module_compliance_enforcement &
2064 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2065 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2066
2067
2068
2069
2070
2071 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2072 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2073 ice_init_phy_cfg_dflt_override(pi);
2074 goto out;
2075 }
2076 }
2077
2078
2079
2080
2081 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2082 pcaps->link_fec_options);
2083 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2084
2085 out:
2086 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2087 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2088 err_out:
2089 kfree(pcaps);
2090 return err;
2091 }
2092
2093
2094
2095
2096
2097
2098
2099
2100
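/**
 * ice_configure_phy - configure PHY
 * @vsi: VSI of PHY
 *
 * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
 * configure the PHY based on the reported PHY capabilities for media.
 */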
2101 static int ice_configure_phy(struct ice_vsi *vsi)
2102 {
2103 struct device *dev = ice_pf_to_dev(vsi->back);
2104 struct ice_port_info *pi = vsi->port_info;
2105 struct ice_aqc_get_phy_caps_data *pcaps;
2106 struct ice_aqc_set_phy_cfg_data *cfg;
2107 struct ice_phy_info *phy = &pi->phy;
2108 struct ice_pf *pf = vsi->back;
2109 int err;
2110
2111
2112 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2113 return -EPERM;
2114
2115 ice_print_topo_conflict(vsi);
2116
2117 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2118 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2119 return -EPERM;
2120
2121 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2122 return ice_force_phys_link_state(vsi, true);
2123
2124 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2125 if (!pcaps)
2126 return -ENOMEM;
2127
2128
2129 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2130 NULL);
2131 if (err) {
2132 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2133 vsi->vsi_num, err);
2134 goto done;
2135 }
2136
2137
2138
2139
2140 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2141 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2142 goto done;
2143
2144
2145 memset(pcaps, 0, sizeof(*pcaps));
2146 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2147 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2148 pcaps, NULL);
2149 else
2150 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2151 pcaps, NULL);
2152 if (err) {
2153 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2154 vsi->vsi_num, err);
2155 goto done;
2156 }
2157
2158 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2159 if (!cfg) {
2160 err = -ENOMEM;
2161 goto done;
2162 }
2163
2164 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2165
2166
2167
2168
2169 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2170 vsi->back->state)) {
2171 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2172 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2173 } else {
2174 u64 phy_low = 0, phy_high = 0;
2175
2176 ice_update_phy_type(&phy_low, &phy_high,
2177 pi->phy.curr_user_speed_req);
2178 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2179 cfg->phy_type_high = pcaps->phy_type_high &
2180 cpu_to_le64(phy_high);
2181 }
2182
2183
2184 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2185 cfg->phy_type_low = pcaps->phy_type_low;
2186 cfg->phy_type_high = pcaps->phy_type_high;
2187 }
2188
2189
2190 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2191
2192
2193 if (cfg->link_fec_opt !=
2194 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2195 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2196 cfg->link_fec_opt = pcaps->link_fec_options;
2197 }
2198
2199
2200
2201
2202 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2203
2204
2205 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2206
2207 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2208 if (err)
2209 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2210 vsi->vsi_num, err);
2211
2212 kfree(cfg);
2213 done:
2214 kfree(pcaps);
2215 return err;
2216 }
2217
2218
2219
2220
2221
2222
2223
2224
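/**
 * ice_check_media_subtask - Check for media
 * @pf: pointer to PF struct
 *
 * If media is available, initialize the PHY user configuration if it has not
 * been done yet, and configure the PHY if the interface is up.
 */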
2225 static void ice_check_media_subtask(struct ice_pf *pf)
2226 {
2227 struct ice_port_info *pi;
2228 struct ice_vsi *vsi;
2229 int err;
2230
2231
2232 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2233 return;
2234
2235 vsi = ice_get_main_vsi(pf);
2236 if (!vsi)
2237 return;
2238
2239
2240 pi = vsi->port_info;
2241 err = ice_update_link_info(pi);
2242 if (err)
2243 return;
2244
2245 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2246
2247 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2248 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2249 ice_init_phy_user_cfg(pi);
2250
2251
2252
2253
2254 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2255 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2256 return;
2257
2258 err = ice_configure_phy(vsi);
2259 if (!err)
2260 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2261
2262
2263
2264
2265 }
2266 }
2267
2268
2269
2270
2271
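/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */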
2272 static void ice_service_task(struct work_struct *work)
2273 {
2274 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2275 unsigned long start_time = jiffies;
2276
2277
2278
2279
2280 ice_reset_subtask(pf);
2281
2282
2283 if (ice_is_reset_in_progress(pf->state) ||
2284 test_bit(ICE_SUSPENDED, pf->state) ||
2285 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2286 ice_service_task_complete(pf);
2287 return;
2288 }
2289
2290 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2291 struct iidc_event *event;
2292
2293 event = kzalloc(sizeof(*event), GFP_KERNEL);
2294 if (event) {
2295 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2296
2297 swap(event->reg, pf->oicr_err_reg);
2298 ice_send_event_to_aux(pf, event);
2299 kfree(event);
2300 }
2301 }
2302
2303 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
2304
2305 ice_plug_aux_dev(pf);
2306
2307
2308
2309
2310
2311
2312 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2313 ice_unplug_aux_dev(pf);
2314 }
2315
2316 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2317 struct iidc_event *event;
2318
2319 event = kzalloc(sizeof(*event), GFP_KERNEL);
2320 if (event) {
2321 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2322 ice_send_event_to_aux(pf, event);
2323 kfree(event);
2324 }
2325 }
2326
2327 ice_clean_adminq_subtask(pf);
2328 ice_check_media_subtask(pf);
2329 ice_check_for_hang_subtask(pf);
2330 ice_sync_fltr_subtask(pf);
2331 ice_handle_mdd_event(pf);
2332 ice_watchdog_subtask(pf);
2333
2334 if (ice_is_safe_mode(pf)) {
2335 ice_service_task_complete(pf);
2336 return;
2337 }
2338
2339 ice_process_vflr_event(pf);
2340 ice_clean_mailboxq_subtask(pf);
2341 ice_clean_sbq_subtask(pf);
2342 ice_sync_arfs_fltrs(pf);
2343 ice_flush_fdir_ctx(pf);
2344
2345
2346 ice_service_task_complete(pf);
2347
2348
2349
2350
2351
2352 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2353 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2354 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2355 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2356 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2357 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2358 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2359 mod_timer(&pf->serv_tmr, jiffies);
2360 }
2361
2362
2363
2364
2365
2366 static void ice_set_ctrlq_len(struct ice_hw *hw)
2367 {
2368 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2369 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2370 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2371 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2372 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2373 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2374 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2375 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2376 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2377 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2378 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2379 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2380 }
2381
2382
2383
2384
2385
2386
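/**
 * ice_schedule_reset - schedule a reset
 * @pf: board private structure
 * @reset: reset being requested
 */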
2387 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2388 {
2389 struct device *dev = ice_pf_to_dev(pf);
2390
2391
2392 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2393 dev_dbg(dev, "earlier reset has failed\n");
2394 return -EIO;
2395 }
2396
2397 if (ice_is_reset_in_progress(pf->state)) {
2398 dev_dbg(dev, "Reset already in progress\n");
2399 return -EBUSY;
2400 }
2401
2402 switch (reset) {
2403 case ICE_RESET_PFR:
2404 set_bit(ICE_PFR_REQ, pf->state);
2405 break;
2406 case ICE_RESET_CORER:
2407 set_bit(ICE_CORER_REQ, pf->state);
2408 break;
2409 case ICE_RESET_GLOBR:
2410 set_bit(ICE_GLOBR_REQ, pf->state);
2411 break;
2412 default:
2413 return -EINVAL;
2414 }
2415
2416 ice_service_task_schedule(pf);
2417 return 0;
2418 }
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428 static void
2429 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2430 const cpumask_t *mask)
2431 {
2432 struct ice_q_vector *q_vector =
2433 container_of(notify, struct ice_q_vector, affinity_notify);
2434
2435 cpumask_copy(&q_vector->affinity_mask, mask);
2436 }
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2447
2448
2449
2450
2451
2452 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2453 {
2454 struct ice_hw *hw = &vsi->back->hw;
2455 int i;
2456
2457 ice_for_each_q_vector(vsi, i)
2458 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2459
2460 ice_flush(hw);
2461 return 0;
2462 }
2463
2464 /**
2465  * ice_vsi_req_irq_msix - request IRQs for the VSI's MSI-X vectors
2466  * @vsi: the VSI being configured
2467  * @basename: base name used when naming each vector
2468  */
2469 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2470 {
2471 int q_vectors = vsi->num_q_vectors;
2472 struct ice_pf *pf = vsi->back;
2473 int base = vsi->base_vector;
2474 struct device *dev;
2475 int rx_int_idx = 0;
2476 int tx_int_idx = 0;
2477 int vector, err;
2478 int irq_num;
2479
2480 dev = ice_pf_to_dev(pf);
2481 for (vector = 0; vector < q_vectors; vector++) {
2482 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2483
2484 irq_num = pf->msix_entries[base + vector].vector;
2485
2486 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2487 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2488 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2489 tx_int_idx++;
2490 } else if (q_vector->rx.rx_ring) {
2491 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2492 "%s-%s-%d", basename, "rx", rx_int_idx++);
2493 } else if (q_vector->tx.tx_ring) {
2494 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2495 "%s-%s-%d", basename, "tx", tx_int_idx++);
2496 } else {
2497
2498 continue;
2499 }
2500 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2501 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2502 IRQF_SHARED, q_vector->name,
2503 q_vector);
2504 else
2505 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2506 0, q_vector->name, q_vector);
2507 if (err) {
2508 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2509 err);
2510 goto free_q_irqs;
2511 }
2512
2513
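/* when aRFS (CONFIG_RFS_ACCEL) is built in, the CPU rmap set up below
 * owns the affinity notifier for this IRQ, so only register the
 * driver's own notifier when aRFS is not available
 */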
2514 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2515 struct irq_affinity_notify *affinity_notify;
2516
2517 affinity_notify = &q_vector->affinity_notify;
2518 affinity_notify->notify = ice_irq_affinity_notify;
2519 affinity_notify->release = ice_irq_affinity_release;
2520 irq_set_affinity_notifier(irq_num, affinity_notify);
2521 }
2522
2523
2524 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2525 }
2526
2527 err = ice_set_cpu_rx_rmap(vsi);
2528 if (err) {
2529 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2530 vsi->vsi_num, ERR_PTR(err));
2531 goto free_q_irqs;
2532 }
2533
2534 vsi->irqs_ready = true;
2535 return 0;
2536
2537 free_q_irqs:
2538 while (vector) {
2539 vector--;
2540 irq_num = pf->msix_entries[base + vector].vector;
2541 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2542 irq_set_affinity_notifier(irq_num, NULL);
2543 irq_set_affinity_hint(irq_num, NULL);
2544 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2545 }
2546 return err;
2547 }
2548
2549 /**
2550  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2551  * @vsi: VSI to have XDP Tx rings allocated on
2552  *
2553  * Return 0 on success and negative value on error
2554  */
2555 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2556 {
2557 struct device *dev = ice_pf_to_dev(vsi->back);
2558 struct ice_tx_desc *tx_desc;
2559 int i, j;
2560
2561 ice_for_each_xdp_txq(vsi, i) {
2562 u16 xdp_q_idx = vsi->alloc_txq + i;
2563 struct ice_tx_ring *xdp_ring;
2564
2565 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2566
2567 if (!xdp_ring)
2568 goto free_xdp_rings;
2569
2570 xdp_ring->q_index = xdp_q_idx;
2571 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2572 xdp_ring->vsi = vsi;
2573 xdp_ring->netdev = NULL;
2574 xdp_ring->dev = dev;
2575 xdp_ring->count = vsi->num_tx_desc;
2576 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
2577 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
2578 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2579 if (ice_setup_tx_ring(xdp_ring))
2580 goto free_xdp_rings;
2581 ice_set_ring_xdp(xdp_ring);
2582 spin_lock_init(&xdp_ring->tx_lock);
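/* zero every descriptor's command/type/size field so no stale DD
 * bits are seen the first time the ring is cleaned
 */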
2583 for (j = 0; j < xdp_ring->count; j++) {
2584 tx_desc = ICE_TX_DESC(xdp_ring, j);
2585 tx_desc->cmd_type_offset_bsz = 0;
2586 }
2587 }
2588
2589 return 0;
2590
2591 free_xdp_rings:
2592 for (; i >= 0; i--)
2593 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2594 ice_free_tx_ring(vsi->xdp_rings[i]);
2595 return -ENOMEM;
2596 }
2597
2598 /**
2599  * ice_vsi_assign_bpf_prog - set or clear the XDP program on a VSI
2600  * @vsi: VSI to assign the XDP program to
2601  * @prog: XDP program to assign, NULL to clear it
2602  */
2603 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2604 {
2605 struct bpf_prog *old_prog;
2606 int i;
2607
2608 old_prog = xchg(&vsi->xdp_prog, prog);
2609 if (old_prog)
2610 bpf_prog_put(old_prog);
2611
2612 ice_for_each_rxq(vsi, i)
2613 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2614 }
2615
2616 /**
2617  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2618  * @vsi: VSI to bring up Tx rings used by XDP
2619  * @prog: bpf program that will be assigned to VSI
2620  *
2621  * Return 0 on success and negative value on error
2622  */
2623 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2624 {
2625 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2626 int xdp_rings_rem = vsi->num_xdp_txq;
2627 struct ice_pf *pf = vsi->back;
2628 struct ice_qs_cfg xdp_qs_cfg = {
2629 .qs_mutex = &pf->avail_q_mutex,
2630 .pf_map = pf->avail_txqs,
2631 .pf_map_size = pf->max_pf_txqs,
2632 .q_count = vsi->num_xdp_txq,
2633 .scatter_count = ICE_MAX_SCATTER_TXQS,
2634 .vsi_map = vsi->txq_map,
2635 .vsi_map_offset = vsi->alloc_txq,
2636 .mapping_mode = ICE_VSI_MAP_CONTIG
2637 };
2638 struct device *dev;
2639 int i, v_idx;
2640 int status;
2641
2642 dev = ice_pf_to_dev(pf);
2643 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2644 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2645 if (!vsi->xdp_rings)
2646 return -ENOMEM;
2647
2648 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2649 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2650 goto err_map_xdp;
2651
2652 if (static_key_enabled(&ice_xdp_locking_key))
2653 netdev_warn(vsi->netdev,
2654 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2655
2656 if (ice_xdp_alloc_setup_rings(vsi))
2657 goto clear_xdp_rings;
2658
2659
2660 ice_for_each_q_vector(vsi, v_idx) {
2661 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2662 int xdp_rings_per_v, q_id, q_base;
2663
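/* hand out the remaining XDP rings as evenly as possible across the
 * remaining queue vectors
 */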
2664 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2665 vsi->num_q_vectors - v_idx);
2666 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2667
2668 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2669 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2670
2671 xdp_ring->q_vector = q_vector;
2672 xdp_ring->next = q_vector->tx.tx_ring;
2673 q_vector->tx.tx_ring = xdp_ring;
2674 }
2675 xdp_rings_rem -= xdp_rings_per_v;
2676 }
2677
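/* point every Rx ring at an XDP Tx ring: a shared ring chosen
 * round-robin when there are fewer XDP rings than CPUs (the locking
 * static key is enabled in that case), otherwise the XDP ring that
 * hangs off the same queue vector
 */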
2678 ice_for_each_rxq(vsi, i) {
2679 if (static_key_enabled(&ice_xdp_locking_key)) {
2680 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2681 } else {
2682 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2683 struct ice_tx_ring *ring;
2684
2685 ice_for_each_tx_ring(ring, q_vector->tx) {
2686 if (ice_ring_is_xdp(ring)) {
2687 vsi->rx_rings[i]->xdp_ring = ring;
2688 break;
2689 }
2690 }
2691 }
2692 ice_tx_xsk_pool(vsi, i);
2693 }
2694
2695
2696 /* omit the scheduler update if a reset is in progress; the XDP
2697  * queues are accounted for again when the VSI is rebuilt
2698  */
2699 if (ice_is_reset_in_progress(pf->state))
2700 return 0;
2701
2702 /* tell the Tx scheduler that the VSI now also owns the additional
2703  * XDP Tx queues
2704  */
2705 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2706 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2707
2708 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2709 max_txqs);
2710 if (status) {
2711 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2712 status);
2713 goto clear_xdp_rings;
2714 }
2715
2716 /* assign the program only when it is not already present on the
2717  * VSI; this path also runs on VSI rebuild (e.g. ethtool -L), in
2718  * which case the program already assigned to the VSI must be left
2719  * untouched
2720  */
2725 if (!ice_is_xdp_ena_vsi(vsi))
2726 ice_vsi_assign_bpf_prog(vsi, prog);
2727
2728 return 0;
2729 clear_xdp_rings:
2730 ice_for_each_xdp_txq(vsi, i)
2731 if (vsi->xdp_rings[i]) {
2732 kfree_rcu(vsi->xdp_rings[i], rcu);
2733 vsi->xdp_rings[i] = NULL;
2734 }
2735
2736 err_map_xdp:
2737 mutex_lock(&pf->avail_q_mutex);
2738 ice_for_each_xdp_txq(vsi, i) {
2739 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2740 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2741 }
2742 mutex_unlock(&pf->avail_q_mutex);
2743
2744 devm_kfree(dev, vsi->xdp_rings);
2745 return -ENOMEM;
2746 }
2747
2748 /**
2749  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2750  * @vsi: VSI to remove XDP rings from
2751  *
2752  * Detach XDP rings from interrupt vectors, clean them up and free the
2753  * related resources
2754  */
2755 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2756 {
2757 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2758 struct ice_pf *pf = vsi->back;
2759 int i, v_idx;
2760
2761 /* q_vectors are freed in the reset path, so there is no point in
2762  * detaching rings there; a rebuild that is not driven by a reset
2763  * does not set the reset bits in pf->state, so additionally check
2764  * the first q_vector against NULL
2765  */
2766 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2767 goto free_qmap;
2768
2769 ice_for_each_q_vector(vsi, v_idx) {
2770 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2771 struct ice_tx_ring *ring;
2772
2773 ice_for_each_tx_ring(ring, q_vector->tx)
2774 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2775 break;
2776
2777
2778 q_vector->tx.tx_ring = ring;
2779 }
2780
2781 free_qmap:
2782 mutex_lock(&pf->avail_q_mutex);
2783 ice_for_each_xdp_txq(vsi, i) {
2784 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2785 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2786 }
2787 mutex_unlock(&pf->avail_q_mutex);
2788
2789 ice_for_each_xdp_txq(vsi, i)
2790 if (vsi->xdp_rings[i]) {
2791 if (vsi->xdp_rings[i]->desc) {
2792 synchronize_rcu();
2793 ice_free_tx_ring(vsi->xdp_rings[i]);
2794 }
2795 kfree_rcu(vsi->xdp_rings[i], rcu);
2796 vsi->xdp_rings[i] = NULL;
2797 }
2798
2799 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2800 vsi->xdp_rings = NULL;
2801
2802 if (static_key_enabled(&ice_xdp_locking_key))
2803 static_branch_dec(&ice_xdp_locking_key);
2804
2805 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2806 return 0;
2807
2808 ice_vsi_assign_bpf_prog(vsi, NULL);
2809
2810
2811
2812
2813 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2814 max_txqs[i] = vsi->num_txq;
2815
2816
2817 vsi->num_xdp_txq = 0;
2818
2819 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2820 max_txqs);
2821 }
2822
2823 /**
2824  * ice_vsi_rx_napi_schedule - Schedule napi on Rx queues from VSI
2825  * @vsi: VSI to schedule napi on
2826  */
2827 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2828 {
2829 int i;
2830
2831 ice_for_each_rxq(vsi, i) {
2832 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2833
2834 if (rx_ring->xsk_pool)
2835 napi_schedule(&rx_ring->q_vector->napi);
2836 }
2837 }
2838
2839 /**
2840  * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can use
2841  * @vsi: VSI to check available Tx queues against
2842  *
2843  * Returns 0 when there are enough free Tx queues (at least half the
2844  * CPU count), negative value otherwise
2845  */
2846 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2847 {
2848 u16 avail = ice_get_avail_txq_count(vsi->back);
2849 u16 cpus = num_possible_cpus();
2850
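/* refuse XDP unless at least half as many Tx queues as CPUs are free;
 * when fewer than one ring per CPU can be allocated, the rings are
 * shared, so the XDP Tx locking static key is enabled
 */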
2851 if (avail < cpus / 2)
2852 return -ENOMEM;
2853
2854 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2855
2856 if (vsi->num_xdp_txq < cpus)
2857 static_branch_inc(&ice_xdp_locking_key);
2858
2859 return 0;
2860 }
2861
2862 /**
2863  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2864  * @vsi: VSI to setup XDP for
2865  * @prog: XDP program
2866  * @extack: netlink extended ack
2867  */
2868 static int
2869 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2870 struct netlink_ext_ack *extack)
2871 {
2872 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2873 bool if_running = netif_running(vsi->netdev);
2874 int ret = 0, xdp_ring_err = 0;
2875
2876 if (frame_size > vsi->rx_buf_len) {
2877 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2878 return -EOPNOTSUPP;
2879 }
2880
2881 /* the netdev has to be brought down while the rings are reconfigured */
2882 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2883 ret = ice_down(vsi);
2884 if (ret) {
2885 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2886 return ret;
2887 }
2888 }
2889
2890 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2891 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2892 if (xdp_ring_err) {
2893 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2894 } else {
2895 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2896 if (xdp_ring_err)
2897 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2898 }
2899
2900 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2901 if (xdp_ring_err)
2902 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2903 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2904 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2905 if (xdp_ring_err)
2906 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2907
2908 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2909 if (xdp_ring_err)
2910 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2911 } else {
2912 /* safe to call even when prog == vsi->xdp_prog: the reference
2913  * handed to the driver with the command makes up for the
2914  * bpf_prog_put() of the old (identical) program
2915  */
2917 ice_vsi_assign_bpf_prog(vsi, prog);
2918 }
2919
2920 if (if_running)
2921 ret = ice_up(vsi);
2922
2923 if (!ret && prog)
2924 ice_vsi_rx_napi_schedule(vsi);
2925
2926 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2927 }
2928
2929 /**
2930  * ice_xdp_safe_mode - XDP handler used while the device is in safe mode
2931  * @dev: netdevice
2932  * @xdp: XDP command
2933  */
2934 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2935 struct netdev_bpf *xdp)
2936 {
2937 NL_SET_ERR_MSG_MOD(xdp->extack,
2938 "Please provide working DDP firmware package in order to use XDP\n"
2939 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2940 return -EOPNOTSUPP;
2941 }
2942
2943 /**
2944  * ice_xdp - implements the ndo_bpf netdev callback
2945  * @dev: netdevice
2946  * @xdp: XDP command
2947  */
2948 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2949 {
2950 struct ice_netdev_priv *np = netdev_priv(dev);
2951 struct ice_vsi *vsi = np->vsi;
2952
2953 if (vsi->type != ICE_VSI_PF) {
2954 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2955 return -EINVAL;
2956 }
2957
2958 switch (xdp->command) {
2959 case XDP_SETUP_PROG:
2960 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2961 case XDP_SETUP_XSK_POOL:
2962 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2963 xdp->xsk.queue_id);
2964 default:
2965 return -EINVAL;
2966 }
2967 }
2968
2969 /**
2970  * ice_ena_misc_vector - enable the non-queue interrupt causes
2971  * @pf: board private structure
2972  */
2973 static void ice_ena_misc_vector(struct ice_pf *pf)
2974 {
2975 struct ice_hw *hw = &pf->hw;
2976 u32 val;
2977
2978 /* disable the anti-spoof check interrupt cause; anti-spoof
2979  * detection itself stays enabled, only the interrupt reporting
2980  * is turned off
2981  */
2982 val = rd32(hw, GL_MDCK_TX_TDPU);
2983 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2984 wr32(hw, GL_MDCK_TX_TDPU, val);
2985
2986
2987 wr32(hw, PFINT_OICR_ENA, 0);
2988 rd32(hw, PFINT_OICR);
2989
2990 val = (PFINT_OICR_ECC_ERR_M |
2991 PFINT_OICR_MAL_DETECT_M |
2992 PFINT_OICR_GRST_M |
2993 PFINT_OICR_PCI_EXCEPTION_M |
2994 PFINT_OICR_VFLR_M |
2995 PFINT_OICR_HMC_ERR_M |
2996 PFINT_OICR_PE_PUSH_M |
2997 PFINT_OICR_PE_CRITERR_M);
2998
2999 wr32(hw, PFINT_OICR_ENA, val);
3000
3001
3002 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
3003 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3004 }
3005
3006 /**
3007  * ice_misc_intr - misc interrupt handler
3008  * @irq: interrupt number
3009  * @data: pointer to the PF structure
3010  */
3011 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3012 {
3013 struct ice_pf *pf = (struct ice_pf *)data;
3014 struct ice_hw *hw = &pf->hw;
3015 irqreturn_t ret = IRQ_NONE;
3016 struct device *dev;
3017 u32 oicr, ena_mask;
3018
3019 dev = ice_pf_to_dev(pf);
3020 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3021 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3022 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3023
3024 oicr = rd32(hw, PFINT_OICR);
3025 ena_mask = rd32(hw, PFINT_OICR_ENA);
3026
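/* ena_mask tracks which causes have been handled below; any cause
 * still enabled and pending at the end is logged as unhandled
 */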
3027 if (oicr & PFINT_OICR_SWINT_M) {
3028 ena_mask &= ~PFINT_OICR_SWINT_M;
3029 pf->sw_int_count++;
3030 }
3031
3032 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3033 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3034 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3035 }
3036 if (oicr & PFINT_OICR_VFLR_M) {
3037
3038 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3039 u32 reg = rd32(hw, PFINT_OICR_ENA);
3040
3041 reg &= ~PFINT_OICR_VFLR_M;
3042 wr32(hw, PFINT_OICR_ENA, reg);
3043 } else {
3044 ena_mask &= ~PFINT_OICR_VFLR_M;
3045 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3046 }
3047 }
3048
3049 if (oicr & PFINT_OICR_GRST_M) {
3050 u32 reset;
3051
3052
3053 ena_mask &= ~PFINT_OICR_GRST_M;
3054 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3055 GLGEN_RSTAT_RESET_TYPE_S;
3056
3057 if (reset == ICE_RESET_CORER)
3058 pf->corer_count++;
3059 else if (reset == ICE_RESET_GLOBR)
3060 pf->globr_count++;
3061 else if (reset == ICE_RESET_EMPR)
3062 pf->empr_count++;
3063 else
3064 dev_dbg(dev, "Invalid reset type %d\n", reset);
3065
3066 /* if a reset cycle isn't already in progress, set a bit in
3067  * pf->state so that the service task can start a reset/rebuild
3068  */
3069 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3070 if (reset == ICE_RESET_CORER)
3071 set_bit(ICE_CORER_RECV, pf->state);
3072 else if (reset == ICE_RESET_GLOBR)
3073 set_bit(ICE_GLOBR_RECV, pf->state);
3074 else
3075 set_bit(ICE_EMPR_RECV, pf->state);
3076
3077 /* hw->reset_ongoing lets the rest of the driver know that the
3078  * hardware is resetting; it is cleared again once the driver has
3079  * finished rebuilding after the reset completes
3080  */
3090 hw->reset_ongoing = true;
3091 }
3092 }
3093
3094 if (oicr & PFINT_OICR_TSYN_TX_M) {
3095 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3096 ice_ptp_process_ts(pf);
3097 }
3098
3099 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3100 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3101 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3102
3103
3104 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3105 GLTSYN_STAT_EVENT1_M |
3106 GLTSYN_STAT_EVENT2_M);
3107 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3108 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3109 }
3110
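/* critical protocol engine/HMC errors are latched in pf->oicr_err_reg
 * for the auxiliary (RDMA) driver and handled from the service task
 */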
3111 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3112 if (oicr & ICE_AUX_CRIT_ERR) {
3113 pf->oicr_err_reg |= oicr;
3114 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3115 ena_mask &= ~ICE_AUX_CRIT_ERR;
3116 }
3117
3118
3119 oicr &= ena_mask;
3120 if (oicr) {
3121 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3122
3123
3124
3125 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3126 PFINT_OICR_ECC_ERR_M)) {
3127 set_bit(ICE_PFR_REQ, pf->state);
3128 ice_service_task_schedule(pf);
3129 }
3130 }
3131 ret = IRQ_HANDLED;
3132
3133 ice_service_task_schedule(pf);
3134 ice_irq_dynamic_ena(hw, NULL, NULL);
3135
3136 return ret;
3137 }
3138
3139 /**
3140  * ice_dis_ctrlq_interrupts - disable control queue and OICR cause interrupts
3141  * @hw: pointer to HW structure
3142  */
3143 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3144 {
3145
3146 wr32(hw, PFINT_FW_CTL,
3147 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3148
3149
3150 wr32(hw, PFINT_MBX_CTL,
3151 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3152
3153 wr32(hw, PFINT_SB_CTL,
3154 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3155
3156
3157 wr32(hw, PFINT_OICR_CTL,
3158 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3159
3160 ice_flush(hw);
3161 }
3162
3163 /**
3164  * ice_free_irq_msix_misc - Unroll misc vector setup
3165  * @pf: board private structure
3166  */
3167 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3168 {
3169 struct ice_hw *hw = &pf->hw;
3170
3171 ice_dis_ctrlq_interrupts(hw);
3172
3173
3174 wr32(hw, PFINT_OICR_ENA, 0);
3175 ice_flush(hw);
3176
3177 if (pf->msix_entries) {
3178 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3179 devm_free_irq(ice_pf_to_dev(pf),
3180 pf->msix_entries[pf->oicr_idx].vector, pf);
3181 }
3182
3183 pf->num_avail_sw_msix += 1;
3184 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3185 }
3186
3187 /**
3188  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3189  * @hw: pointer to HW structure
3190  * @reg_idx: MSI-X vector index to associate the control queue causes with
3191  */
3192 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3193 {
3194 u32 val;
3195
3196 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3197 PFINT_OICR_CTL_CAUSE_ENA_M);
3198 wr32(hw, PFINT_OICR_CTL, val);
3199
3200
3201 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3202 PFINT_FW_CTL_CAUSE_ENA_M);
3203 wr32(hw, PFINT_FW_CTL, val);
3204
3205
3206 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3207 PFINT_MBX_CTL_CAUSE_ENA_M);
3208 wr32(hw, PFINT_MBX_CTL, val);
3209
3210
3211 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3212 PFINT_SB_CTL_CAUSE_ENA_M);
3213 wr32(hw, PFINT_SB_CTL, val);
3214
3215 ice_flush(hw);
3216 }
3217
3218 /**
3219  * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
3220  * @pf: board private structure
3221  *
3222  * Reserve an MSI-X entry for the "other interrupt cause" (OICR) vector,
3223  * request its IRQ and enable the control queue causes on it, e.g. for
3224  * AdminQ events and error reporting.
3225  */
3226 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3227 {
3228 struct device *dev = ice_pf_to_dev(pf);
3229 struct ice_hw *hw = &pf->hw;
3230 int oicr_idx, err = 0;
3231
3232 if (!pf->int_name[0])
3233 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3234 dev_driver_string(dev), dev_name(dev));
3235
3236
3237
3238
3239
3240 if (ice_is_reset_in_progress(pf->state))
3241 goto skip_req_irq;
3242
3243
3244 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3245 if (oicr_idx < 0)
3246 return oicr_idx;
3247
3248 pf->num_avail_sw_msix -= 1;
3249 pf->oicr_idx = (u16)oicr_idx;
3250
3251 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
3252 ice_misc_intr, 0, pf->int_name, pf);
3253 if (err) {
3254 dev_err(dev, "devm_request_irq for %s failed: %d\n",
3255 pf->int_name, err);
3256 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3257 pf->num_avail_sw_msix += 1;
3258 return err;
3259 }
3260
3261 skip_req_irq:
3262 ice_ena_misc_vector(pf);
3263
3264 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3265 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3266 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3267
3268 ice_flush(hw);
3269 ice_irq_dynamic_ena(hw, NULL, NULL);
3270
3271 return 0;
3272 }
3273
3274 /**
3275  * ice_napi_add - register NAPI handler for the VSI
3276  * @vsi: VSI for which NAPI handler is to be registered
3277  */
3282 static void ice_napi_add(struct ice_vsi *vsi)
3283 {
3284 int v_idx;
3285
3286 if (!vsi->netdev)
3287 return;
3288
3289 ice_for_each_q_vector(vsi, v_idx)
3290 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3291 ice_napi_poll, NAPI_POLL_WEIGHT);
3292 }
3293
3294 /**
3295  * ice_set_ops - set netdev and ethtool ops for the given netdev
3296  * @netdev: netdev instance
3297  */
3298 static void ice_set_ops(struct net_device *netdev)
3299 {
3300 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3301
3302 if (ice_is_safe_mode(pf)) {
3303 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3304 ice_set_ethtool_safe_mode_ops(netdev);
3305 return;
3306 }
3307
3308 netdev->netdev_ops = &ice_netdev_ops;
3309 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3310 ice_set_ethtool_ops(netdev);
3311 }
3312
3313 /**
3314  * ice_set_netdev_features - set features for the given netdev
3315  * @netdev: netdev instance
3316  */
3317 static void ice_set_netdev_features(struct net_device *netdev)
3318 {
3319 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3320 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3321 netdev_features_t csumo_features;
3322 netdev_features_t vlano_features;
3323 netdev_features_t dflt_features;
3324 netdev_features_t tso_features;
3325
3326 if (ice_is_safe_mode(pf)) {
3327
3328 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3329 netdev->hw_features = netdev->features;
3330 return;
3331 }
3332
3333 dflt_features = NETIF_F_SG |
3334 NETIF_F_HIGHDMA |
3335 NETIF_F_NTUPLE |
3336 NETIF_F_RXHASH;
3337
3338 csumo_features = NETIF_F_RXCSUM |
3339 NETIF_F_IP_CSUM |
3340 NETIF_F_SCTP_CRC |
3341 NETIF_F_IPV6_CSUM;
3342
3343 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3344 NETIF_F_HW_VLAN_CTAG_TX |
3345 NETIF_F_HW_VLAN_CTAG_RX;
3346
3347
3348 if (is_dvm_ena)
3349 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3350
3351 tso_features = NETIF_F_TSO |
3352 NETIF_F_TSO_ECN |
3353 NETIF_F_TSO6 |
3354 NETIF_F_GSO_GRE |
3355 NETIF_F_GSO_UDP_TUNNEL |
3356 NETIF_F_GSO_GRE_CSUM |
3357 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3358 NETIF_F_GSO_PARTIAL |
3359 NETIF_F_GSO_IPXIP4 |
3360 NETIF_F_GSO_IPXIP6 |
3361 NETIF_F_GSO_UDP_L4;
3362
3363 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3364 NETIF_F_GSO_GRE_CSUM;
3365
3366 netdev->hw_features = dflt_features | csumo_features |
3367 vlano_features | tso_features;
3368
3369
3370 netdev->mpls_features = NETIF_F_HW_CSUM |
3371 NETIF_F_TSO |
3372 NETIF_F_TSO6;
3373
3374
3375 netdev->features |= netdev->hw_features;
3376
3377 netdev->hw_features |= NETIF_F_HW_TC;
3378 netdev->hw_features |= NETIF_F_LOOPBACK;
3379
3380
3381 netdev->hw_enc_features |= dflt_features | csumo_features |
3382 tso_features;
3383 netdev->vlan_features |= dflt_features | csumo_features |
3384 tso_features;
3385
3386
3387
3388
3389
3390
3391 if (is_dvm_ena)
3392 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3393 NETIF_F_HW_VLAN_STAG_TX;
3394 }
3395
3396 /**
3397  * ice_cfg_netdev - Allocate and configure a netdev for the given VSI
3398  * @vsi: the VSI associated with the new netdev
3399  *
3400  * Returns 0 on success, negative value on failure
3401  */
3402 static int ice_cfg_netdev(struct ice_vsi *vsi)
3403 {
3404 struct ice_netdev_priv *np;
3405 struct net_device *netdev;
3406 u8 mac_addr[ETH_ALEN];
3407
3408 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3409 vsi->alloc_rxq);
3410 if (!netdev)
3411 return -ENOMEM;
3412
3413 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3414 vsi->netdev = netdev;
3415 np = netdev_priv(netdev);
3416 np->vsi = vsi;
3417
3418 ice_set_netdev_features(netdev);
3419
3420 ice_set_ops(netdev);
3421
3422 if (vsi->type == ICE_VSI_PF) {
3423 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3424 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3425 eth_hw_addr_set(netdev, mac_addr);
3426 ether_addr_copy(netdev->perm_addr, mac_addr);
3427 }
3428
3429 netdev->priv_flags |= IFF_UNICAST_FLT;
3430
3431
3432 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3433
3434
3435 netdev->watchdog_timeo = 5 * HZ;
3436
3437 netdev->min_mtu = ETH_MIN_MTU;
3438 netdev->max_mtu = ICE_MAX_MTU;
3439
3440 return 0;
3441 }
3442
3443 /**
3444  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3445  * @lut: Lookup table
3446  * @rss_table_size: Lookup table size
3447  * @rss_size: Range of queue numbers for hashing
3448  */
3449 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3450 {
3451 u16 i;
3452
3453 for (i = 0; i < rss_table_size; i++)
3454 lut[i] = i % rss_size;
3455 }
3456
3457 /**
3458  * ice_pf_vsi_setup - Set up a PF VSI
3459  * @pf: board private structure
3460  * @pi: pointer to the port_info instance
3461  *
3462  * Returns pointer to the successfully allocated VSI software struct
3463  * on success, otherwise returns NULL on failure.
3464  */
3465 static struct ice_vsi *
3466 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3467 {
3468 return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
3469 }
3470
3471 static struct ice_vsi *
3472 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3473 struct ice_channel *ch)
3474 {
3475 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
3476 }
3477
3478 /**
3479  * ice_ctrl_vsi_setup - Set up a control VSI
3480  * @pf: board private structure
3481  * @pi: pointer to the port_info instance
3482  *
3483  * Returns pointer to the successfully allocated VSI software struct
3484  * on success, otherwise returns NULL on failure.
3485  */
3486 static struct ice_vsi *
3487 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3488 {
3489 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
3490 }
3491
3492 /**
3493  * ice_lb_vsi_setup - Set up a loopback VSI
3494  * @pf: board private structure
3495  * @pi: pointer to the port_info instance
3496  *
3497  * Returns pointer to the successfully allocated VSI software struct
3498  * on success, otherwise returns NULL on failure.
3499  */
3500 struct ice_vsi *
3501 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3502 {
3503 return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
3504 }
3505
3506 /**
3507  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3508  * @netdev: network interface to be adjusted
3509  * @proto: VLAN TPID
3510  * @vid: VLAN ID to be added
3511  *
3512  * net_device_ops implementation for adding VLAN IDs
3513  */
3514 static int
3515 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3516 {
3517 struct ice_netdev_priv *np = netdev_priv(netdev);
3518 struct ice_vsi_vlan_ops *vlan_ops;
3519 struct ice_vsi *vsi = np->vsi;
3520 struct ice_vlan vlan;
3521 int ret;
3522
3523
3524 if (!vid)
3525 return 0;
3526
3527 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3528 usleep_range(1000, 2000);
3529
3530 /* while the VSI is in allmulti, also add a multicast promiscuous
3531  * rule scoped to this VLAN
3532  */
3533 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3534 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3535 ICE_MCAST_VLAN_PROMISC_BITS,
3536 vid);
3537 if (ret)
3538 goto finish;
3539 }
3540
3541 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3542
3543
3544
3545
3546 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3547 ret = vlan_ops->add_vlan(vsi, &vlan);
3548 if (ret)
3549 goto finish;
3550
3551
3552
3553
3554
3555 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3556 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3557 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3558 ICE_MCAST_PROMISC_BITS, 0);
3559 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3560 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3561 }
3562
3563 finish:
3564 clear_bit(ICE_CFG_BUSY, vsi->state);
3565
3566 return ret;
3567 }
3568
3569 /**
3570  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3571  * @netdev: network interface to be adjusted
3572  * @proto: VLAN TPID
3573  * @vid: VLAN ID to be removed
3574  *
3575  * net_device_ops implementation for removing VLAN IDs
3576  */
3577 static int
3578 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3579 {
3580 struct ice_netdev_priv *np = netdev_priv(netdev);
3581 struct ice_vsi_vlan_ops *vlan_ops;
3582 struct ice_vsi *vsi = np->vsi;
3583 struct ice_vlan vlan;
3584 int ret;
3585
3586
3587 if (!vid)
3588 return 0;
3589
3590 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3591 usleep_range(1000, 2000);
3592
3593 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3594 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3595 if (ret) {
3596 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3597 vsi->vsi_num);
3598 vsi->current_netdev_flags |= IFF_ALLMULTI;
3599 }
3600
3601 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3602
3603
3604
3605
3606 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3607 ret = vlan_ops->del_vlan(vsi, &vlan);
3608 if (ret)
3609 goto finish;
3610
3611
3612
3613
3614 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3615 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3616 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3617
3618 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3619
3620
3621
3622
3623 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3624 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3625 ICE_MCAST_VLAN_PROMISC_BITS,
3626 0);
3627 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3628 ICE_MCAST_PROMISC_BITS, 0);
3629 }
3630 }
3631
3632 finish:
3633 clear_bit(ICE_CFG_BUSY, vsi->state);
3634
3635 return ret;
3636 }
3637
3638 /**
3639  * ice_rep_indr_tc_block_unbind - release an indirect block's private data
3640  * @cb_priv: indirect block private data
3641  */
3642 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3643 {
3644 struct ice_indr_block_priv *indr_priv = cb_priv;
3645
3646 list_del(&indr_priv->list);
3647 kfree(indr_priv);
3648 }
3649
3650 /**
3651  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3652  * @vsi: VSI struct which has the netdev
3653  */
3654 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3655 {
3656 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3657
3658 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3659 ice_rep_indr_tc_block_unbind);
3660 }
3661
3662 /**
3663  * ice_tc_indir_block_remove - clean indirect TC block notifications
3664  * @pf: PF structure
3665  */
3666 static void ice_tc_indir_block_remove(struct ice_pf *pf)
3667 {
3668 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3669
3670 if (!pf_vsi)
3671 return;
3672
3673 ice_tc_indir_block_unregister(pf_vsi);
3674 }
3675
3676 /**
3677  * ice_tc_indir_block_register - Register TC indirect block notifications
3678  * @vsi: VSI struct which has the netdev
3679  *
3680  * Returns 0 on success, negative value on failure
3681  */
3682 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3683 {
3684 struct ice_netdev_priv *np;
3685
3686 if (!vsi || !vsi->netdev)
3687 return -EINVAL;
3688
3689 np = netdev_priv(vsi->netdev);
3690
3691 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3692 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3693 }
3694
3695 /**
3696  * ice_setup_pf_sw - Setup the PF VSI and its netdev on top of the HW switch
3697  * @pf: board private structure
3698  *
3699  * Returns 0 on success, negative value on failure
3700  */
3701 static int ice_setup_pf_sw(struct ice_pf *pf)
3702 {
3703 struct device *dev = ice_pf_to_dev(pf);
3704 bool dvm = ice_is_dvm_ena(&pf->hw);
3705 struct ice_vsi *vsi;
3706 int status;
3707
3708 if (ice_is_reset_in_progress(pf->state))
3709 return -EBUSY;
3710
3711 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
3712 if (status)
3713 return -EIO;
3714
3715 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3716 if (!vsi)
3717 return -ENOMEM;
3718
3719
3720 INIT_LIST_HEAD(&vsi->ch_list);
3721
3722 status = ice_cfg_netdev(vsi);
3723 if (status)
3724 goto unroll_vsi_setup;
3725
3726 ice_vsi_cfg_frame_size(vsi);
3727
3728
3729 status = ice_tc_indir_block_register(vsi);
3730 if (status) {
3731 dev_err(dev, "Failed to register netdev notifier\n");
3732 goto unroll_cfg_netdev;
3733 }
3734
3735
3736 ice_dcbnl_setup(vsi);
3737
3738
3739
3740
3741
3742 ice_napi_add(vsi);
3743
3744 status = ice_init_mac_fltr(pf);
3745 if (status)
3746 goto unroll_napi_add;
3747
3748 return 0;
3749
3750 unroll_napi_add:
3751 ice_tc_indir_block_unregister(vsi);
3752 unroll_cfg_netdev:
3753 if (vsi) {
3754 ice_napi_del(vsi);
3755 if (vsi->netdev) {
3756 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3757 free_netdev(vsi->netdev);
3758 vsi->netdev = NULL;
3759 }
3760 }
3761
3762 unroll_vsi_setup:
3763 ice_vsi_release(vsi);
3764 return status;
3765 }
3766
3767 /**
3768  * ice_get_avail_q_count - return the number of queues not in use
3769  * @pf_qmap: bitmap of in-use queues
3770  * @lock: mutex that protects the queue map
3771  * @size: size of the queue map
3772  */
3773 static u16
3774 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3775 {
3776 unsigned long bit;
3777 u16 count = 0;
3778
3779 mutex_lock(lock);
3780 for_each_clear_bit(bit, pf_qmap, size)
3781 count++;
3782 mutex_unlock(lock);
3783
3784 return count;
3785 }
3786
3787 /**
3788  * ice_get_avail_txq_count - Get count of Tx queues not in use
3789  * @pf: pointer to an ice_pf instance
3790  */
3791 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3792 {
3793 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3794 pf->max_pf_txqs);
3795 }
3796
3797 /**
3798  * ice_get_avail_rxq_count - Get count of Rx queues not in use
3799  * @pf: pointer to an ice_pf instance
3800  */
3801 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3802 {
3803 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3804 pf->max_pf_rxqs);
3805 }
3806
3807 /**
3808  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3809  * @pf: board private structure to deinitialize
3810  */
3811 static void ice_deinit_pf(struct ice_pf *pf)
3812 {
3813 ice_service_task_stop(pf);
3814 mutex_destroy(&pf->adev_mutex);
3815 mutex_destroy(&pf->sw_mutex);
3816 mutex_destroy(&pf->tc_mutex);
3817 mutex_destroy(&pf->avail_q_mutex);
3818 mutex_destroy(&pf->vfs.table_lock);
3819
3820 if (pf->avail_txqs) {
3821 bitmap_free(pf->avail_txqs);
3822 pf->avail_txqs = NULL;
3823 }
3824
3825 if (pf->avail_rxqs) {
3826 bitmap_free(pf->avail_rxqs);
3827 pf->avail_rxqs = NULL;
3828 }
3829
3830 if (pf->ptp.clock)
3831 ptp_clock_unregister(pf->ptp.clock);
3832 }
3833
3834 /**
3835  * ice_set_pf_caps - set PF capability flags based on HW capabilities
3836  * @pf: pointer to the PF instance
3837  */
3838 static void ice_set_pf_caps(struct ice_pf *pf)
3839 {
3840 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3841
3842 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3843 if (func_caps->common_cap.rdma)
3844 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3845 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3846 if (func_caps->common_cap.dcb)
3847 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3848 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3849 if (func_caps->common_cap.sr_iov_1_1) {
3850 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3851 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3852 ICE_MAX_SRIOV_VFS);
3853 }
3854 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3855 if (func_caps->common_cap.rss_table_size)
3856 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3857
3858 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3859 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3860 u16 unused;
3861
3862
3863
3864
3865 pf->ctrl_vsi_idx = ICE_NO_VSI;
3866 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3867
3868 ice_alloc_fd_guar_item(&pf->hw, &unused,
3869 func_caps->fd_fltr_guar);
3870
3871 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3872 func_caps->fd_fltr_best_effort);
3873 }
3874
3875 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3876 if (func_caps->common_cap.ieee_1588)
3877 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3878
3879 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3880 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3881 }
3882
3883 /**
3884  * ice_init_pf - Initialize general software structures (struct ice_pf)
3885  * @pf: board private structure to initialize
3886  */
3887 static int ice_init_pf(struct ice_pf *pf)
3888 {
3889 ice_set_pf_caps(pf);
3890
3891 mutex_init(&pf->sw_mutex);
3892 mutex_init(&pf->tc_mutex);
3893 mutex_init(&pf->adev_mutex);
3894
3895 INIT_HLIST_HEAD(&pf->aq_wait_list);
3896 spin_lock_init(&pf->aq_wait_lock);
3897 init_waitqueue_head(&pf->aq_wait_queue);
3898
3899 init_waitqueue_head(&pf->reset_wait_queue);
3900
3901
3902 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3903 pf->serv_tmr_period = HZ;
3904 INIT_WORK(&pf->serv_task, ice_service_task);
3905 clear_bit(ICE_SERVICE_SCHED, pf->state);
3906
3907 mutex_init(&pf->avail_q_mutex);
3908 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3909 if (!pf->avail_txqs)
3910 return -ENOMEM;
3911
3912 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3913 if (!pf->avail_rxqs) {
3914 bitmap_free(pf->avail_txqs);
3915 pf->avail_txqs = NULL;
3916 return -ENOMEM;
3917 }
3918
3919 mutex_init(&pf->vfs.table_lock);
3920 hash_init(pf->vfs.table);
3921
3922 return 0;
3923 }
3924
3925 /**
3926  * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
3927  * @pf: board private structure
3928  *
3929  * Returns the number of MSI-X vectors actually obtained from the OS,
3930  * or a negative error code on failure.
3931  */
3932 static int ice_ena_msix_range(struct ice_pf *pf)
3933 {
3934 int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3935 struct device *dev = ice_pf_to_dev(pf);
3936 int needed, err, i;
3937
3938 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3939 num_cpus = num_online_cpus();
3940
3941
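/* the budget is built up in stages: the misc/OICR vector first, then
 * flow director, switchdev, one LAN vector per online CPU and finally
 * the RDMA auxiliary vectors; v_left tracks what the device still has
 * available at each step
 */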
3942 needed = ICE_MIN_LAN_OICR_MSIX;
3943 if (v_left < needed)
3944 goto no_hw_vecs_left_err;
3945 v_budget += needed;
3946 v_left -= needed;
3947
3948
3949 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3950 needed = ICE_FDIR_MSIX;
3951 if (v_left < needed)
3952 goto no_hw_vecs_left_err;
3953 v_budget += needed;
3954 v_left -= needed;
3955 }
3956
3957
3958 needed = ICE_ESWITCH_MSIX;
3959 if (v_left < needed)
3960 goto no_hw_vecs_left_err;
3961 v_budget += needed;
3962 v_left -= needed;
3963
3964
3965 v_other = v_budget;
3966
3967
3968 needed = num_cpus;
3969 if (v_left < needed)
3970 goto no_hw_vecs_left_err;
3971 pf->num_lan_msix = needed;
3972 v_budget += needed;
3973 v_left -= needed;
3974
3975
3976 if (ice_is_rdma_ena(pf)) {
3977 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3978 if (v_left < needed)
3979 goto no_hw_vecs_left_err;
3980 pf->num_rdma_msix = needed;
3981 v_budget += needed;
3982 v_left -= needed;
3983 }
3984
3985 pf->msix_entries = devm_kcalloc(dev, v_budget,
3986 sizeof(*pf->msix_entries), GFP_KERNEL);
3987 if (!pf->msix_entries) {
3988 err = -ENOMEM;
3989 goto exit_err;
3990 }
3991
3992 for (i = 0; i < v_budget; i++)
3993 pf->msix_entries[i].entry = i;
3994
3995
3996 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3997 ICE_MIN_MSIX, v_budget);
3998 if (v_actual < 0) {
3999 dev_err(dev, "unable to reserve MSI-X vectors\n");
4000 err = v_actual;
4001 goto msix_err;
4002 }
4003
4004 if (v_actual < v_budget) {
4005 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
4006 v_budget, v_actual);
4007
4008 if (v_actual < ICE_MIN_MSIX) {
4009
4010 pci_disable_msix(pf->pdev);
4011 err = -ERANGE;
4012 goto msix_err;
4013 } else {
4014 int v_remain = v_actual - v_other;
4015 int v_rdma = 0, v_min_rdma = 0;
4016
4017 if (ice_is_rdma_ena(pf)) {
4018
4019
4020
4021 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
4022 v_min_rdma = ICE_MIN_RDMA_MSIX;
4023 }
4024
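/* split what is left between LAN and RDMA: drop RDMA entirely if its
 * minimum cannot be met, fall back to the minimum RDMA vectors when
 * space is tight, otherwise share the remainder roughly in half with
 * RDMA keeping its AEQ vectors on top
 */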
4025 if (v_actual == ICE_MIN_MSIX ||
4026 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
4027 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
4028 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4029
4030 pf->num_rdma_msix = 0;
4031 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
4032 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
4033 (v_remain - v_rdma < v_rdma)) {
4034
4035
4036
4037 pf->num_rdma_msix = v_min_rdma;
4038 pf->num_lan_msix = v_remain - v_min_rdma;
4039 } else {
4040
4041
4042
4043 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
4044 ICE_RDMA_NUM_AEQ_MSIX;
4045 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
4046 }
4047
4048 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
4049 pf->num_lan_msix);
4050
4051 if (ice_is_rdma_ena(pf))
4052 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
4053 pf->num_rdma_msix);
4054 }
4055 }
4056
4057 return v_actual;
4058
4059 msix_err:
4060 devm_kfree(dev, pf->msix_entries);
4061 goto exit_err;
4062
4063 no_hw_vecs_left_err:
4064 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
4065 needed, v_left);
4066 err = -ERANGE;
4067 exit_err:
4068 pf->num_rdma_msix = 0;
4069 pf->num_lan_msix = 0;
4070 return err;
4071 }
4072
4073 /**
4074  * ice_dis_msix - Disable MSI-X interrupt setup in OS
4075  * @pf: board private structure
4076  */
4077 static void ice_dis_msix(struct ice_pf *pf)
4078 {
4079 pci_disable_msix(pf->pdev);
4080 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
4081 pf->msix_entries = NULL;
4082 }
4083
4084 /**
4085  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4086  * @pf: board private structure
4087  */
4088 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4089 {
4090 ice_dis_msix(pf);
4091
4092 if (pf->irq_tracker) {
4093 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
4094 pf->irq_tracker = NULL;
4095 }
4096 }
4097
4098 /**
4099  * ice_init_interrupt_scheme - Determine proper interrupt scheme
4100  * @pf: board private structure to initialize
4101  */
4102 static int ice_init_interrupt_scheme(struct ice_pf *pf)
4103 {
4104 int vectors;
4105
4106 vectors = ice_ena_msix_range(pf);
4107
4108 if (vectors < 0)
4109 return vectors;
4110
4111
4112 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4113 struct_size(pf->irq_tracker, list, vectors),
4114 GFP_KERNEL);
4115 if (!pf->irq_tracker) {
4116 ice_dis_msix(pf);
4117 return -ENOMEM;
4118 }
4119
4120
4121 pf->num_avail_sw_msix = (u16)vectors;
4122 pf->irq_tracker->num_entries = (u16)vectors;
4123 pf->irq_tracker->end = pf->irq_tracker->num_entries;
4124
4125 return 0;
4126 }
4127
4128 /**
4129  * ice_is_wol_supported - check if Wake-on-LAN is supported
4130  * @hw: pointer to hardware info
4131  *
4132  * Returns true when the NVM WoL configuration does not disable WoL for
4133  * this port, false otherwise.
4134  */
4135 bool ice_is_wol_supported(struct ice_hw *hw)
4136 {
4137 u16 wol_ctrl;
4138
4139
4140
4141
4142 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4143 return false;
4144
4145 return !(BIT(hw->port_info->lport) & wol_ctrl);
4146 }
4147
4148 /**
4149  * ice_vsi_recfg_qs - Change the number of queues on a VSI
4150  * @vsi: VSI being changed
4151  * @new_rx: new number of Rx queues
4152  * @new_tx: new number of Tx queues
4153  *
4154  * Only change the number of queues if new_tx or new_rx is non-zero.
4155  *
4156  * Returns 0 on success.
4157  */
4158 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
4159 {
4160 struct ice_pf *pf = vsi->back;
4161 int err = 0, timeout = 50;
4162
4163 if (!new_rx && !new_tx)
4164 return -EINVAL;
4165
4166 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4167 timeout--;
4168 if (!timeout)
4169 return -EBUSY;
4170 usleep_range(1000, 2000);
4171 }
4172
4173 if (new_tx)
4174 vsi->req_txq = (u16)new_tx;
4175 if (new_rx)
4176 vsi->req_rxq = (u16)new_rx;
4177
4178
4179 if (!netif_running(vsi->netdev)) {
4180 ice_vsi_rebuild(vsi, false);
4181 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4182 goto done;
4183 }
4184
4185 ice_vsi_close(vsi);
4186 ice_vsi_rebuild(vsi, false);
4187 ice_pf_dcb_recfg(pf);
4188 ice_vsi_open(vsi);
4189 done:
4190 clear_bit(ICE_CFG_BUSY, pf->state);
4191 return err;
4192 }
4193
4194 /**
4195  * ice_set_safe_mode_vlan_cfg - configure the PF VSI to allow all VLANs in safe mode
4196  * @pf: PF to configure
4197  *
4198  * No VLAN offloads or filtering are advertised in safe mode, so make sure
4199  * the PF VSI can still Tx/Rx VLAN-tagged packets.
4200  */
4201 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4202 {
4203 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4204 struct ice_vsi_ctx *ctxt;
4205 struct ice_hw *hw;
4206 int status;
4207
4208 if (!vsi)
4209 return;
4210
4211 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4212 if (!ctxt)
4213 return;
4214
4215 hw = &pf->hw;
4216 ctxt->info = vsi->info;
4217
4218 ctxt->info.valid_sections =
4219 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4220 ICE_AQ_VSI_PROP_SECURITY_VALID |
4221 ICE_AQ_VSI_PROP_SW_VALID);
4222
4223
4224 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4225 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4226
4227
4228 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4229
4230
4231 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4232 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4233
4234 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4235 if (status) {
4236 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4237 status, ice_aq_str(hw->adminq.sq_last_status));
4238 } else {
4239 vsi->info.sec_flags = ctxt->info.sec_flags;
4240 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4241 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4242 }
4243
4244 kfree(ctxt);
4245 }
4246
4247 /**
4248  * ice_log_pkg_init - log the result of a DDP package load
4249  * @hw: pointer to hardware info
4250  * @state: state of package load
4251  */
4252 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4253 {
4254 struct ice_pf *pf = hw->back;
4255 struct device *dev;
4256
4257 dev = ice_pf_to_dev(pf);
4258
4259 switch (state) {
4260 case ICE_DDP_PKG_SUCCESS:
4261 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4262 hw->active_pkg_name,
4263 hw->active_pkg_ver.major,
4264 hw->active_pkg_ver.minor,
4265 hw->active_pkg_ver.update,
4266 hw->active_pkg_ver.draft);
4267 break;
4268 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4269 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4270 hw->active_pkg_name,
4271 hw->active_pkg_ver.major,
4272 hw->active_pkg_ver.minor,
4273 hw->active_pkg_ver.update,
4274 hw->active_pkg_ver.draft);
4275 break;
4276 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4277 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4278 hw->active_pkg_name,
4279 hw->active_pkg_ver.major,
4280 hw->active_pkg_ver.minor,
4281 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4282 break;
4283 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4284 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4285 hw->active_pkg_name,
4286 hw->active_pkg_ver.major,
4287 hw->active_pkg_ver.minor,
4288 hw->active_pkg_ver.update,
4289 hw->active_pkg_ver.draft,
4290 hw->pkg_name,
4291 hw->pkg_ver.major,
4292 hw->pkg_ver.minor,
4293 hw->pkg_ver.update,
4294 hw->pkg_ver.draft);
4295 break;
4296 case ICE_DDP_PKG_FW_MISMATCH:
4297 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4298 break;
4299 case ICE_DDP_PKG_INVALID_FILE:
4300 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4301 break;
4302 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4303 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4304 break;
4305 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4306 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4307 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4308 break;
4309 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4310 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4311 break;
4312 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4313 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4314 break;
4315 case ICE_DDP_PKG_LOAD_ERROR:
4316 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4317
4318 if (ice_check_reset(hw))
4319 dev_err(dev, "Error resetting device. Please reload the driver\n");
4320 break;
4321 case ICE_DDP_PKG_ERR:
4322 default:
4323 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4324 break;
4325 }
4326 }
4327
4328 /**
4329  * ice_load_pkg - load/reload the DDP package file
4330  * @firmware: firmware structure when firmware was requested, or NULL for reload
4331  * @pf: pointer to the PF instance
4332  *
4333  * Called on probe and after a CORER/GLOBR rebuild to load the DDP package
4334  * and initialize the HW tables.
4335  */
4336 static void
4337 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4338 {
4339 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4340 struct device *dev = ice_pf_to_dev(pf);
4341 struct ice_hw *hw = &pf->hw;
4342
4343
4344 if (firmware && !hw->pkg_copy) {
4345 state = ice_copy_and_init_pkg(hw, firmware->data,
4346 firmware->size);
4347 ice_log_pkg_init(hw, state);
4348 } else if (!firmware && hw->pkg_copy) {
4349
4350 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4351 ice_log_pkg_init(hw, state);
4352 } else {
4353 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4354 }
4355
4356 if (!ice_is_init_pkg_successful(state)) {
4357
4358 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4359 return;
4360 }
4361
4362
4363
4364
4365 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4366 }
4367
4368 /**
4369  * ice_verify_cacheline_size - verify the driver's assumed cache line size
4370  * @pf: pointer to the PF structure
4371  *
4372  * Only warns when the assumption does not hold; the driver keeps running,
4373  * but Tx timeouts are possible.
4374  */
4376 static void ice_verify_cacheline_size(struct ice_pf *pf)
4377 {
4378 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4379 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4380 ICE_CACHE_LINE_BYTES);
4381 }
4382
4383 /**
4384  * ice_send_version - update firmware with driver version
4385  * @pf: PF struct
4386  *
4387  * Returns 0 on success, else error code
4388  */
4389 static int ice_send_version(struct ice_pf *pf)
4390 {
4391 struct ice_driver_ver dv;
4392
4393 dv.major_ver = 0xff;
4394 dv.minor_ver = 0xff;
4395 dv.build_ver = 0xff;
4396 dv.subbuild_ver = 0;
4397 strscpy((char *)dv.driver_string, UTS_RELEASE,
4398 sizeof(dv.driver_string));
4399 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4400 }
4401
4402 /**
4403  * ice_init_fdir - Initialize flow director VSI and configuration
4404  * @pf: pointer to the PF instance
4405  *
4406  * Returns 0 on success, negative on error
4407  */
4408 static int ice_init_fdir(struct ice_pf *pf)
4409 {
4410 struct device *dev = ice_pf_to_dev(pf);
4411 struct ice_vsi *ctrl_vsi;
4412 int err;
4413
4414
4415
4416
4417 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4418 if (!ctrl_vsi) {
4419 dev_dbg(dev, "could not create control VSI\n");
4420 return -ENOMEM;
4421 }
4422
4423 err = ice_vsi_open_ctrl(ctrl_vsi);
4424 if (err) {
4425 dev_dbg(dev, "could not open control VSI\n");
4426 goto err_vsi_open;
4427 }
4428
4429 mutex_init(&pf->hw.fdir_fltr_lock);
4430
4431 err = ice_fdir_create_dflt_rules(pf);
4432 if (err)
4433 goto err_fdir_rule;
4434
4435 return 0;
4436
4437 err_fdir_rule:
4438 ice_fdir_release_flows(&pf->hw);
4439 ice_vsi_close(ctrl_vsi);
4440 err_vsi_open:
4441 ice_vsi_release(ctrl_vsi);
4442 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4443 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4444 pf->ctrl_vsi_idx = ICE_NO_VSI;
4445 }
4446 return err;
4447 }
4448
4449 /**
4450  * ice_get_opt_fw_name - return the optional firmware file name, or NULL
4451  * @pf: pointer to the PF instance
4452  */
4453 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4454 {
4455
4456
4457
4458 struct pci_dev *pdev = pf->pdev;
4459 char *opt_fw_filename;
4460 u64 dsn;
4461
4462 /* derive the name of the optional firmware file from the adapter's
4463  * Device Serial Number (DSN)
4464  */
4465 dsn = pci_get_dsn(pdev);
4466 if (!dsn)
4467 return NULL;
4468
4469 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4470 if (!opt_fw_filename)
4471 return NULL;
4472
4473 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4474 ICE_DDP_PKG_PATH, dsn);
4475
4476 return opt_fw_filename;
4477 }
4478
4479 /**
4480  * ice_request_fw - request and load the DDP package file
4481  * @pf: pointer to the PF instance
4482  */
4483 static void ice_request_fw(struct ice_pf *pf)
4484 {
4485 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4486 const struct firmware *firmware = NULL;
4487 struct device *dev = ice_pf_to_dev(pf);
4488 int err = 0;
4489
4490
4491
4492
4493
4494 if (opt_fw_filename) {
4495 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4496 if (err) {
4497 kfree(opt_fw_filename);
4498 goto dflt_pkg_load;
4499 }
4500
4501
4502 ice_load_pkg(firmware, pf);
4503 kfree(opt_fw_filename);
4504 release_firmware(firmware);
4505 return;
4506 }
4507
4508 dflt_pkg_load:
4509 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4510 if (err) {
4511 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4512 return;
4513 }
4514
4515
4516 ice_load_pkg(firmware, pf);
4517 release_firmware(firmware);
4518 }
4519
4520 /**
4521  * ice_print_wake_reason - show the wake-up cause in the log
4522  * @pf: pointer to the PF struct
4523  */
4524 static void ice_print_wake_reason(struct ice_pf *pf)
4525 {
4526 u32 wus = pf->wakeup_reason;
4527 const char *wake_str;
4528
4529
4530 if (!wus)
4531 return;
4532
4533 if (wus & PFPM_WUS_LNKC_M)
4534 wake_str = "Link\n";
4535 else if (wus & PFPM_WUS_MAG_M)
4536 wake_str = "Magic Packet\n";
4537 else if (wus & PFPM_WUS_MNG_M)
4538 wake_str = "Management\n";
4539 else if (wus & PFPM_WUS_FW_RST_WK_M)
4540 wake_str = "Firmware Reset\n";
4541 else
4542 wake_str = "Unknown\n";
4543
4544 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4545 }
4546
4547 /**
4548  * ice_register_netdev - register the netdev and create the devlink PF port
4549  * @pf: pointer to the PF struct
4550  */
4551 static int ice_register_netdev(struct ice_pf *pf)
4552 {
4553 struct ice_vsi *vsi;
4554 int err = 0;
4555
4556 vsi = ice_get_main_vsi(pf);
4557 if (!vsi || !vsi->netdev)
4558 return -EIO;
4559
4560 err = register_netdev(vsi->netdev);
4561 if (err)
4562 goto err_register_netdev;
4563
4564 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4565 netif_carrier_off(vsi->netdev);
4566 netif_tx_stop_all_queues(vsi->netdev);
4567 err = ice_devlink_create_pf_port(pf);
4568 if (err)
4569 goto err_devlink_create;
4570
4571 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4572
4573 return 0;
4574 err_devlink_create:
4575 unregister_netdev(vsi->netdev);
4576 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4577 err_register_netdev:
4578 free_netdev(vsi->netdev);
4579 vsi->netdev = NULL;
4580 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4581 return err;
4582 }
4583
4584 /**
4585  * ice_probe - Device initialization routine
4586  * @pdev: PCI device information struct
4587  * @ent: entry in the PCI device ID table
4588  *
4589  * Returns 0 on success, negative on failure
4590  */
4591 static int
4592 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4593 {
4594 struct device *dev = &pdev->dev;
4595 struct ice_pf *pf;
4596 struct ice_hw *hw;
4597 int i, err;
4598
4599 if (pdev->is_virtfn) {
4600 dev_err(dev, "can't probe a virtual function\n");
4601 return -EINVAL;
4602 }
4603
4604 /* enable the device and map BAR0 using device-managed (devres)
4605  * helpers so they are released automatically on failure or removal
4606  */
4607 err = pcim_enable_device(pdev);
4608 if (err)
4609 return err;
4610
4611 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4612 if (err) {
4613 dev_err(dev, "BAR0 I/O map error %d\n", err);
4614 return err;
4615 }
4616
4617 pf = ice_allocate_pf(dev);
4618 if (!pf)
4619 return -ENOMEM;
4620
4621
4622 pf->aux_idx = -1;
4623
4624
4625 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4626 if (err) {
4627 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4628 return err;
4629 }
4630
4631 pci_enable_pcie_error_reporting(pdev);
4632 pci_set_master(pdev);
4633
4634 pf->pdev = pdev;
4635 pci_set_drvdata(pdev, pf);
4636 set_bit(ICE_DOWN, pf->state);
4637
4638 set_bit(ICE_SERVICE_DIS, pf->state);
4639
4640 hw = &pf->hw;
4641 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4642 pci_save_state(pdev);
4643
4644 hw->back = pf;
4645 hw->vendor_id = pdev->vendor;
4646 hw->device_id = pdev->device;
4647 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4648 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4649 hw->subsystem_device_id = pdev->subsystem_device;
4650 hw->bus.device = PCI_SLOT(pdev->devfn);
4651 hw->bus.func = PCI_FUNC(pdev->devfn);
4652 ice_set_ctrlq_len(hw);
4653
4654 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4655
4656 #ifndef CONFIG_DYNAMIC_DEBUG
4657 if (debug < -1)
4658 hw->debug_mask = debug;
4659 #endif
4660
4661 err = ice_init_hw(hw);
4662 if (err) {
4663 dev_err(dev, "ice_init_hw failed: %d\n", err);
4664 err = -EIO;
4665 goto err_exit_unroll;
4666 }
4667
4668 ice_init_feature_support(pf);
4669
4670 ice_request_fw(pf);
4671
4672 /* if loading the DDP package failed, ICE_FLAG_ADV_FEATURES stays
4673  * cleared and the device continues in safe mode with reduced
4674  * capabilities
4675  */
4676 if (ice_is_safe_mode(pf)) {
4677
4678
4679
4680
4681
4682 ice_set_safe_mode_caps(hw);
4683 }
4684
4685 hw->ucast_shared = true;
4686
4687 err = ice_init_pf(pf);
4688 if (err) {
4689 dev_err(dev, "ice_init_pf failed: %d\n", err);
4690 goto err_init_pf_unroll;
4691 }
4692
4693 ice_devlink_init_regions(pf);
4694
4695 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4696 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4697 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4698 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4699 i = 0;
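/* advertise one udp_tunnel_nic table per tunnel type the DDP package
 * exposes (VXLAN and/or GENEVE), sized to the number of entries the
 * hardware reports
 */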
4700 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4701 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4702 pf->hw.tnl.valid_count[TNL_VXLAN];
4703 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4704 UDP_TUNNEL_TYPE_VXLAN;
4705 i++;
4706 }
4707 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4708 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4709 pf->hw.tnl.valid_count[TNL_GENEVE];
4710 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4711 UDP_TUNNEL_TYPE_GENEVE;
4712 i++;
4713 }
4714
4715 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4716 if (!pf->num_alloc_vsi) {
4717 err = -EIO;
4718 goto err_init_pf_unroll;
4719 }
4720 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4721 dev_warn(&pf->pdev->dev,
4722 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4723 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4724 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4725 }
4726
4727 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4728 GFP_KERNEL);
4729 if (!pf->vsi) {
4730 err = -ENOMEM;
4731 goto err_init_pf_unroll;
4732 }
4733
4734 err = ice_init_interrupt_scheme(pf);
4735 if (err) {
4736 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4737 err = -EIO;
4738 goto err_init_vsi_unroll;
4739 }
4740
4741 /* set up the misc (OICR) vector here so admin queue events and
4742  * other non-queue interrupts can be handled during the rest of
4743  * the probe flow
4744  */
4746 err = ice_req_irq_msix_misc(pf);
4747 if (err) {
4748 dev_err(dev, "setup of misc vector failed: %d\n", err);
4749 goto err_init_interrupt_unroll;
4750 }
4751
4752
4753 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4754 if (!pf->first_sw) {
4755 err = -ENOMEM;
4756 goto err_msix_misc_unroll;
4757 }
4758
4759 if (hw->evb_veb)
4760 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4761 else
4762 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4763
4764 pf->first_sw->pf = pf;
4765
4766
4767 pf->first_sw->sw_id = hw->port_info->sw_id;
4768
4769 err = ice_setup_pf_sw(pf);
4770 if (err) {
4771 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4772 goto err_alloc_sw_unroll;
4773 }
4774
4775 clear_bit(ICE_SERVICE_DIS, pf->state);
4776
4777
4778 err = ice_send_version(pf);
4779 if (err) {
4780 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4781 UTS_RELEASE, err);
4782 goto err_send_version_unroll;
4783 }
4784
4785
4786 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4787
4788 err = ice_init_link_events(pf->hw.port_info);
4789 if (err) {
4790 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4791 goto err_send_version_unroll;
4792 }
4793
4794
4795 err = ice_init_nvm_phy_type(pf->hw.port_info);
4796 if (err)
4797 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4798
4799
4800 err = ice_update_link_info(pf->hw.port_info);
4801 if (err)
4802 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4803
4804 ice_init_link_dflt_override(pf->hw.port_info);
4805
4806 ice_check_link_cfg_err(pf,
4807 pf->hw.port_info->phy.link_info.link_cfg_err);
4808
4809
4810 if (pf->hw.port_info->phy.link_info.link_info &
4811 ICE_AQ_MEDIA_AVAILABLE) {
4812
4813 err = ice_init_phy_user_cfg(pf->hw.port_info);
4814 if (err)
4815 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4816
4817 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4818 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4819
4820 if (vsi)
4821 ice_configure_phy(vsi);
4822 }
4823 } else {
4824 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4825 }
4826
4827 ice_verify_cacheline_size(pf);
4828
4829
4830 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4831
4832
4833 ice_print_wake_reason(pf);
4834
4835
4836 wr32(hw, PFPM_WUS, U32_MAX);
4837
4838
4839 device_set_wakeup_enable(dev, false);
4840
4841 if (ice_is_safe_mode(pf)) {
4842 ice_set_safe_mode_vlan_cfg(pf);
4843 goto probe_done;
4844 }
4845
4846
4847 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4848 ice_ptp_init(pf);
4849
4850 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4851 ice_gnss_init(pf);
4852
4853
4854 if (ice_init_fdir(pf))
4855 dev_err(dev, "could not initialize flow director\n");
4856
4857
4858 if (ice_init_pf_dcb(pf, false)) {
4859 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4860 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4861 } else {
4862 ice_cfg_lldp_mib_change(&pf->hw, true);
4863 }
4864
4865 if (ice_init_lag(pf))
4866 dev_warn(dev, "Failed to init link aggregation support\n");
4867
4868
4869 pcie_print_link_status(pf->pdev);
4870
4871 probe_done:
4872 err = ice_register_netdev(pf);
4873 if (err)
4874 goto err_netdev_reg;
4875
4876 err = ice_devlink_register_params(pf);
4877 if (err)
4878 goto err_netdev_reg;
4879
4880
4881 clear_bit(ICE_DOWN, pf->state);
4882 if (ice_is_rdma_ena(pf)) {
4883 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4884 if (pf->aux_idx < 0) {
4885 dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4886 err = -ENOMEM;
4887 goto err_devlink_reg_param;
4888 }
4889
4890 err = ice_init_rdma(pf);
4891 if (err) {
4892 dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4893 err = -EIO;
4894 goto err_init_aux_unroll;
4895 }
4896 } else {
4897 dev_warn(dev, "RDMA is not supported on this device\n");
4898 }
4899
4900 ice_devlink_register(pf);
4901 return 0;
4902
4903 err_init_aux_unroll:
4904 pf->adev = NULL;
4905 ida_free(&ice_aux_ida, pf->aux_idx);
4906 err_devlink_reg_param:
4907 ice_devlink_unregister_params(pf);
4908 err_netdev_reg:
4909 err_send_version_unroll:
4910 ice_vsi_release_all(pf);
4911 err_alloc_sw_unroll:
4912 set_bit(ICE_SERVICE_DIS, pf->state);
4913 set_bit(ICE_DOWN, pf->state);
4914 devm_kfree(dev, pf->first_sw);
4915 err_msix_misc_unroll:
4916 ice_free_irq_msix_misc(pf);
4917 err_init_interrupt_unroll:
4918 ice_clear_interrupt_scheme(pf);
4919 err_init_vsi_unroll:
4920 devm_kfree(dev, pf->vsi);
4921 err_init_pf_unroll:
4922 ice_deinit_pf(pf);
4923 ice_devlink_destroy_regions(pf);
4924 ice_deinit_hw(hw);
4925 err_exit_unroll:
4926 pci_disable_pcie_error_reporting(pdev);
4927 pci_disable_device(pdev);
4928 return err;
4929 }
4930
4931
4932
4933
4934
4935
4936
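/**
 * ice_set_wake - configure Wake on LAN for this PF
 * @pf: board private structure
 *
 * Clears any pending wake-up status (PFPM_WUS), then enables or disables
 * APM wake and the magic-packet wake-up filter (PFPM_WUFC) based on
 * pf->wol_ena.
 */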
4937 static void ice_set_wake(struct ice_pf *pf)
4938 {
4939 struct ice_hw *hw = &pf->hw;
4940 bool wol = pf->wol_ena;
4941
4942
4943 wr32(hw, PFPM_WUS, U32_MAX);
4944
4945
4946 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4947
4948
4949 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4950 }
4951
4952
4953
4954
4955
4956
4957
4958
4959
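/**
 * ice_setup_mc_magic_wake - program FW for multicast magic packet wake
 * @pf: board private structure
 *
 * If WoL is enabled, write the MAC address of the main VSI (or the port's
 * permanent MAC when no netdev is attached) to firmware with the
 * multicast-magic-packet and keep-across-PFR flags so the device can wake
 * the system on a multicast magic packet.
 */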
4960 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4961 {
4962 struct device *dev = ice_pf_to_dev(pf);
4963 struct ice_hw *hw = &pf->hw;
4964 u8 mac_addr[ETH_ALEN];
4965 struct ice_vsi *vsi;
4966 int status;
4967 u8 flags;
4968
4969 if (!pf->wol_ena)
4970 return;
4971
4972 vsi = ice_get_main_vsi(pf);
4973 if (!vsi)
4974 return;
4975
4976
4977 if (vsi->netdev)
4978 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4979 else
4980 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4981
4982 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4983 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4984 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4985
4986 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4987 if (status)
4988 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
4989 status, ice_aq_str(hw->adminq.sq_last_status));
4990 }
4991
4992
4993
4994
4995
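/**
 * ice_remove - device removal routine
 * @pdev: PCI device information struct
 *
 * Called by the PCI subsystem when the device is being removed. Waits for
 * any in-progress reset to finish, then tears down VFs, the auxiliary/RDMA
 * device, VSIs, interrupts and HW resources, issues a final PF reset and
 * disables the PCI device.
 */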
4996 static void ice_remove(struct pci_dev *pdev)
4997 {
4998 struct ice_pf *pf = pci_get_drvdata(pdev);
4999 int i;
5000
5001 ice_devlink_unregister(pf);
5002 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5003 if (!ice_is_reset_in_progress(pf->state))
5004 break;
5005 msleep(100);
5006 }
5007
5008 ice_tc_indir_block_remove(pf);
5009
5010 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5011 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5012 ice_free_vfs(pf);
5013 }
5014
5015 ice_service_task_stop(pf);
5016
5017 ice_aq_cancel_waiting_tasks(pf);
5018 ice_unplug_aux_dev(pf);
5019 if (pf->aux_idx >= 0)
5020 ida_free(&ice_aux_ida, pf->aux_idx);
5021 ice_devlink_unregister_params(pf);
5022 set_bit(ICE_DOWN, pf->state);
5023
5024 ice_deinit_lag(pf);
5025 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
5026 ice_ptp_release(pf);
5027 if (ice_is_feature_supported(pf, ICE_F_GNSS))
5028 ice_gnss_exit(pf);
5029 if (!ice_is_safe_mode(pf))
5030 ice_remove_arfs(pf);
5031 ice_setup_mc_magic_wake(pf);
5032 ice_vsi_release_all(pf);
5033 	mutex_destroy(&pf->hw.fdir_fltr_lock);
5034 ice_set_wake(pf);
5035 ice_free_irq_msix_misc(pf);
5036 ice_for_each_vsi(pf, i) {
5037 if (!pf->vsi[i])
5038 continue;
5039 ice_vsi_free_q_vectors(pf->vsi[i]);
5040 }
5041 ice_deinit_pf(pf);
5042 ice_devlink_destroy_regions(pf);
5043 ice_deinit_hw(&pf->hw);
5044
5045
5046
5047
5048
5049 ice_reset(&pf->hw, ICE_RESET_PFR);
5050 pci_wait_for_pending_transaction(pdev);
5051 ice_clear_interrupt_scheme(pf);
5052 pci_disable_pcie_error_reporting(pdev);
5053 pci_disable_device(pdev);
5054 }
5055
5056
5057
5058
5059
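/**
 * ice_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 *
 * Performs the same teardown as ice_remove() and, when the system is
 * powering off, arms wake-from-D3 according to pf->wol_ena.
 */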
5060 static void ice_shutdown(struct pci_dev *pdev)
5061 {
5062 struct ice_pf *pf = pci_get_drvdata(pdev);
5063
5064 ice_remove(pdev);
5065
5066 if (system_state == SYSTEM_POWER_OFF) {
5067 pci_wake_from_d3(pdev, pf->wol_ena);
5068 pci_set_power_state(pdev, PCI_D3hot);
5069 }
5070 }
5071
5072 #ifdef CONFIG_PM
5073
5074
5075
5076
5077
5078
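/**
 * ice_prepare_for_shutdown - prepare the PF for a shutdown or suspend
 * @pf: board private structure
 *
 * Notifies VFs of the pending reset (if the mailbox queue is still alive),
 * disables all VSIs and shuts down the control queues so the device can be
 * safely powered down.
 */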
5079 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5080 {
5081 struct ice_hw *hw = &pf->hw;
5082 u32 v;
5083
5084
5085 if (ice_check_sq_alive(hw, &hw->mailboxq))
5086 ice_vc_notify_reset(pf);
5087
5088 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5089
5090
5091 ice_pf_dis_all_vsi(pf, false);
5092
5093 ice_for_each_vsi(pf, v)
5094 if (pf->vsi[v])
5095 pf->vsi[v]->vsi_num = 0;
5096
5097 ice_shutdown_all_ctrlq(hw);
5098 }
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
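/**
 * ice_reinit_interrupt_scheme - reinitialize the interrupt scheme
 * @pf: board private structure
 *
 * Re-allocates MSI-X resources, rebuilds the queue vectors for every VSI,
 * maps rings back to vectors and re-requests the miscellaneous vector.
 * On failure any queue vectors already allocated are freed again.
 *
 * Returns 0 on success, negative error code on failure.
 */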
5110 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5111 {
5112 struct device *dev = ice_pf_to_dev(pf);
5113 int ret, v;
5114
5115
5116
5117
5118
5119 ret = ice_init_interrupt_scheme(pf);
5120 if (ret) {
5121 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5122 return ret;
5123 }
5124
5125
5126 ice_for_each_vsi(pf, v) {
5127 if (!pf->vsi[v])
5128 continue;
5129
5130 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5131 if (ret)
5132 goto err_reinit;
5133 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5134 }
5135
5136 ret = ice_req_irq_msix_misc(pf);
5137 if (ret) {
5138 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5139 ret);
5140 goto err_reinit;
5141 }
5142
5143 return 0;
5144
5145 err_reinit:
5146 while (v--)
5147 if (pf->vsi[v])
5148 ice_vsi_free_q_vectors(pf->vsi[v]);
5149
5150 return ret;
5151 }
5152
5153
5154
5155
5156
5157
5158
5159
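/**
 * ice_suspend - PM callback for suspending the device
 * @dev: generic device information structure
 *
 * Stops the service task, unplugs the auxiliary device, programs the wake
 * filters, tears down the interrupt scheme and puts the device into D3hot.
 *
 * Returns 0 on success (or if the device was already down/suspended),
 * -EBUSY if the device is not in a nominal state.
 */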
5160 static int __maybe_unused ice_suspend(struct device *dev)
5161 {
5162 struct pci_dev *pdev = to_pci_dev(dev);
5163 struct ice_pf *pf;
5164 int disabled, v;
5165
5166 pf = pci_get_drvdata(pdev);
5167
5168 if (!ice_pf_state_is_nominal(pf)) {
5169 dev_err(dev, "Device is not ready, no need to suspend it\n");
5170 return -EBUSY;
5171 }
5172
5173
5174
5175
5176
5177
5178
5179 disabled = ice_service_task_stop(pf);
5180
5181 ice_unplug_aux_dev(pf);
5182
5183
5184 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5185 if (!disabled)
5186 ice_service_task_restart(pf);
5187 return 0;
5188 }
5189
5190 if (test_bit(ICE_DOWN, pf->state) ||
5191 ice_is_reset_in_progress(pf->state)) {
5192 dev_err(dev, "can't suspend device in reset or already down\n");
5193 if (!disabled)
5194 ice_service_task_restart(pf);
5195 return 0;
5196 }
5197
5198 ice_setup_mc_magic_wake(pf);
5199
5200 ice_prepare_for_shutdown(pf);
5201
5202 ice_set_wake(pf);
5203
5204
5205
5206
5207
5208
5209 ice_free_irq_msix_misc(pf);
5210 ice_for_each_vsi(pf, v) {
5211 if (!pf->vsi[v])
5212 continue;
5213 ice_vsi_free_q_vectors(pf->vsi[v]);
5214 }
5215 ice_clear_interrupt_scheme(pf);
5216
5217 pci_save_state(pdev);
5218 pci_wake_from_d3(pdev, pf->wol_ena);
5219 pci_set_power_state(pdev, PCI_D3hot);
5220 return 0;
5221 }
5222
5223
5224
5225
5226
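/**
 * ice_resume - PM callback for waking the device
 * @dev: generic device information structure
 *
 * Restores PCI state, re-enables the device, reports the wake-up reason,
 * re-initializes the interrupt scheme and schedules a PF reset to bring the
 * device back to an operational state.
 */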
5227 static int __maybe_unused ice_resume(struct device *dev)
5228 {
5229 struct pci_dev *pdev = to_pci_dev(dev);
5230 enum ice_reset_req reset_type;
5231 struct ice_pf *pf;
5232 struct ice_hw *hw;
5233 int ret;
5234
5235 pci_set_power_state(pdev, PCI_D0);
5236 pci_restore_state(pdev);
5237 pci_save_state(pdev);
5238
5239 if (!pci_device_is_present(pdev))
5240 return -ENODEV;
5241
5242 ret = pci_enable_device_mem(pdev);
5243 if (ret) {
5244 dev_err(dev, "Cannot enable device after suspend\n");
5245 return ret;
5246 }
5247
5248 pf = pci_get_drvdata(pdev);
5249 hw = &pf->hw;
5250
5251 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5252 ice_print_wake_reason(pf);
5253
5254
5255
5256
5257 ret = ice_reinit_interrupt_scheme(pf);
5258 if (ret)
5259 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5260
5261 clear_bit(ICE_DOWN, pf->state);
5262
5263 reset_type = ICE_RESET_PFR;
5264
5265 clear_bit(ICE_SERVICE_DIS, pf->state);
5266
5267 if (ice_schedule_reset(pf, reset_type))
5268 dev_err(dev, "Reset during resume failed.\n");
5269
5270 clear_bit(ICE_SUSPENDED, pf->state);
5271 ice_service_task_restart(pf);
5272
5273
5274 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5275
5276 return 0;
5277 }
5278 #endif
5279
5280
5281
5282
5283
5284
5285
5286
5287
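/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the channel state the error occurred in
 *
 * Stops the service task and prepares the PF for reset. Returns
 * PCI_ERS_RESULT_NEED_RESET so the PCI subsystem performs a slot reset, or
 * PCI_ERS_RESULT_DISCONNECT when no driver data is available.
 */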
5288 static pci_ers_result_t
5289 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5290 {
5291 struct ice_pf *pf = pci_get_drvdata(pdev);
5292
5293 if (!pf) {
5294 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5295 __func__, err);
5296 return PCI_ERS_RESULT_DISCONNECT;
5297 }
5298
5299 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5300 ice_service_task_stop(pf);
5301
5302 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5303 set_bit(ICE_PFR_REQ, pf->state);
5304 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5305 }
5306 }
5307
5308 return PCI_ERS_RESULT_NEED_RESET;
5309 }
5310
5311
5312
5313
5314
5315
5316
5317
5318 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5319 {
5320 struct ice_pf *pf = pci_get_drvdata(pdev);
5321 pci_ers_result_t result;
5322 int err;
5323 u32 reg;
5324
5325 err = pci_enable_device_mem(pdev);
5326 if (err) {
5327 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5328 err);
5329 result = PCI_ERS_RESULT_DISCONNECT;
5330 } else {
5331 pci_set_master(pdev);
5332 pci_restore_state(pdev);
5333 pci_save_state(pdev);
5334 pci_wake_from_d3(pdev, false);
5335
5336
5337 reg = rd32(&pf->hw, GLGEN_RTRIG);
5338 if (!reg)
5339 result = PCI_ERS_RESULT_RECOVERED;
5340 else
5341 result = PCI_ERS_RESULT_DISCONNECT;
5342 }
5343
5344 return result;
5345 }
5346
5347
5348
5349
5350
5351
5352
5353
5354 static void ice_pci_err_resume(struct pci_dev *pdev)
5355 {
5356 struct ice_pf *pf = pci_get_drvdata(pdev);
5357
5358 if (!pf) {
5359 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5360 __func__);
5361 return;
5362 }
5363
5364 if (test_bit(ICE_SUSPENDED, pf->state)) {
5365 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5366 __func__);
5367 return;
5368 }
5369
5370 ice_restore_all_vfs_msi_state(pdev);
5371
5372 ice_do_reset(pf, ICE_RESET_PFR);
5373 ice_service_task_restart(pf);
5374 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5375 }
5376
5377
5378
5379
5380
5381 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5382 {
5383 struct ice_pf *pf = pci_get_drvdata(pdev);
5384
5385 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5386 ice_service_task_stop(pf);
5387
5388 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5389 set_bit(ICE_PFR_REQ, pf->state);
5390 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5391 }
5392 }
5393 }
5394
5395
5396
5397
5398
5399 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5400 {
5401 ice_pci_err_resume(pdev);
5402 }
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412 static const struct pci_device_id ice_pci_tbl[] = {
5413 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5414 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5415 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5416 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5417 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5418 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5419 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5420 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5421 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5422 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5423 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5424 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5425 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5426 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5427 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5428 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5429 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5430 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5431 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5432 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5433 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5434 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5435 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5436 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5437 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5438 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5439
5440 { 0, }
5441 };
5442 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5443
5444 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5445
5446 static const struct pci_error_handlers ice_pci_err_handler = {
5447 .error_detected = ice_pci_err_detected,
5448 .slot_reset = ice_pci_err_slot_reset,
5449 .reset_prepare = ice_pci_err_reset_prepare,
5450 .reset_done = ice_pci_err_reset_done,
5451 .resume = ice_pci_err_resume
5452 };
5453
5454 static struct pci_driver ice_driver = {
5455 .name = KBUILD_MODNAME,
5456 .id_table = ice_pci_tbl,
5457 .probe = ice_probe,
5458 .remove = ice_remove,
5459 #ifdef CONFIG_PM
5460 .driver.pm = &ice_pm_ops,
5461 #endif
5462 .shutdown = ice_shutdown,
5463 .sriov_configure = ice_sriov_configure,
5464 .err_handler = &ice_pci_err_handler
5465 };
5466
5467
5468
5469
5470
5471
5472
5473 static int __init ice_module_init(void)
5474 {
5475 int status;
5476
5477 pr_info("%s\n", ice_driver_string);
5478 pr_info("%s\n", ice_copyright);
5479
5480 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5481 if (!ice_wq) {
5482 pr_err("Failed to create workqueue\n");
5483 return -ENOMEM;
5484 }
5485
5486 status = pci_register_driver(&ice_driver);
5487 if (status) {
5488 pr_err("failed to register PCI driver, err %d\n", status);
5489 destroy_workqueue(ice_wq);
5490 }
5491
5492 return status;
5493 }
5494 module_init(ice_module_init);
5495
5496
5497
5498
5499
5500
5501
5502 static void __exit ice_module_exit(void)
5503 {
5504 pci_unregister_driver(&ice_driver);
5505 destroy_workqueue(ice_wq);
5506 pr_info("module unloaded\n");
5507 }
5508 module_exit(ice_module_exit);
5509
5510
5511
5512
5513
5514
5515
5516
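/**
 * ice_set_mac_address - NDO callback to change the device MAC address
 * @netdev: network interface device structure
 * @pi: pointer to a sockaddr carrying the new address
 *
 * Validates the new address, swaps the VSI unicast filter from the old MAC
 * to the new one (restoring the old address if the filter update fails) and
 * writes the new address to firmware as the LAA.
 *
 * Returns 0 on success, negative error code on failure.
 */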
5517 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5518 {
5519 struct ice_netdev_priv *np = netdev_priv(netdev);
5520 struct ice_vsi *vsi = np->vsi;
5521 struct ice_pf *pf = vsi->back;
5522 struct ice_hw *hw = &pf->hw;
5523 struct sockaddr *addr = pi;
5524 u8 old_mac[ETH_ALEN];
5525 u8 flags = 0;
5526 u8 *mac;
5527 int err;
5528
5529 mac = (u8 *)addr->sa_data;
5530
5531 if (!is_valid_ether_addr(mac))
5532 return -EADDRNOTAVAIL;
5533
5534 if (ether_addr_equal(netdev->dev_addr, mac)) {
5535 netdev_dbg(netdev, "already using mac %pM\n", mac);
5536 return 0;
5537 }
5538
5539 if (test_bit(ICE_DOWN, pf->state) ||
5540 ice_is_reset_in_progress(pf->state)) {
5541 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5542 mac);
5543 return -EBUSY;
5544 }
5545
5546 if (ice_chnl_dmac_fltr_cnt(pf)) {
5547 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5548 mac);
5549 return -EAGAIN;
5550 }
5551
5552 netif_addr_lock_bh(netdev);
5553 ether_addr_copy(old_mac, netdev->dev_addr);
5554
5555 eth_hw_addr_set(netdev, mac);
5556 netif_addr_unlock_bh(netdev);
5557
5558
5559 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5560 if (err && err != -ENOENT) {
5561 err = -EADDRNOTAVAIL;
5562 goto err_update_filters;
5563 }
5564
5565
5566 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5567 if (err == -EEXIST) {
5568
5569
5570
5571
5572
5573 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5574
5575 return 0;
5576 } else if (err) {
5577
5578 err = -EADDRNOTAVAIL;
5579 }
5580
5581 err_update_filters:
5582 if (err) {
5583 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5584 mac);
5585 netif_addr_lock_bh(netdev);
5586 eth_hw_addr_set(netdev, old_mac);
5587 netif_addr_unlock_bh(netdev);
5588 return err;
5589 }
5590
5591 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5592 netdev->dev_addr);
5593
5594
5595 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5596 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5597 if (err) {
5598 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5599 mac, err);
5600 }
5601 return 0;
5602 }
5603
5604
5605
5606
5607
5608 static void ice_set_rx_mode(struct net_device *netdev)
5609 {
5610 struct ice_netdev_priv *np = netdev_priv(netdev);
5611 struct ice_vsi *vsi = np->vsi;
5612
5613 if (!vsi)
5614 return;
5615
5616
5617
5618
5619
5620 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5621 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5622 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5623
5624
5625
5626
5627 ice_service_task_schedule(vsi->back);
5628 }
5629
5630
5631
5632
5633
5634
5635
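/**
 * ice_set_tx_maxrate - NDO callback to set a per-queue Tx rate limit
 * @netdev: network interface device structure
 * @queue_index: Tx queue to configure
 * @maxrate: maximum rate in Mbps, 0 to remove the limit
 *
 * Validates the requested rate against the scheduler maximum and either
 * applies the limit or restores the default bandwidth for the queue.
 *
 * Returns 0 on success, negative error code on failure.
 */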
5636 static int
5637 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5638 {
5639 struct ice_netdev_priv *np = netdev_priv(netdev);
5640 struct ice_vsi *vsi = np->vsi;
5641 u16 q_handle;
5642 int status;
5643 u8 tc;
5644
5645
5646 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5647 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5648 maxrate, queue_index);
5649 return -EINVAL;
5650 }
5651
5652 q_handle = vsi->tx_rings[queue_index]->q_handle;
5653 tc = ice_dcb_get_tc(vsi, queue_index);
5654
5655
5656 if (!maxrate)
5657 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5658 q_handle, ICE_MAX_BW);
5659 else
5660 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5661 q_handle, ICE_MAX_BW, maxrate * 1000);
5662 if (status)
5663 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5664 status);
5665
5666 return status;
5667 }
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679 static int
5680 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5681 struct net_device *dev, const unsigned char *addr, u16 vid,
5682 u16 flags, struct netlink_ext_ack __always_unused *extack)
5683 {
5684 int err;
5685
5686 if (vid) {
5687 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5688 return -EINVAL;
5689 }
5690 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5691 netdev_err(dev, "FDB only supports static addresses\n");
5692 return -EINVAL;
5693 }
5694
5695 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5696 err = dev_uc_add_excl(dev, addr);
5697 else if (is_multicast_ether_addr(addr))
5698 err = dev_mc_add_excl(dev, addr);
5699 else
5700 err = -EINVAL;
5701
5702
5703 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5704 err = 0;
5705
5706 return err;
5707 }
5708
5709
5710
5711
5712
5713
5714
5715
5716
5717
5718 static int
5719 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5720 struct net_device *dev, const unsigned char *addr,
5721 __always_unused u16 vid, struct netlink_ext_ack *extack)
5722 {
5723 int err;
5724
5725 if (ndm->ndm_state & NUD_PERMANENT) {
5726 netdev_err(dev, "FDB only supports static addresses\n");
5727 return -EINVAL;
5728 }
5729
5730 if (is_unicast_ether_addr(addr))
5731 err = dev_uc_del(dev, addr);
5732 else if (is_multicast_ether_addr(addr))
5733 err = dev_mc_del(dev, addr);
5734 else
5735 err = -EINVAL;
5736
5737 return err;
5738 }
5739
5740 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5741 NETIF_F_HW_VLAN_CTAG_TX | \
5742 NETIF_F_HW_VLAN_STAG_RX | \
5743 NETIF_F_HW_VLAN_STAG_TX)
5744
5745 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5746 NETIF_F_HW_VLAN_STAG_FILTER)
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764
5765
5766
5767
5768
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782
5783
5784
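/**
 * ice_fix_features - fix up the netdev features requested by the stack
 * @netdev: network interface device structure
 * @features: requested feature set
 *
 * In double VLAN mode (DVM), 802.1Q and 802.1ad filtering can only be
 * toggled together, so the requested filtering bits are forced to match; in
 * single VLAN mode 802.1ad filtering is not supported at all. CTAG and STAG
 * stripping/insertion are mutually exclusive, so when both are requested
 * the STAG offloads are cleared.
 *
 * Returns the fixed-up feature set.
 */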
5785 static netdev_features_t
5786 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5787 {
5788 struct ice_netdev_priv *np = netdev_priv(netdev);
5789 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5790 bool cur_ctag, cur_stag, req_ctag, req_stag;
5791
5792 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5793 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5794 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5795
5796 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5797 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5798 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5799
5800 if (req_vlan_fltr != cur_vlan_fltr) {
5801 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5802 if (req_ctag && req_stag) {
5803 features |= NETIF_VLAN_FILTERING_FEATURES;
5804 } else if (!req_ctag && !req_stag) {
5805 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5806 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
5807 (!cur_stag && req_stag && !cur_ctag)) {
5808 features |= NETIF_VLAN_FILTERING_FEATURES;
5809 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5810 } else if ((cur_ctag && !req_ctag && cur_stag) ||
5811 (cur_stag && !req_stag && cur_ctag)) {
5812 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5813 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5814 }
5815 } else {
5816 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5817 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5818
5819 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5820 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5821 }
5822 }
5823
5824 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5825 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5826 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5827 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5828 NETIF_F_HW_VLAN_STAG_TX);
5829 }
5830
5831 return features;
5832 }
5833
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843 static int
5844 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
5845 {
5846 bool enable_stripping = true, enable_insertion = true;
5847 struct ice_vsi_vlan_ops *vlan_ops;
5848 int strip_err = 0, insert_err = 0;
5849 u16 vlan_ethertype = 0;
5850
5851 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5852
5853 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
5854 vlan_ethertype = ETH_P_8021AD;
5855 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
5856 vlan_ethertype = ETH_P_8021Q;
5857
5858 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
5859 enable_stripping = false;
5860 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
5861 enable_insertion = false;
5862
5863 if (enable_stripping)
5864 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
5865 else
5866 strip_err = vlan_ops->dis_stripping(vsi);
5867
5868 if (enable_insertion)
5869 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
5870 else
5871 insert_err = vlan_ops->dis_insertion(vsi);
5872
5873 if (strip_err || insert_err)
5874 return -EIO;
5875
5876 return 0;
5877 }
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887 static int
5888 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
5889 {
5890 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5891 int err = 0;
5892
5893
5894
5895
5896 if (features &
5897 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
5898 err = vlan_ops->ena_rx_filtering(vsi);
5899 else
5900 err = vlan_ops->dis_rx_filtering(vsi);
5901
5902 return err;
5903 }
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913 static int
5914 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
5915 {
5916 netdev_features_t current_vlan_features, requested_vlan_features;
5917 struct ice_netdev_priv *np = netdev_priv(netdev);
5918 struct ice_vsi *vsi = np->vsi;
5919 int err;
5920
5921 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
5922 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
5923 if (current_vlan_features ^ requested_vlan_features) {
5924 err = ice_set_vlan_offload_features(vsi, features);
5925 if (err)
5926 return err;
5927 }
5928
5929 current_vlan_features = netdev->features &
5930 NETIF_VLAN_FILTERING_FEATURES;
5931 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
5932 if (current_vlan_features ^ requested_vlan_features) {
5933 err = ice_set_vlan_filtering_features(vsi, features);
5934 if (err)
5935 return err;
5936 }
5937
5938 return 0;
5939 }
5940
5941
5942
5943
5944
5945
5946 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
5947 {
5948 bool if_running = netif_running(vsi->netdev);
5949 int ret;
5950
5951 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
5952 ret = ice_down(vsi);
5953 if (ret) {
5954 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
5955 return ret;
5956 }
5957 }
5958 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
5959 if (ret)
5960 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
5961 if (if_running)
5962 ret = ice_up(vsi);
5963
5964 return ret;
5965 }
5966
5967
5968
5969
5970
5971
5972 static int
5973 ice_set_features(struct net_device *netdev, netdev_features_t features)
5974 {
5975 netdev_features_t changed = netdev->features ^ features;
5976 struct ice_netdev_priv *np = netdev_priv(netdev);
5977 struct ice_vsi *vsi = np->vsi;
5978 struct ice_pf *pf = vsi->back;
5979 int ret = 0;
5980
5981
5982 if (ice_is_safe_mode(pf)) {
5983 dev_err(ice_pf_to_dev(pf),
5984 "Device is in Safe Mode - not enabling advanced netdev features\n");
5985 return ret;
5986 }
5987
5988
5989 if (ice_is_reset_in_progress(pf->state)) {
5990 dev_err(ice_pf_to_dev(pf),
5991 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5992 return -EBUSY;
5993 }
5994
5995
5996
5997
5998 if (changed & NETIF_F_RXHASH)
5999 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6000
6001 ret = ice_set_vlan_features(netdev, features);
6002 if (ret)
6003 return ret;
6004
6005 if (changed & NETIF_F_NTUPLE) {
6006 bool ena = !!(features & NETIF_F_NTUPLE);
6007
6008 ice_vsi_manage_fdir(vsi, ena);
6009 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6010 }
6011
6012
6013 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6014 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6015 return -EACCES;
6016 }
6017
6018 if (changed & NETIF_F_HW_TC) {
6019 bool ena = !!(features & NETIF_F_HW_TC);
6020
6021 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6022 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6023 }
6024
6025 if (changed & NETIF_F_LOOPBACK)
6026 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6027
6028 return ret;
6029 }
6030
6031
6032
6033
6034
6035 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6036 {
6037 int err;
6038
6039 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6040 if (err)
6041 return err;
6042
6043 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6044 if (err)
6045 return err;
6046
6047 return ice_vsi_add_vlan_zero(vsi);
6048 }
6049
6050
6051
6052
6053
6054
6055
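/**
 * ice_vsi_cfg - configure a VSI for operation
 * @vsi: the VSI being configured
 *
 * Requests an Rx filter sync, performs VLAN setup for non-loopback VSIs,
 * configures DCB rings and then sets up the LAN Tx queues, XDP Tx queues
 * (when enabled) and Rx queues.
 *
 * Returns 0 on success, negative error code on failure.
 */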
6056 int ice_vsi_cfg(struct ice_vsi *vsi)
6057 {
6058 int err;
6059
6060 if (vsi->netdev) {
6061 ice_set_rx_mode(vsi->netdev);
6062
6063 if (vsi->type != ICE_VSI_LB) {
6064 err = ice_vsi_vlan_setup(vsi);
6065
6066 if (err)
6067 return err;
6068 }
6069 }
6070 ice_vsi_cfg_dcb_rings(vsi);
6071
6072 err = ice_vsi_cfg_lan_txqs(vsi);
6073 if (!err && ice_is_xdp_ena_vsi(vsi))
6074 err = ice_vsi_cfg_xdp_txqs(vsi);
6075 if (!err)
6076 err = ice_vsi_cfg_rxqs(vsi);
6077
6078 return err;
6079 }
6080
6081
6082
6083
6084
6085
6086
6087
6088
6089 struct ice_dim {
6090
6091
6092
6093 u16 itr;
6094 };
6095
6096
6097
6098
6099
6100 static const struct ice_dim rx_profile[] = {
6101 {2},
6102 {8},
6103 {16},
6104 {62},
6105 {126}
6106 };
6107
6108
6109
6110
6111 static const struct ice_dim tx_profile[] = {
6112 {2},
6113 {8},
6114 {40},
6115 {128},
6116 {256}
6117 };
6118
6119 static void ice_tx_dim_work(struct work_struct *work)
6120 {
6121 struct ice_ring_container *rc;
6122 struct dim *dim;
6123 u16 itr;
6124
6125 dim = container_of(work, struct dim, work);
6126 rc = (struct ice_ring_container *)dim->priv;
6127
6128 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6129
6130
6131 itr = tx_profile[dim->profile_ix].itr;
6132
6133 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6134 ice_write_itr(rc, itr);
6135
6136 dim->state = DIM_START_MEASURE;
6137 }
6138
6139 static void ice_rx_dim_work(struct work_struct *work)
6140 {
6141 struct ice_ring_container *rc;
6142 struct dim *dim;
6143 u16 itr;
6144
6145 dim = container_of(work, struct dim, work);
6146 rc = (struct ice_ring_container *)dim->priv;
6147
6148 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6149
6150
6151 itr = rx_profile[dim->profile_ix].itr;
6152
6153 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6154 ice_write_itr(rc, itr);
6155
6156 dim->state = DIM_START_MEASURE;
6157 }
6158
6159 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6160
6161
6162
6163
6164
6165
6166
6167
6168
6169
6170
6171 static void ice_init_moderation(struct ice_q_vector *q_vector)
6172 {
6173 struct ice_ring_container *rc;
6174 bool tx_dynamic, rx_dynamic;
6175
6176 rc = &q_vector->tx;
6177 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6178 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6179 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6180 rc->dim.priv = rc;
6181 tx_dynamic = ITR_IS_DYNAMIC(rc);
6182
6183
6184 ice_write_itr(rc, tx_dynamic ?
6185 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6186
6187 rc = &q_vector->rx;
6188 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6189 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6190 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6191 rc->dim.priv = rc;
6192 rx_dynamic = ITR_IS_DYNAMIC(rc);
6193
6194
6195 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6196 rc->itr_setting);
6197
6198 ice_set_q_vector_intrl(q_vector);
6199 }
6200
6201
6202
6203
6204
6205 static void ice_napi_enable_all(struct ice_vsi *vsi)
6206 {
6207 int q_idx;
6208
6209 if (!vsi->netdev)
6210 return;
6211
6212 ice_for_each_q_vector(vsi, q_idx) {
6213 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6214
6215 ice_init_moderation(q_vector);
6216
6217 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6218 napi_enable(&q_vector->napi);
6219 }
6220 }
6221
6222
6223
6224
6225
6226
6227
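/**
 * ice_up_complete - finish bringing a VSI up
 * @vsi: the VSI being brought up
 *
 * Configures MSI-X, starts the Rx rings, enables NAPI and interrupts, and
 * if link is already up starts the Tx queues and turns the carrier on.
 *
 * Returns 0 on success, negative error code on failure.
 */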
6228 static int ice_up_complete(struct ice_vsi *vsi)
6229 {
6230 struct ice_pf *pf = vsi->back;
6231 int err;
6232
6233 ice_vsi_cfg_msix(vsi);
6234
6235
6236
6237
6238
6239 err = ice_vsi_start_all_rx_rings(vsi);
6240 if (err)
6241 return err;
6242
6243 clear_bit(ICE_VSI_DOWN, vsi->state);
6244 ice_napi_enable_all(vsi);
6245 ice_vsi_ena_irq(vsi);
6246
6247 if (vsi->port_info &&
6248 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6249 vsi->netdev) {
6250 ice_print_link_msg(vsi, true);
6251 netif_tx_start_all_queues(vsi->netdev);
6252 netif_carrier_on(vsi->netdev);
6253 if (!ice_is_e810(&pf->hw))
6254 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6255 }
6256
6257
6258
6259
6260 ice_update_eth_stats(vsi);
6261 ice_service_task_schedule(pf);
6262
6263 return 0;
6264 }
6265
6266
6267
6268
6269
6270 int ice_up(struct ice_vsi *vsi)
6271 {
6272 int err;
6273
6274 err = ice_vsi_cfg(vsi);
6275 if (!err)
6276 err = ice_up_complete(vsi);
6277
6278 return err;
6279 }
6280
6281
6282
6283
6284
6285
6286
6287
6288
6289
6290
6291 void
6292 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6293 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6294 {
6295 unsigned int start;
6296
6297 do {
6298 start = u64_stats_fetch_begin_irq(syncp);
6299 *pkts = stats.pkts;
6300 *bytes = stats.bytes;
6301 } while (u64_stats_fetch_retry_irq(syncp, start));
6302 }
6303
6304
6305
6306
6307
6308
6309
6310
6311 static void
6312 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6313 struct rtnl_link_stats64 *vsi_stats,
6314 struct ice_tx_ring **rings, u16 count)
6315 {
6316 u16 i;
6317
6318 for (i = 0; i < count; i++) {
6319 struct ice_tx_ring *ring;
6320 u64 pkts = 0, bytes = 0;
6321
6322 ring = READ_ONCE(rings[i]);
6323 if (!ring)
6324 continue;
6325 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6326 vsi_stats->tx_packets += pkts;
6327 vsi_stats->tx_bytes += bytes;
6328 vsi->tx_restart += ring->tx_stats.restart_q;
6329 vsi->tx_busy += ring->tx_stats.tx_busy;
6330 vsi->tx_linearize += ring->tx_stats.tx_linearize;
6331 }
6332 }
6333
6334
6335
6336
6337
6338 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6339 {
6340 struct rtnl_link_stats64 *vsi_stats;
6341 u64 pkts, bytes;
6342 int i;
6343
6344 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6345 if (!vsi_stats)
6346 return;
6347
6348
6349 vsi->tx_restart = 0;
6350 vsi->tx_busy = 0;
6351 vsi->tx_linearize = 0;
6352 vsi->rx_buf_failed = 0;
6353 vsi->rx_page_failed = 0;
6354
6355 rcu_read_lock();
6356
6357
6358 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6359 vsi->num_txq);
6360
6361
6362 ice_for_each_rxq(vsi, i) {
6363 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6364
6365 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6366 vsi_stats->rx_packets += pkts;
6367 vsi_stats->rx_bytes += bytes;
6368 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
6369 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
6370 }
6371
6372
6373 if (ice_is_xdp_ena_vsi(vsi))
6374 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6375 vsi->num_xdp_txq);
6376
6377 rcu_read_unlock();
6378
6379 vsi->net_stats.tx_packets = vsi_stats->tx_packets;
6380 vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
6381 vsi->net_stats.rx_packets = vsi_stats->rx_packets;
6382 vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
6383
6384 kfree(vsi_stats);
6385 }
6386
6387
6388
6389
6390
6391 void ice_update_vsi_stats(struct ice_vsi *vsi)
6392 {
6393 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6394 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6395 struct ice_pf *pf = vsi->back;
6396
6397 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6398 test_bit(ICE_CFG_BUSY, pf->state))
6399 return;
6400
6401
6402 ice_update_vsi_ring_stats(vsi);
6403
6404
6405 ice_update_eth_stats(vsi);
6406
6407 cur_ns->tx_errors = cur_es->tx_errors;
6408 cur_ns->rx_dropped = cur_es->rx_discards;
6409 cur_ns->tx_dropped = cur_es->tx_discards;
6410 cur_ns->multicast = cur_es->rx_multicast;
6411
6412
6413 if (vsi->type == ICE_VSI_PF) {
6414 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6415 cur_ns->rx_errors = pf->stats.crc_errors +
6416 pf->stats.illegal_bytes +
6417 pf->stats.rx_len_errors +
6418 pf->stats.rx_undersize +
6419 pf->hw_csum_rx_error +
6420 pf->stats.rx_jabber +
6421 pf->stats.rx_fragments +
6422 pf->stats.rx_oversize;
6423 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6424
6425 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6426 }
6427 }
6428
6429
6430
6431
6432
6433 void ice_update_pf_stats(struct ice_pf *pf)
6434 {
6435 struct ice_hw_port_stats *prev_ps, *cur_ps;
6436 struct ice_hw *hw = &pf->hw;
6437 u16 fd_ctr_base;
6438 u8 port;
6439
6440 port = hw->port_info->lport;
6441 prev_ps = &pf->stats_prev;
6442 cur_ps = &pf->stats;
6443
6444 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6445 &prev_ps->eth.rx_bytes,
6446 &cur_ps->eth.rx_bytes);
6447
6448 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6449 &prev_ps->eth.rx_unicast,
6450 &cur_ps->eth.rx_unicast);
6451
6452 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6453 &prev_ps->eth.rx_multicast,
6454 &cur_ps->eth.rx_multicast);
6455
6456 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6457 &prev_ps->eth.rx_broadcast,
6458 &cur_ps->eth.rx_broadcast);
6459
6460 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6461 &prev_ps->eth.rx_discards,
6462 &cur_ps->eth.rx_discards);
6463
6464 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6465 &prev_ps->eth.tx_bytes,
6466 &cur_ps->eth.tx_bytes);
6467
6468 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6469 &prev_ps->eth.tx_unicast,
6470 &cur_ps->eth.tx_unicast);
6471
6472 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6473 &prev_ps->eth.tx_multicast,
6474 &cur_ps->eth.tx_multicast);
6475
6476 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6477 &prev_ps->eth.tx_broadcast,
6478 &cur_ps->eth.tx_broadcast);
6479
6480 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6481 &prev_ps->tx_dropped_link_down,
6482 &cur_ps->tx_dropped_link_down);
6483
6484 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6485 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6486
6487 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6488 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6489
6490 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6491 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6492
6493 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6494 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6495
6496 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6497 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6498
6499 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6500 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6501
6502 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6503 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6504
6505 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6506 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6507
6508 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6509 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6510
6511 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6512 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6513
6514 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6515 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6516
6517 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6518 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6519
6520 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6521 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6522
6523 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6524 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6525
6526 fd_ctr_base = hw->fd_ctr_base;
6527
6528 ice_stat_update40(hw,
6529 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6530 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6531 &cur_ps->fd_sb_match);
6532 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6533 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6534
6535 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6536 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6537
6538 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6539 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6540
6541 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6542 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6543
6544 ice_update_dcb_stats(pf);
6545
6546 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6547 &prev_ps->crc_errors, &cur_ps->crc_errors);
6548
6549 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6550 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6551
6552 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6553 &prev_ps->mac_local_faults,
6554 &cur_ps->mac_local_faults);
6555
6556 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6557 &prev_ps->mac_remote_faults,
6558 &cur_ps->mac_remote_faults);
6559
6560 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6561 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6562
6563 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6564 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6565
6566 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6567 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6568
6569 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6570 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6571
6572 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6573 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6574
6575 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6576
6577 pf->stat_prev_loaded = true;
6578 }
6579
6580
6581
6582
6583
6584
6585 static
6586 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6587 {
6588 struct ice_netdev_priv *np = netdev_priv(netdev);
6589 struct rtnl_link_stats64 *vsi_stats;
6590 struct ice_vsi *vsi = np->vsi;
6591
6592 vsi_stats = &vsi->net_stats;
6593
6594 if (!vsi->num_txq || !vsi->num_rxq)
6595 return;
6596
6597
6598
6599
6600
6601
6602 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6603 ice_update_vsi_ring_stats(vsi);
6604 stats->tx_packets = vsi_stats->tx_packets;
6605 stats->tx_bytes = vsi_stats->tx_bytes;
6606 stats->rx_packets = vsi_stats->rx_packets;
6607 stats->rx_bytes = vsi_stats->rx_bytes;
6608
6609
6610
6611
6612
6613 stats->multicast = vsi_stats->multicast;
6614 stats->tx_errors = vsi_stats->tx_errors;
6615 stats->tx_dropped = vsi_stats->tx_dropped;
6616 stats->rx_errors = vsi_stats->rx_errors;
6617 stats->rx_dropped = vsi_stats->rx_dropped;
6618 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6619 stats->rx_length_errors = vsi_stats->rx_length_errors;
6620 }
6621
6622
6623
6624
6625
6626 static void ice_napi_disable_all(struct ice_vsi *vsi)
6627 {
6628 int q_idx;
6629
6630 if (!vsi->netdev)
6631 return;
6632
6633 ice_for_each_q_vector(vsi, q_idx) {
6634 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6635
6636 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6637 napi_disable(&q_vector->napi);
6638
6639 cancel_work_sync(&q_vector->tx.dim.work);
6640 cancel_work_sync(&q_vector->rx.dim.work);
6641 }
6642 }
6643
6644
6645
6646
6647
6648
6649
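/**
 * ice_down - shut down a VSI
 * @vsi: the VSI being shut down
 *
 * The caller must have already set ICE_VSI_DOWN in vsi->state. Removes the
 * VLAN 0 filter for PF VSIs, turns off the carrier and Tx queues, masks
 * interrupts, stops the Tx/XDP/Rx rings, disables NAPI and cleans all rings.
 *
 * Returns 0 on success, -EIO if any step failed.
 */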
6650 int ice_down(struct ice_vsi *vsi)
6651 {
6652 int i, tx_err, rx_err, vlan_err = 0;
6653
6654 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6655
6656 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6657 vlan_err = ice_vsi_del_vlan_zero(vsi);
6658 if (!ice_is_e810(&vsi->back->hw))
6659 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6660 netif_carrier_off(vsi->netdev);
6661 netif_tx_disable(vsi->netdev);
6662 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6663 ice_eswitch_stop_all_tx_queues(vsi->back);
6664 }
6665
6666 ice_vsi_dis_irq(vsi);
6667
6668 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6669 if (tx_err)
6670 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6671 vsi->vsi_num, tx_err);
6672 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6673 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6674 if (tx_err)
6675 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6676 vsi->vsi_num, tx_err);
6677 }
6678
6679 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6680 if (rx_err)
6681 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6682 vsi->vsi_num, rx_err);
6683
6684 ice_napi_disable_all(vsi);
6685
6686 ice_for_each_txq(vsi, i)
6687 ice_clean_tx_ring(vsi->tx_rings[i]);
6688
6689 ice_for_each_rxq(vsi, i)
6690 ice_clean_rx_ring(vsi->rx_rings[i]);
6691
6692 if (tx_err || rx_err || vlan_err) {
6693 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6694 vsi->vsi_num, vsi->vsw->sw_id);
6695 return -EIO;
6696 }
6697
6698 return 0;
6699 }
6700
6701
6702
6703
6704
6705
6706
6707 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6708 {
6709 int i, err = 0;
6710
6711 if (!vsi->num_txq) {
6712 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6713 vsi->vsi_num);
6714 return -EINVAL;
6715 }
6716
6717 ice_for_each_txq(vsi, i) {
6718 struct ice_tx_ring *ring = vsi->tx_rings[i];
6719
6720 if (!ring)
6721 return -EINVAL;
6722
6723 if (vsi->netdev)
6724 ring->netdev = vsi->netdev;
6725 err = ice_setup_tx_ring(ring);
6726 if (err)
6727 break;
6728 }
6729
6730 return err;
6731 }
6732
6733
6734
6735
6736
6737
6738
6739 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6740 {
6741 int i, err = 0;
6742
6743 if (!vsi->num_rxq) {
6744 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6745 vsi->vsi_num);
6746 return -EINVAL;
6747 }
6748
6749 ice_for_each_rxq(vsi, i) {
6750 struct ice_rx_ring *ring = vsi->rx_rings[i];
6751
6752 if (!ring)
6753 return -EINVAL;
6754
6755 if (vsi->netdev)
6756 ring->netdev = vsi->netdev;
6757 err = ice_setup_rx_ring(ring);
6758 if (err)
6759 break;
6760 }
6761
6762 return err;
6763 }
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6774 {
6775 char int_name[ICE_INT_NAME_STR_LEN];
6776 struct ice_pf *pf = vsi->back;
6777 struct device *dev;
6778 int err;
6779
6780 dev = ice_pf_to_dev(pf);
6781
6782 err = ice_vsi_setup_tx_rings(vsi);
6783 if (err)
6784 goto err_setup_tx;
6785
6786 err = ice_vsi_setup_rx_rings(vsi);
6787 if (err)
6788 goto err_setup_rx;
6789
6790 err = ice_vsi_cfg(vsi);
6791 if (err)
6792 goto err_setup_rx;
6793
6794 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6795 dev_driver_string(dev), dev_name(dev));
6796 err = ice_vsi_req_irq_msix(vsi, int_name);
6797 if (err)
6798 goto err_setup_rx;
6799
6800 ice_vsi_cfg_msix(vsi);
6801
6802 err = ice_vsi_start_all_rx_rings(vsi);
6803 if (err)
6804 goto err_up_complete;
6805
6806 clear_bit(ICE_VSI_DOWN, vsi->state);
6807 ice_vsi_ena_irq(vsi);
6808
6809 return 0;
6810
6811 err_up_complete:
6812 ice_down(vsi);
6813 err_setup_rx:
6814 ice_vsi_free_rx_rings(vsi);
6815 err_setup_tx:
6816 ice_vsi_free_tx_rings(vsi);
6817
6818 return err;
6819 }
6820
6821
6822
6823
6824
6825
6826
6827
6828
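/**
 * ice_vsi_open - bring up a VSI and allocate its resources
 * @vsi: the VSI to open
 *
 * Allocates Tx/Rx ring resources, configures the VSI, requests MSI-X
 * interrupts, sets the real number of Tx/Rx queues for PF VSIs and then
 * completes the bring-up. All resources are released again on failure.
 *
 * Returns 0 on success, negative error code on failure.
 */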
6829 int ice_vsi_open(struct ice_vsi *vsi)
6830 {
6831 char int_name[ICE_INT_NAME_STR_LEN];
6832 struct ice_pf *pf = vsi->back;
6833 int err;
6834
6835
6836 err = ice_vsi_setup_tx_rings(vsi);
6837 if (err)
6838 goto err_setup_tx;
6839
6840 err = ice_vsi_setup_rx_rings(vsi);
6841 if (err)
6842 goto err_setup_rx;
6843
6844 err = ice_vsi_cfg(vsi);
6845 if (err)
6846 goto err_setup_rx;
6847
6848 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6849 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6850 err = ice_vsi_req_irq_msix(vsi, int_name);
6851 if (err)
6852 goto err_setup_rx;
6853
6854 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
6855
6856 if (vsi->type == ICE_VSI_PF) {
6857
6858 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6859 if (err)
6860 goto err_set_qs;
6861
6862 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6863 if (err)
6864 goto err_set_qs;
6865 }
6866
6867 err = ice_up_complete(vsi);
6868 if (err)
6869 goto err_up_complete;
6870
6871 return 0;
6872
6873 err_up_complete:
6874 ice_down(vsi);
6875 err_set_qs:
6876 ice_vsi_free_irq(vsi);
6877 err_setup_rx:
6878 ice_vsi_free_rx_rings(vsi);
6879 err_setup_tx:
6880 ice_vsi_free_tx_rings(vsi);
6881
6882 return err;
6883 }
6884
6885
6886
6887
6888
6889 static void ice_vsi_release_all(struct ice_pf *pf)
6890 {
6891 int err, i;
6892
6893 if (!pf->vsi)
6894 return;
6895
6896 ice_for_each_vsi(pf, i) {
6897 if (!pf->vsi[i])
6898 continue;
6899
6900 if (pf->vsi[i]->type == ICE_VSI_CHNL)
6901 continue;
6902
6903 err = ice_vsi_release(pf->vsi[i]);
6904 if (err)
6905 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6906 i, err, pf->vsi[i]->vsi_num);
6907 }
6908 }
6909
6910
6911
6912
6913
6914
6915
6916
6917 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6918 {
6919 struct device *dev = ice_pf_to_dev(pf);
6920 int i, err;
6921
6922 ice_for_each_vsi(pf, i) {
6923 struct ice_vsi *vsi = pf->vsi[i];
6924
6925 if (!vsi || vsi->type != type)
6926 continue;
6927
6928
6929 err = ice_vsi_rebuild(vsi, true);
6930 if (err) {
6931 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6932 err, vsi->idx, ice_vsi_type_str(type));
6933 return err;
6934 }
6935
6936
6937 err = ice_replay_vsi(&pf->hw, vsi->idx);
6938 if (err) {
6939 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
6940 err, vsi->idx, ice_vsi_type_str(type));
6941 return err;
6942 }
6943
6944
6945
6946
6947 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6948
6949
6950 err = ice_ena_vsi(vsi, false);
6951 if (err) {
6952 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6953 err, vsi->idx, ice_vsi_type_str(type));
6954 return err;
6955 }
6956
6957 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6958 ice_vsi_type_str(type));
6959 }
6960
6961 return 0;
6962 }
6963
6964
6965
6966
6967
6968 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6969 {
6970 bool link_up;
6971 int i;
6972
6973 ice_for_each_vsi(pf, i) {
6974 struct ice_vsi *vsi = pf->vsi[i];
6975
6976 if (!vsi || vsi->type != ICE_VSI_PF)
6977 return;
6978
6979 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6980 if (link_up) {
6981 netif_carrier_on(pf->vsi[i]->netdev);
6982 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6983 } else {
6984 netif_carrier_off(pf->vsi[i]->netdev);
6985 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6986 }
6987 }
6988 }
6989
6990
6991
6992
6993
6994
6995
6996
6997
6998
6999
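/**
 * ice_rebuild - rebuild the PF after a reset
 * @pf: PF to rebuild
 * @reset_type: the reset that was performed
 *
 * Re-initializes the control queues, NVM, capabilities and scheduler,
 * restores flow director and DCB configuration, rebuilds and replays the
 * PF, switchdev, channel and control VSIs, and finally re-plugs the
 * auxiliary device. On failure the PF is marked ICE_RESET_FAILED and, if
 * recovery is impossible, ICE_NEEDS_RESTART.
 */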
7000 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7001 {
7002 struct device *dev = ice_pf_to_dev(pf);
7003 struct ice_hw *hw = &pf->hw;
7004 bool dvm;
7005 int err;
7006
7007 if (test_bit(ICE_DOWN, pf->state))
7008 goto clear_recovery;
7009
7010 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7011
7012 #define ICE_EMP_RESET_SLEEP_MS 5000
7013 if (reset_type == ICE_RESET_EMPR) {
7014
7015
7016
7017
7018 pf->fw_emp_reset_disabled = false;
7019
7020 msleep(ICE_EMP_RESET_SLEEP_MS);
7021 }
7022
7023 err = ice_init_all_ctrlq(hw);
7024 if (err) {
7025 dev_err(dev, "control queues init failed %d\n", err);
7026 goto err_init_ctrlq;
7027 }
7028
7029
7030 if (!ice_is_safe_mode(pf)) {
7031
7032 if (reset_type == ICE_RESET_PFR)
7033 ice_fill_blk_tbls(hw);
7034 else
7035
7036 ice_load_pkg(NULL, pf);
7037 }
7038
7039 err = ice_clear_pf_cfg(hw);
7040 if (err) {
7041 dev_err(dev, "clear PF configuration failed %d\n", err);
7042 goto err_init_ctrlq;
7043 }
7044
7045 ice_clear_pxe_mode(hw);
7046
7047 err = ice_init_nvm(hw);
7048 if (err) {
7049 dev_err(dev, "ice_init_nvm failed %d\n", err);
7050 goto err_init_ctrlq;
7051 }
7052
7053 err = ice_get_caps(hw);
7054 if (err) {
7055 dev_err(dev, "ice_get_caps failed %d\n", err);
7056 goto err_init_ctrlq;
7057 }
7058
7059 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7060 if (err) {
7061 dev_err(dev, "set_mac_cfg failed %d\n", err);
7062 goto err_init_ctrlq;
7063 }
7064
7065 dvm = ice_is_dvm_ena(hw);
7066
7067 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7068 if (err)
7069 goto err_init_ctrlq;
7070
7071 err = ice_sched_init_port(hw->port_info);
7072 if (err)
7073 goto err_sched_init_port;
7074
7075
7076 err = ice_req_irq_msix_misc(pf);
7077 if (err) {
7078 dev_err(dev, "misc vector setup failed: %d\n", err);
7079 goto err_sched_init_port;
7080 }
7081
7082 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7083 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7084 if (!rd32(hw, PFQF_FD_SIZE)) {
7085 u16 unused, guar, b_effort;
7086
7087 guar = hw->func_caps.fd_fltr_guar;
7088 b_effort = hw->func_caps.fd_fltr_best_effort;
7089
7090
7091 ice_alloc_fd_guar_item(hw, &unused, guar);
7092
7093 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7094 }
7095 }
7096
7097 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7098 ice_dcb_rebuild(pf);
7099
7100
7101
7102
7103
7104 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7105 ice_ptp_reset(pf);
7106
7107 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7108 ice_gnss_init(pf);
7109
7110
7111 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7112 if (err) {
7113 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7114 goto err_vsi_rebuild;
7115 }
7116
7117
7118 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7119 ice_ptp_cfg_timestamp(pf, false);
7120
7121 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7122 if (err) {
7123 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7124 goto err_vsi_rebuild;
7125 }
7126
7127 if (reset_type == ICE_RESET_PFR) {
7128 err = ice_rebuild_channels(pf);
7129 if (err) {
7130 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7131 err);
7132 goto err_vsi_rebuild;
7133 }
7134 }
7135
7136
7137 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7138 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7139 if (err) {
7140 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7141 goto err_vsi_rebuild;
7142 }
7143
7144
7145 if (hw->fdir_prof)
7146 ice_fdir_replay_flows(hw);
7147
7148
7149 ice_fdir_replay_fltrs(pf);
7150
7151 ice_rebuild_arfs(pf);
7152 }
7153
7154 ice_update_pf_netdev_link(pf);
7155
7156
7157 err = ice_send_version(pf);
7158 if (err) {
7159 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7160 err);
7161 goto err_vsi_rebuild;
7162 }
7163
7164 ice_replay_post(hw);
7165
7166
7167 clear_bit(ICE_RESET_FAILED, pf->state);
7168
7169 ice_plug_aux_dev(pf);
7170 return;
7171
7172 err_vsi_rebuild:
7173 err_sched_init_port:
7174 ice_sched_cleanup_all(hw);
7175 err_init_ctrlq:
7176 ice_shutdown_all_ctrlq(hw);
7177 set_bit(ICE_RESET_FAILED, pf->state);
7178 clear_recovery:
7179
7180 set_bit(ICE_NEEDS_RESTART, pf->state);
7181 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7182 }
7183
7184
7185
7186
7187
7188 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
7189 {
7190 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
7191 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
7192 else
7193 return ICE_RXBUF_3072;
7194 }
7195
7196
7197
7198
7199
7200
7201
7202
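/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for the maximum transmission unit
 *
 * Rejects MTUs that exceed the XDP frame size when an XDP program is
 * loaded, waits for any reset in progress to finish, then applies the new
 * MTU and restarts the VSI if it is currently up.
 *
 * Returns 0 on success, negative error code on failure.
 */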
7203 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7204 {
7205 struct ice_netdev_priv *np = netdev_priv(netdev);
7206 struct ice_vsi *vsi = np->vsi;
7207 struct ice_pf *pf = vsi->back;
7208 u8 count = 0;
7209 int err = 0;
7210
7211 if (new_mtu == (int)netdev->mtu) {
7212 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7213 return 0;
7214 }
7215
7216 if (ice_is_xdp_ena_vsi(vsi)) {
7217 int frame_size = ice_max_xdp_frame_size(vsi);
7218
7219 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7220 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7221 frame_size - ICE_ETH_PKT_HDR_PAD);
7222 return -EINVAL;
7223 }
7224 }
7225
7226
7227 do {
7228 if (ice_is_reset_in_progress(pf->state)) {
7229 count++;
7230 usleep_range(1000, 2000);
7231 } else {
7232 break;
7233 }
7234
7235 } while (count < 100);
7236
7237 if (count == 100) {
7238 netdev_err(netdev, "can't change MTU. Device is busy\n");
7239 return -EBUSY;
7240 }
7241
7242 netdev->mtu = (unsigned int)new_mtu;
7243
7244
7245 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7246 err = ice_down(vsi);
7247 if (err) {
7248 netdev_err(netdev, "change MTU if_down err %d\n", err);
7249 return err;
7250 }
7251
7252 err = ice_up(vsi);
7253 if (err) {
7254 netdev_err(netdev, "change MTU if_up err %d\n", err);
7255 return err;
7256 }
7257 }
7258
7259 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7260 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7261
7262 return err;
7263 }
7264
7265
7266
7267
7268
7269
7270
7271 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7272 {
7273 struct ice_netdev_priv *np = netdev_priv(netdev);
7274 struct ice_pf *pf = np->vsi->back;
7275
7276 switch (cmd) {
7277 case SIOCGHWTSTAMP:
7278 return ice_ptp_get_ts_config(pf, ifr);
7279 case SIOCSHWTSTAMP:
7280 return ice_ptp_set_ts_config(pf, ifr);
7281 default:
7282 return -EOPNOTSUPP;
7283 }
7284 }
7285
7286
7287
7288
7289
7290 const char *ice_aq_str(enum ice_aq_err aq_err)
7291 {
7292 switch (aq_err) {
7293 case ICE_AQ_RC_OK:
7294 return "OK";
7295 case ICE_AQ_RC_EPERM:
7296 return "ICE_AQ_RC_EPERM";
7297 case ICE_AQ_RC_ENOENT:
7298 return "ICE_AQ_RC_ENOENT";
7299 case ICE_AQ_RC_ENOMEM:
7300 return "ICE_AQ_RC_ENOMEM";
7301 case ICE_AQ_RC_EBUSY:
7302 return "ICE_AQ_RC_EBUSY";
7303 case ICE_AQ_RC_EEXIST:
7304 return "ICE_AQ_RC_EEXIST";
7305 case ICE_AQ_RC_EINVAL:
7306 return "ICE_AQ_RC_EINVAL";
7307 case ICE_AQ_RC_ENOSPC:
7308 return "ICE_AQ_RC_ENOSPC";
7309 case ICE_AQ_RC_ENOSYS:
7310 return "ICE_AQ_RC_ENOSYS";
7311 case ICE_AQ_RC_EMODE:
7312 return "ICE_AQ_RC_EMODE";
7313 case ICE_AQ_RC_ENOSEC:
7314 return "ICE_AQ_RC_ENOSEC";
7315 case ICE_AQ_RC_EBADSIG:
7316 return "ICE_AQ_RC_EBADSIG";
7317 case ICE_AQ_RC_ESVN:
7318 return "ICE_AQ_RC_ESVN";
7319 case ICE_AQ_RC_EBADMAN:
7320 return "ICE_AQ_RC_EBADMAN";
7321 case ICE_AQ_RC_EBADBUF:
7322 return "ICE_AQ_RC_EBADBUF";
7323 }
7324
7325 return "ICE_AQ_RC_UNKNOWN";
7326 }
7327
7328
7329
7330
7331
7332
7333
7334
7335
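/**
 * ice_set_rss_lut - Set the RSS lookup table
 * @vsi: Pointer to VSI structure
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */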
7336 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7337 {
7338 struct ice_aq_get_set_rss_lut_params params = {};
7339 struct ice_hw *hw = &vsi->back->hw;
7340 int status;
7341
7342 if (!lut)
7343 return -EINVAL;
7344
7345 params.vsi_handle = vsi->idx;
7346 params.lut_size = lut_size;
7347 params.lut_type = vsi->rss_lut_type;
7348 params.lut = lut;
7349
7350 	status = ice_aq_set_rss_lut(hw, &params);
7351 if (status)
7352 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7353 status, ice_aq_str(hw->adminq.sq_last_status));
7354
7355 return status;
7356 }
7357
7358
7359
7360
7361
7362
7363
7364
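/**
 * ice_set_rss_key - Set the RSS hash key
 * @vsi: Pointer to the VSI structure
 * @seed: RSS hash seed
 *
 * Returns 0 on success, negative on failure
 */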
7365 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7366 {
7367 struct ice_hw *hw = &vsi->back->hw;
7368 int status;
7369
7370 if (!seed)
7371 return -EINVAL;
7372
7373 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7374 if (status)
7375 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7376 status, ice_aq_str(hw->adminq.sq_last_status));
7377
7378 return status;
7379 }
7380
7381
7382
7383
7384
7385
7386
7387
7388
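/**
 * ice_get_rss_lut - Get the RSS lookup table
 * @vsi: Pointer to VSI structure
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */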
7389 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7390 {
7391 struct ice_aq_get_set_rss_lut_params params = {};
7392 struct ice_hw *hw = &vsi->back->hw;
7393 int status;
7394
7395 if (!lut)
7396 return -EINVAL;
7397
7398 params.vsi_handle = vsi->idx;
7399 params.lut_size = lut_size;
7400 params.lut_type = vsi->rss_lut_type;
7401 params.lut = lut;
7402
7403 	status = ice_aq_get_rss_lut(hw, &params);
7404 if (status)
7405 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7406 status, ice_aq_str(hw->adminq.sq_last_status));
7407
7408 return status;
7409 }
7410
7411
7412
7413
7414
7415
7416
7417
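/**
 * ice_get_rss_key - Get the RSS hash key
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the key in
 *
 * Returns 0 on success, negative on failure
 */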
7418 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7419 {
7420 struct ice_hw *hw = &vsi->back->hw;
7421 int status;
7422
7423 if (!seed)
7424 return -EINVAL;
7425
7426 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7427 if (status)
7428 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7429 status, ice_aq_str(hw->adminq.sq_last_status));
7430
7431 return status;
7432 }
7433
7434
7435
7436
7437
7438
7439
7440
7441
7442
7443
7444
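/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message sequence number
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Returns the bridge mode (VEB/VEPA) of the PF's first switch
 */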
7445 static int
7446 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7447 struct net_device *dev, u32 filter_mask, int nlflags)
7448 {
7449 struct ice_netdev_priv *np = netdev_priv(dev);
7450 struct ice_vsi *vsi = np->vsi;
7451 struct ice_pf *pf = vsi->back;
7452 u16 bmode;
7453
7454 bmode = pf->first_sw->bridge_mode;
7455
7456 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7457 filter_mask, NULL);
7458 }
7459
7460
7461
7462
7463
7464
7465
7466
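/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */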
7467 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7468 {
7469 struct ice_aqc_vsi_props *vsi_props;
7470 struct ice_hw *hw = &vsi->back->hw;
7471 struct ice_vsi_ctx *ctxt;
7472 int ret;
7473
7474 vsi_props = &vsi->info;
7475
7476 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7477 if (!ctxt)
7478 return -ENOMEM;
7479
7480 ctxt->info = vsi->info;
7481
7482 if (bmode == BRIDGE_MODE_VEB)
7483
7484 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7485 else
7486
7487 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7488 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7489
7490 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7491 if (ret) {
7492 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7493 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7494 goto out;
7495 }
7496
7497 vsi_props->sw_flags = ctxt->info.sw_flags;
7498
7499 out:
7500 kfree(ctxt);
7501 return ret;
7502 }
7503
7504
7505
7506
7507
7508
7509
7510
7511
7512
7513
7514
7515
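/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback flag
 * for all VSIs connected to this switch, then updates the unicast switch
 * filter rules for the corresponding switch.
 */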
7516 static int
7517 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7518 u16 __always_unused flags,
7519 struct netlink_ext_ack __always_unused *extack)
7520 {
7521 struct ice_netdev_priv *np = netdev_priv(dev);
7522 struct ice_pf *pf = np->vsi->back;
7523 struct nlattr *attr, *br_spec;
7524 struct ice_hw *hw = &pf->hw;
7525 struct ice_sw *pf_sw;
7526 int rem, v, err = 0;
7527
7528 pf_sw = pf->first_sw;
7529
7530 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7531
7532 nla_for_each_nested(attr, br_spec, rem) {
7533 __u16 mode;
7534
7535 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7536 continue;
7537 mode = nla_get_u16(attr);
7538 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7539 return -EINVAL;
7540
7541 if (mode == pf_sw->bridge_mode)
7542 continue;
7543
7544
7545
7546 ice_for_each_vsi(pf, v) {
7547 if (!pf->vsi[v])
7548 continue;
7549 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7550 if (err)
7551 return err;
7552 }
7553
7554 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7555
7556
7557
7558 err = ice_update_sw_rule_bridge_mode(hw);
7559 if (err) {
7560 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7561 mode, err,
7562 ice_aq_str(hw->adminq.sq_last_status));
7563
7564 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7565 return err;
7566 }
7567
7568 pf_sw->bridge_mode = mode;
7569 }
7570
7571 return 0;
7572 }
7573
7574
7575
7576
7577
7578
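/**
 * ice_tx_timeout - Respond to a Tx hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue that timed out
 */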
7579 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7580 {
7581 struct ice_netdev_priv *np = netdev_priv(netdev);
7582 struct ice_tx_ring *tx_ring = NULL;
7583 struct ice_vsi *vsi = np->vsi;
7584 struct ice_pf *pf = vsi->back;
7585 u32 i;
7586
7587 pf->tx_timeout_count++;
7588
7589
7590
7591
7592
7593 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7594 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7595 txqueue);
7596 return;
7597 }
7598
7599
7600 ice_for_each_txq(vsi, i)
7601 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7602 if (txqueue == vsi->tx_rings[i]->q_index) {
7603 tx_ring = vsi->tx_rings[i];
7604 break;
7605 }
7606
7607
7608
7609
7610 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7611 pf->tx_timeout_recovery_level = 1;
7612 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7613 netdev->watchdog_timeo)))
7614 return;
7615
7616 if (tx_ring) {
7617 struct ice_hw *hw = &pf->hw;
7618 u32 head, val = 0;
7619
7620 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7621 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7622
7623 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7624
7625 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7626 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7627 head, tx_ring->next_to_use, val);
7628 }
7629
7630 pf->tx_timeout_last_recovery = jiffies;
7631 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7632 pf->tx_timeout_recovery_level, txqueue);
7633
7634 switch (pf->tx_timeout_recovery_level) {
7635 case 1:
7636 set_bit(ICE_PFR_REQ, pf->state);
7637 break;
7638 case 2:
7639 set_bit(ICE_CORER_REQ, pf->state);
7640 break;
7641 case 3:
7642 set_bit(ICE_GLOBR_REQ, pf->state);
7643 break;
7644 default:
7645 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7646 set_bit(ICE_DOWN, pf->state);
7647 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7648 set_bit(ICE_SERVICE_DIS, pf->state);
7649 break;
7650 }
7651
7652 ice_service_task_schedule(pf);
7653 pf->tx_timeout_recovery_level++;
7654 }
7655
7656
7657
7658
7659
7660
7661
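/**
 * ice_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @filter_dev: device on which filter is added
 * @cls_flower: offload data
 */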
7662 static int
7663 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7664 struct net_device *filter_dev,
7665 struct flow_cls_offload *cls_flower)
7666 {
7667 struct ice_vsi *vsi = np->vsi;
7668
7669 if (cls_flower->common.chain_index)
7670 return -EOPNOTSUPP;
7671
7672 switch (cls_flower->command) {
7673 case FLOW_CLS_REPLACE:
7674 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7675 case FLOW_CLS_DESTROY:
7676 return ice_del_cls_flower(vsi, cls_flower);
7677 default:
7678 return -EINVAL;
7679 }
7680 }
7681
7682
7683
7684
7685
7686
7687
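/**
 * ice_setup_tc_block_cb - callback handler registered for TC block
 * @type: TC SETUP type
 * @type_data: TC flower offload data that contains user input
 * @cb_priv: netdev private data
 */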
7688 static int
7689 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7690 {
7691 struct ice_netdev_priv *np = cb_priv;
7692
7693 switch (type) {
7694 case TC_SETUP_CLSFLOWER:
7695 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7696 type_data);
7697 default:
7698 return -EOPNOTSUPP;
7699 }
7700 }
7701
7702
7703
7704
7705
7706
7707
7708
7709
7710
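/**
 * ice_validate_mqprio_qopt - Validate TC/mqprio input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * Validates queue counts (including power-of-2 constraints), queue offsets,
 * and min/max Tx rate limits against the VSI's resources and link speed.
 */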
7711 static int
7712 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7713 struct tc_mqprio_qopt_offload *mqprio_qopt)
7714 {
7715 u64 sum_max_rate = 0, sum_min_rate = 0;
7716 int non_power_of_2_qcount = 0;
7717 struct ice_pf *pf = vsi->back;
7718 int max_rss_q_cnt = 0;
7719 struct device *dev;
7720 int i, speed;
7721 u8 num_tc;
7722
7723 if (vsi->type != ICE_VSI_PF)
7724 return -EINVAL;
7725
7726 if (mqprio_qopt->qopt.offset[0] != 0 ||
7727 mqprio_qopt->qopt.num_tc < 1 ||
7728 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7729 return -EINVAL;
7730
7731 dev = ice_pf_to_dev(pf);
7732 vsi->ch_rss_size = 0;
7733 num_tc = mqprio_qopt->qopt.num_tc;
7734
7735 for (i = 0; num_tc; i++) {
7736 int qcount = mqprio_qopt->qopt.count[i];
7737 u64 max_rate, min_rate, rem;
7738
7739 if (!qcount)
7740 return -EINVAL;
7741
7742 if (is_power_of_2(qcount)) {
7743 if (non_power_of_2_qcount &&
7744 qcount > non_power_of_2_qcount) {
7745 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7746 qcount, non_power_of_2_qcount);
7747 return -EINVAL;
7748 }
7749 if (qcount > max_rss_q_cnt)
7750 max_rss_q_cnt = qcount;
7751 } else {
7752 if (non_power_of_2_qcount &&
7753 qcount != non_power_of_2_qcount) {
7754 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7755 qcount, non_power_of_2_qcount);
7756 return -EINVAL;
7757 }
7758 if (qcount < max_rss_q_cnt) {
7759 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7760 qcount, max_rss_q_cnt);
7761 return -EINVAL;
7762 }
7763 max_rss_q_cnt = qcount;
7764 non_power_of_2_qcount = qcount;
7765 }
7766
7767
7768
7769
7770
7771
7772 max_rate = mqprio_qopt->max_rate[i];
7773 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7774 sum_max_rate += max_rate;
7775
7776
7777 min_rate = mqprio_qopt->min_rate[i];
7778 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7779 sum_min_rate += min_rate;
7780
7781 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7782 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7783 min_rate, ICE_MIN_BW_LIMIT);
7784 return -EINVAL;
7785 }
7786
7787 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7788 if (rem) {
7789 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7790 i, ICE_MIN_BW_LIMIT);
7791 return -EINVAL;
7792 }
7793
7794 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7795 if (rem) {
7796 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7797 i, ICE_MIN_BW_LIMIT);
7798 return -EINVAL;
7799 }
7800
7801
7802
7803
7804
7805 if (max_rate && min_rate > max_rate) {
7806 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7807 min_rate, max_rate);
7808 return -EINVAL;
7809 }
7810
7811 if (i >= mqprio_qopt->qopt.num_tc - 1)
7812 break;
7813 if (mqprio_qopt->qopt.offset[i + 1] !=
7814 (mqprio_qopt->qopt.offset[i] + qcount))
7815 return -EINVAL;
7816 }
7817 if (vsi->num_rxq <
7818 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7819 return -EINVAL;
7820 if (vsi->num_txq <
7821 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7822 return -EINVAL;
7823
7824 speed = ice_get_link_speed_kbps(vsi);
7825 if (sum_max_rate && sum_max_rate > (u64)speed) {
7826 dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7827 sum_max_rate, speed);
7828 return -EINVAL;
7829 }
7830 if (sum_min_rate && sum_min_rate > (u64)speed) {
7831 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7832 sum_min_rate, speed);
7833 return -EINVAL;
7834 }
7835
7836
7837 vsi->ch_rss_size = max_rss_q_cnt;
7838
7839 return 0;
7840 }
7841
7842
7843
7844
7845
7846
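/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for the PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 */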
7847 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
7848 {
7849 struct device *dev = ice_pf_to_dev(pf);
7850 bool added = false;
7851 struct ice_hw *hw;
7852 int flow;
7853
7854 if (!(vsi->num_gfltr || vsi->num_bfltr))
7855 return -EINVAL;
7856
7857 hw = &pf->hw;
7858 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
7859 struct ice_fd_hw_prof *prof;
7860 int tun, status;
7861 u64 entry_h;
7862
7863 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
7864 hw->fdir_prof[flow]->cnt))
7865 continue;
7866
7867 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
7868 enum ice_flow_priority prio;
7869 u64 prof_id;
7870
7871
7872 prio = ICE_FLOW_PRIO_NORMAL;
7873 prof = hw->fdir_prof[flow];
7874 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
7875 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
7876 prof->vsi_h[0], vsi->idx,
7877 prio, prof->fdir_seg[tun],
7878 &entry_h);
7879 if (status) {
7880 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
7881 vsi->idx, flow);
7882 continue;
7883 }
7884
7885 prof->entry_h[prof->cnt][tun] = entry_h;
7886 }
7887
7888
7889 prof->vsi_h[prof->cnt] = vsi->idx;
7890 prof->cnt++;
7891
7892 added = true;
7893 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
7894 flow);
7895 }
7896
7897 if (!added)
7898 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
7899
7900 return 0;
7901 }
7902
7903
7904
7905
7906
7907
7908
7909
7910
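/**
 * ice_add_channel - add a channel by creating a VSI
 * @pf: ptr to PF device
 * @sw_id: underlying HW switching element ID
 * @ch: ptr to channel structure
 *
 * Creates a channel (VSI) and copies its queue/TC mapping into @ch
 */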
7911 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
7912 {
7913 struct device *dev = ice_pf_to_dev(pf);
7914 struct ice_vsi *vsi;
7915
7916 if (ch->type != ICE_VSI_CHNL) {
7917 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
7918 return -EINVAL;
7919 }
7920
7921 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
7922 if (!vsi || vsi->type != ICE_VSI_CHNL) {
7923 dev_err(dev, "create chnl VSI failure\n");
7924 return -EINVAL;
7925 }
7926
7927 ice_add_vsi_to_fdir(pf, vsi);
7928
7929 ch->sw_id = sw_id;
7930 ch->vsi_num = vsi->vsi_num;
7931 ch->info.mapping_flags = vsi->info.mapping_flags;
7932 ch->ch_vsi = vsi;
7933
7934 vsi->ch = ch;
7935
7936 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
7937 sizeof(vsi->info.q_mapping));
7938 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
7939 sizeof(vsi->info.tc_mapping));
7940
7941 return 0;
7942 }
7943
7944
7945
7946
7947
7948
7949
7950
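/**
 * ice_chnl_cfg_res - configure channel resources
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Associates the channel's Tx/Rx rings and queue vectors with the channel
 * and programs ITR settings for non-dynamic ring containers.
 */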
7951 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
7952 {
7953 int i;
7954
7955 for (i = 0; i < ch->num_txq; i++) {
7956 struct ice_q_vector *tx_q_vector, *rx_q_vector;
7957 struct ice_ring_container *rc;
7958 struct ice_tx_ring *tx_ring;
7959 struct ice_rx_ring *rx_ring;
7960
7961 tx_ring = vsi->tx_rings[ch->base_q + i];
7962 rx_ring = vsi->rx_rings[ch->base_q + i];
7963 if (!tx_ring || !rx_ring)
7964 continue;
7965
7966
7967 tx_ring->ch = ch;
7968 rx_ring->ch = ch;
7969
7970
7971 tx_q_vector = tx_ring->q_vector;
7972 rx_q_vector = rx_ring->q_vector;
7973 if (!tx_q_vector && !rx_q_vector)
7974 continue;
7975
7976 if (tx_q_vector) {
7977 tx_q_vector->ch = ch;
7978
7979 rc = &tx_q_vector->tx;
7980 if (!ITR_IS_DYNAMIC(rc))
7981 ice_write_itr(rc, rc->itr_setting);
7982 }
7983 if (rx_q_vector) {
7984 rx_q_vector->ch = ch;
7985
7986 rc = &rx_q_vector->rx;
7987 if (!ITR_IS_DYNAMIC(rc))
7988 ice_write_itr(rc, rc->itr_setting);
7989 }
7990 }
7991
7992
7993
7994
7995
7996 if (ch->num_txq || ch->num_rxq)
7997 ice_flush(&vsi->back->hw);
7998 }
7999
8000
8001
8002
8003
8004
8005
8006
8007
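/**
 * ice_cfg_chnl_all_res - configure all channel resources
 * @vsi: ptr to main VSI
 * @ch: ptr to channel structure
 */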
8008 static void
8009 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8010 {
8011
8012
8013
8014 ice_chnl_cfg_res(vsi, ch);
8015 }
8016
8017
8018
8019
8020
8021
8022
8023
8024
8025
8026
8027
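/**
 * ice_setup_hw_channel - setup a new hardware channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @sw_id: underlying HW switching element ID
 * @type: type of channel VSI to be created
 *
 * Sets up a new channel (VSI) and configures its queue resources
 */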
8028 static int
8029 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8030 struct ice_channel *ch, u16 sw_id, u8 type)
8031 {
8032 struct device *dev = ice_pf_to_dev(pf);
8033 int ret;
8034
8035 ch->base_q = vsi->next_base_q;
8036 ch->type = type;
8037
8038 ret = ice_add_channel(pf, sw_id, ch);
8039 if (ret) {
8040 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8041 return ret;
8042 }
8043
8044
8045 ice_cfg_chnl_all_res(vsi, ch);
8046
8047
8048
8049
8050 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8051 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8052 ch->num_rxq);
8053
8054 return 0;
8055 }
8056
8057
8058
8059
8060
8061
8062
8063
8064
8065
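/**
 * ice_setup_channel - setup a new channel using the uplink switch element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Returns true if the channel VSI was created successfully
 */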
8066 static bool
8067 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8068 struct ice_channel *ch)
8069 {
8070 struct device *dev = ice_pf_to_dev(pf);
8071 u16 sw_id;
8072 int ret;
8073
8074 if (vsi->type != ICE_VSI_PF) {
8075 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8076 return false;
8077 }
8078
8079 sw_id = pf->first_sw->sw_id;
8080
8081
8082 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8083 if (ret) {
8084 dev_err(dev, "failed to setup hw_channel\n");
8085 return false;
8086 }
8087 dev_dbg(dev, "successfully created channel()\n");
8088
8089 return ch->ch_vsi ? true : false;
8090 }
8091
8092
8093
8094
8095
8096
8097
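/**
 * ice_set_bw_limit - setup BW limits for Tx traffic
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
 */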
8098 static int
8099 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8100 {
8101 int err;
8102
8103 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8104 if (err)
8105 return err;
8106
8107 return ice_set_max_bw_limit(vsi, max_tx_rate);
8108 }
8109
8110
8111
8112
8113
8114
8115
8116
8117
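/**
 * ice_create_q_channel - create a queue channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (contains channel specific params)
 *
 * Creates a channel (VSI) using the queue counts specified by the user and
 * applies the channel's Tx rate limits, if any.
 */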
8118 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8119 {
8120 struct ice_pf *pf = vsi->back;
8121 struct device *dev;
8122
8123 if (!ch)
8124 return -EINVAL;
8125
8126 dev = ice_pf_to_dev(pf);
8127 if (!ch->num_txq || !ch->num_rxq) {
8128 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8129 return -EINVAL;
8130 }
8131
8132 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8133 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8134 vsi->cnt_q_avail, ch->num_txq);
8135 return -EINVAL;
8136 }
8137
8138 if (!ice_setup_channel(pf, vsi, ch)) {
8139 dev_info(dev, "Failed to setup channel\n");
8140 return -EINVAL;
8141 }
8142
8143 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8144 int ret;
8145
8146 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8147 ch->min_tx_rate);
8148 if (ret)
8149 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8150 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8151 else
8152 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8153 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8154 }
8155
8156 vsi->cnt_q_avail -= ch->num_txq;
8157
8158 return 0;
8159 }
8160
8161
8162
8163
8164
8165
8166
8167
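/**
 * ice_rem_all_chnl_fltrs - remove all channel-specific TC flower filters
 * @pf: ptr to PF; TC flower filters are tracked at the PF level
 */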
8168 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8169 {
8170 struct ice_tc_flower_fltr *fltr;
8171 struct hlist_node *node;
8172
8173
8174 hlist_for_each_entry_safe(fltr, node,
8175 &pf->tc_flower_fltr_list,
8176 tc_flower_node) {
8177 struct ice_rule_query_data rule;
8178 int status;
8179
8180
8181 if (!ice_is_chnl_fltr(fltr))
8182 continue;
8183
8184 rule.rid = fltr->rid;
8185 rule.rule_id = fltr->rule_id;
8186 rule.vsi_handle = fltr->dest_id;
8187 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8188 if (status) {
8189 if (status == -ENOENT)
8190 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8191 rule.rule_id);
8192 else
8193 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8194 status);
8195 } else if (fltr->dest_vsi) {
8196
8197 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8198 u32 flags = fltr->flags;
8199
8200 fltr->dest_vsi->num_chnl_fltr--;
8201 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8202 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8203 pf->num_dmac_chnl_fltrs--;
8204 }
8205 }
8206
8207 hlist_del(&fltr->tc_flower_node);
8208 kfree(fltr);
8209 }
8210 }
8211
8212
8213
8214
8215
8216
8217
8218
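/**
 * ice_remove_q_channels - remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filters or not
 */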
8219 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8220 {
8221 struct ice_channel *ch, *ch_tmp;
8222 struct ice_pf *pf = vsi->back;
8223 int i;
8224
8225
8226 if (rem_fltr)
8227 ice_rem_all_chnl_fltrs(pf);
8228
8229
8230 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8231 struct ice_hw *hw = &pf->hw;
8232
8233 mutex_lock(&hw->fdir_fltr_lock);
8234 ice_fdir_del_all_fltrs(vsi);
8235 mutex_unlock(&hw->fdir_fltr_lock);
8236 }
8237
8238
8239 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8240 struct ice_vsi *ch_vsi;
8241
8242 list_del(&ch->list);
8243 ch_vsi = ch->ch_vsi;
8244 if (!ch_vsi) {
8245 kfree(ch);
8246 continue;
8247 }
8248
8249
8250 for (i = 0; i < ch->num_rxq; i++) {
8251 struct ice_tx_ring *tx_ring;
8252 struct ice_rx_ring *rx_ring;
8253
8254 tx_ring = vsi->tx_rings[ch->base_q + i];
8255 rx_ring = vsi->rx_rings[ch->base_q + i];
8256 if (tx_ring) {
8257 tx_ring->ch = NULL;
8258 if (tx_ring->q_vector)
8259 tx_ring->q_vector->ch = NULL;
8260 }
8261 if (rx_ring) {
8262 rx_ring->ch = NULL;
8263 if (rx_ring->q_vector)
8264 rx_ring->q_vector->ch = NULL;
8265 }
8266 }
8267
8268
8269 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8270
8271
8272 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8273
8274
8275 ice_vsi_delete(ch->ch_vsi);
8276
8277
8278 ice_vsi_clear(ch->ch_vsi);
8279
8280
8281 kfree(ch);
8282 }
8283
8284
8285 ice_for_each_chnl_tc(i)
8286 vsi->tc_map_vsi[i] = NULL;
8287
8288
8289 vsi->all_enatc = 0;
8290 vsi->all_numtc = 0;
8291 }
8292
8293
8294
8295
8296
8297
8298
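/**
 * ice_rebuild_channels - rebuild channel VSIs after reset
 * @pf: ptr to PF
 *
 * Recreates channel VSIs, replays their filters, and restores BW rate limits
 */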
8299 static int ice_rebuild_channels(struct ice_pf *pf)
8300 {
8301 struct device *dev = ice_pf_to_dev(pf);
8302 struct ice_vsi *main_vsi;
8303 bool rem_adv_fltr = true;
8304 struct ice_channel *ch;
8305 struct ice_vsi *vsi;
8306 int tc_idx = 1;
8307 int i, err;
8308
8309 main_vsi = ice_get_main_vsi(pf);
8310 if (!main_vsi)
8311 return 0;
8312
8313 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8314 main_vsi->old_numtc == 1)
8315 return 0;
8316
8317
8318
8319
8320 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8321 if (err) {
8322 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8323 main_vsi->old_ena_tc, main_vsi->vsi_num);
8324 return err;
8325 }
8326
8327
8328 ice_for_each_vsi(pf, i) {
8329 enum ice_vsi_type type;
8330
8331 vsi = pf->vsi[i];
8332 if (!vsi || vsi->type != ICE_VSI_CHNL)
8333 continue;
8334
8335 type = vsi->type;
8336
8337
8338 err = ice_vsi_rebuild(vsi, true);
8339 if (err) {
8340 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8341 ice_vsi_type_str(type), vsi->idx, err);
8342 goto cleanup;
8343 }
8344
8345
8346
8347
8348 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8349
8350
8351 err = ice_replay_vsi(&pf->hw, vsi->idx);
8352 if (err) {
8353 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8354 ice_vsi_type_str(type), err, vsi->idx);
8355 rem_adv_fltr = false;
8356 goto cleanup;
8357 }
8358 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8359 ice_vsi_type_str(type), vsi->idx);
8360
8361
8362
8363
8364 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8365 }
8366
8367
8368
8369
8370 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8371 struct ice_vsi *ch_vsi;
8372
8373 ch_vsi = ch->ch_vsi;
8374 if (!ch_vsi)
8375 continue;
8376
8377
8378 ice_cfg_chnl_all_res(main_vsi, ch);
8379
8380
8381 if (!ch->max_tx_rate && !ch->min_tx_rate)
8382 continue;
8383
8384 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8385 ch->min_tx_rate);
8386 if (err)
8387 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8388 err, ch->max_tx_rate, ch->min_tx_rate,
8389 ch_vsi->vsi_num);
8390 else
8391 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8392 ch->max_tx_rate, ch->min_tx_rate,
8393 ch_vsi->vsi_num);
8394 }
8395
8396
8397 if (main_vsi->ch_rss_size)
8398 ice_vsi_cfg_rss_lut_key(main_vsi);
8399
8400 return 0;
8401
8402 cleanup:
8403 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8404 return err;
8405 }
8406
8407
8408
8409
8410
8411
8412
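/**
 * ice_create_q_channels - add queue channels for the enabled TCs
 * @vsi: VSI to be configured
 */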
8413 static int ice_create_q_channels(struct ice_vsi *vsi)
8414 {
8415 struct ice_pf *pf = vsi->back;
8416 struct ice_channel *ch;
8417 int ret = 0, i;
8418
8419 ice_for_each_chnl_tc(i) {
8420 if (!(vsi->all_enatc & BIT(i)))
8421 continue;
8422
8423 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8424 if (!ch) {
8425 ret = -ENOMEM;
8426 goto err_free;
8427 }
8428 INIT_LIST_HEAD(&ch->list);
8429 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8430 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8431 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8432 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8433 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8434
8435
8436 if (ch->max_tx_rate)
8437 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8438 ICE_BW_KBPS_DIVISOR);
8439 if (ch->min_tx_rate)
8440 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8441 ICE_BW_KBPS_DIVISOR);
8442
8443 ret = ice_create_q_channel(vsi, ch);
8444 if (ret) {
8445 dev_err(ice_pf_to_dev(pf),
8446 "failed creating channel TC:%d\n", i);
8447 kfree(ch);
8448 goto err_free;
8449 }
8450 list_add_tail(&ch->list, &vsi->ch_list);
8451 vsi->tc_map_vsi[i] = ch->ch_vsi;
8452 dev_dbg(ice_pf_to_dev(pf),
8453 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8454 }
8455 return 0;
8456
8457 err_free:
8458 ice_remove_q_channels(vsi, false);
8459
8460 return ret;
8461 }
8462
8463
8464
8465
8466
8467
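/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */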
8468 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8469 {
8470 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8471 struct ice_netdev_priv *np = netdev_priv(netdev);
8472 struct ice_vsi *vsi = np->vsi;
8473 struct ice_pf *pf = vsi->back;
8474 u16 mode, ena_tc_qdisc = 0;
8475 int cur_txq, cur_rxq;
8476 u8 hw = 0, num_tcf;
8477 struct device *dev;
8478 int ret, i;
8479
8480 dev = ice_pf_to_dev(pf);
8481 num_tcf = mqprio_qopt->qopt.num_tc;
8482 hw = mqprio_qopt->qopt.hw;
8483 mode = mqprio_qopt->mode;
8484 if (!hw) {
8485 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8486 vsi->ch_rss_size = 0;
8487 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8488 goto config_tcf;
8489 }
8490
8491
8492 for (i = 0; i < num_tcf; i++)
8493 ena_tc_qdisc |= BIT(i);
8494
8495 switch (mode) {
8496 case TC_MQPRIO_MODE_CHANNEL:
8497
8498 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8499 if (ret) {
8500 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8501 ret);
8502 return ret;
8503 }
8504 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8505 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8506
8507
8508
8509
8510 if (vsi->netdev->features & NETIF_F_HW_TC)
8511 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8512 break;
8513 default:
8514 return -EINVAL;
8515 }
8516
8517 config_tcf:
8518
8519
8520 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8521 mode != TC_MQPRIO_MODE_CHANNEL)
8522 return 0;
8523
8524
8525 ice_dis_vsi(vsi, true);
8526
8527 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8528 ice_remove_q_channels(vsi, true);
8529
8530 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8531 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8532 num_online_cpus());
8533 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8534 num_online_cpus());
8535 } else {
8536
8537 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8538
8539 for (i = 0; i < num_tcf; i++) {
8540 if (!(ena_tc_qdisc & BIT(i)))
8541 continue;
8542
8543 offset = vsi->mqprio_qopt.qopt.offset[i];
8544 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8545 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8546 }
8547 vsi->req_txq = offset + qcount_tx;
8548 vsi->req_rxq = offset + qcount_rx;
8549
8550
8551
8552
8553
8554 vsi->orig_rss_size = vsi->rss_size;
8555 }
8556
8557
8558
8559
8560 cur_txq = vsi->num_txq;
8561 cur_rxq = vsi->num_rxq;
8562
8563
8564 ret = ice_vsi_rebuild(vsi, false);
8565 if (ret) {
8566
8567 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8568 vsi->req_txq = cur_txq;
8569 vsi->req_rxq = cur_rxq;
8570 clear_bit(ICE_RESET_FAILED, pf->state);
8571 if (ice_vsi_rebuild(vsi, false)) {
8572 dev_err(dev, "Rebuild of main VSI failed again\n");
8573 return ret;
8574 }
8575 }
8576
8577 vsi->all_numtc = num_tcf;
8578 vsi->all_enatc = ena_tc_qdisc;
8579 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8580 if (ret) {
8581 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8582 vsi->vsi_num);
8583 goto exit;
8584 }
8585
8586 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8587 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8588 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8589
8590
8591 if (max_tx_rate || min_tx_rate) {
8592
8593 if (max_tx_rate)
8594 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8595 if (min_tx_rate)
8596 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8597
8598 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8599 if (!ret) {
8600 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8601 max_tx_rate, min_tx_rate, vsi->vsi_num);
8602 } else {
8603 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8604 max_tx_rate, min_tx_rate, vsi->vsi_num);
8605 goto exit;
8606 }
8607 }
8608 ret = ice_create_q_channels(vsi);
8609 if (ret) {
8610 netdev_err(netdev, "failed configuring queue channels\n");
8611 goto exit;
8612 } else {
8613 netdev_dbg(netdev, "successfully configured channels\n");
8614 }
8615 }
8616
8617 if (vsi->ch_rss_size)
8618 ice_vsi_cfg_rss_lut_key(vsi);
8619
8620 exit:
8621
8622 if (ret) {
8623 vsi->all_numtc = 0;
8624 vsi->all_enatc = 0;
8625 }
8626
8627 ice_ena_vsi(vsi, true);
8628
8629 return ret;
8630 }
8631
8632 static LIST_HEAD(ice_block_cb_list);
8633
8634 static int
8635 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8636 void *type_data)
8637 {
8638 struct ice_netdev_priv *np = netdev_priv(netdev);
8639 struct ice_pf *pf = np->vsi->back;
8640 int err;
8641
8642 switch (type) {
8643 case TC_SETUP_BLOCK:
8644 return flow_block_cb_setup_simple(type_data,
8645 &ice_block_cb_list,
8646 ice_setup_tc_block_cb,
8647 np, np, true);
8648 case TC_SETUP_QDISC_MQPRIO:
8649
8650 mutex_lock(&pf->tc_mutex);
8651 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8652 mutex_unlock(&pf->tc_mutex);
8653 return err;
8654 default:
8655 return -EOPNOTSUPP;
8656 }
8657 return -EOPNOTSUPP;
8658 }
8659
8660 static struct ice_indr_block_priv *
8661 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8662 struct net_device *netdev)
8663 {
8664 struct ice_indr_block_priv *cb_priv;
8665
8666 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8667 if (!cb_priv->netdev)
8668 return NULL;
8669 if (cb_priv->netdev == netdev)
8670 return cb_priv;
8671 }
8672 return NULL;
8673 }
8674
8675 static int
8676 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8677 void *indr_priv)
8678 {
8679 struct ice_indr_block_priv *priv = indr_priv;
8680 struct ice_netdev_priv *np = priv->np;
8681
8682 switch (type) {
8683 case TC_SETUP_CLSFLOWER:
8684 return ice_setup_tc_cls_flower(np, priv->netdev,
8685 (struct flow_cls_offload *)
8686 type_data);
8687 default:
8688 return -EOPNOTSUPP;
8689 }
8690 }
8691
8692 static int
8693 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8694 struct ice_netdev_priv *np,
8695 struct flow_block_offload *f, void *data,
8696 void (*cleanup)(struct flow_block_cb *block_cb))
8697 {
8698 struct ice_indr_block_priv *indr_priv;
8699 struct flow_block_cb *block_cb;
8700
8701 if (!ice_is_tunnel_supported(netdev) &&
8702 !(is_vlan_dev(netdev) &&
8703 vlan_dev_real_dev(netdev) == np->vsi->netdev))
8704 return -EOPNOTSUPP;
8705
8706 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8707 return -EOPNOTSUPP;
8708
8709 switch (f->command) {
8710 case FLOW_BLOCK_BIND:
8711 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8712 if (indr_priv)
8713 return -EEXIST;
8714
8715 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8716 if (!indr_priv)
8717 return -ENOMEM;
8718
8719 indr_priv->netdev = netdev;
8720 indr_priv->np = np;
8721 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8722
8723 block_cb =
8724 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8725 indr_priv, indr_priv,
8726 ice_rep_indr_tc_block_unbind,
8727 f, netdev, sch, data, np,
8728 cleanup);
8729
8730 if (IS_ERR(block_cb)) {
8731 list_del(&indr_priv->list);
8732 kfree(indr_priv);
8733 return PTR_ERR(block_cb);
8734 }
8735 flow_block_cb_add(block_cb, f);
8736 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8737 break;
8738 case FLOW_BLOCK_UNBIND:
8739 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8740 if (!indr_priv)
8741 return -ENOENT;
8742
8743 block_cb = flow_block_cb_lookup(f->block,
8744 ice_indr_setup_block_cb,
8745 indr_priv);
8746 if (!block_cb)
8747 return -ENOENT;
8748
8749 flow_indr_block_cb_remove(block_cb, f);
8750
8751 list_del(&block_cb->driver_list);
8752 break;
8753 default:
8754 return -EOPNOTSUPP;
8755 }
8756 return 0;
8757 }
8758
8759 static int
8760 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8761 void *cb_priv, enum tc_setup_type type, void *type_data,
8762 void *data,
8763 void (*cleanup)(struct flow_block_cb *block_cb))
8764 {
8765 switch (type) {
8766 case TC_SETUP_BLOCK:
8767 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8768 data, cleanup);
8769
8770 default:
8771 return -EOPNOTSUPP;
8772 }
8773 }
8774
8775
8776
8777
8778
8779
8780
8781
8782
8783
8784
8785
8786
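/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */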
8787 int ice_open(struct net_device *netdev)
8788 {
8789 struct ice_netdev_priv *np = netdev_priv(netdev);
8790 struct ice_pf *pf = np->vsi->back;
8791
8792 if (ice_is_reset_in_progress(pf->state)) {
8793 netdev_err(netdev, "can't open net device while reset is in progress");
8794 return -EBUSY;
8795 }
8796
8797 return ice_open_internal(netdev);
8798 }
8799
8800
8801
8802
8803
8804
8805
8806
8807
8808
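/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly except
 * by ice_open and reset handling routines.
 *
 * Returns 0 on success, negative value on failure
 */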
8809 int ice_open_internal(struct net_device *netdev)
8810 {
8811 struct ice_netdev_priv *np = netdev_priv(netdev);
8812 struct ice_vsi *vsi = np->vsi;
8813 struct ice_pf *pf = vsi->back;
8814 struct ice_port_info *pi;
8815 int err;
8816
8817 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
8818 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
8819 return -EIO;
8820 }
8821
8822 netif_carrier_off(netdev);
8823
8824 pi = vsi->port_info;
8825 err = ice_update_link_info(pi);
8826 if (err) {
8827 netdev_err(netdev, "Failed to get link info, error %d\n", err);
8828 return err;
8829 }
8830
8831 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
8832
8833
8834 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
8835 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8836 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
8837 err = ice_init_phy_user_cfg(pi);
8838 if (err) {
8839 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
8840 err);
8841 return err;
8842 }
8843 }
8844
8845 err = ice_configure_phy(vsi);
8846 if (err) {
8847 netdev_err(netdev, "Failed to set physical link up, error %d\n",
8848 err);
8849 return err;
8850 }
8851 } else {
8852 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8853 ice_set_link(vsi, false);
8854 }
8855
8856 err = ice_vsi_open(vsi);
8857 if (err)
8858 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
8859 vsi->vsi_num, vsi->vsw->sw_id);
8860
8861
8862 udp_tunnel_get_rx_info(netdev);
8863
8864 return err;
8865 }
8866
8867
8868
8869
8870
8871
8872
8873
8874
8875
8876
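/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative on failure
 */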
8877 int ice_stop(struct net_device *netdev)
8878 {
8879 struct ice_netdev_priv *np = netdev_priv(netdev);
8880 struct ice_vsi *vsi = np->vsi;
8881 struct ice_pf *pf = vsi->back;
8882
8883 if (ice_is_reset_in_progress(pf->state)) {
8884 netdev_err(netdev, "can't stop net device while reset is in progress");
8885 return -EBUSY;
8886 }
8887
8888 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
8889 int link_err = ice_force_phys_link_state(vsi, false);
8890
8891 if (link_err) {
8892 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
8893 vsi->vsi_num, link_err);
8894 return -EIO;
8895 }
8896 }
8897
8898 ice_vsi_close(vsi);
8899
8900 return 0;
8901 }
8902
8903
8904
8905
8906
8907
8908
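/**
 * ice_features_check - Validate that an encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */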
8909 static netdev_features_t
8910 ice_features_check(struct sk_buff *skb,
8911 struct net_device __always_unused *netdev,
8912 netdev_features_t features)
8913 {
8914 bool gso = skb_is_gso(skb);
8915 size_t len;
8916
8917
8918
8919
8920
8921 if (skb->ip_summed != CHECKSUM_PARTIAL)
8922 return features;
8923
8924
8925
8926
8927 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
8928 features &= ~NETIF_F_GSO_MASK;
8929
8930 len = skb_network_offset(skb);
8931 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
8932 goto out_rm_features;
8933
8934 len = skb_network_header_len(skb);
8935 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8936 goto out_rm_features;
8937
8938 if (skb->encapsulation) {
8939
8940
8941
8942
8943
8944 if (gso && (skb_shinfo(skb)->gso_type &
8945 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
8946 len = skb_inner_network_header(skb) -
8947 skb_transport_header(skb);
8948 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
8949 goto out_rm_features;
8950 }
8951
8952 len = skb_inner_network_header_len(skb);
8953 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8954 goto out_rm_features;
8955 }
8956
8957 return features;
8958 out_rm_features:
8959 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8960 }
8961
8962 static const struct net_device_ops ice_netdev_safe_mode_ops = {
8963 .ndo_open = ice_open,
8964 .ndo_stop = ice_stop,
8965 .ndo_start_xmit = ice_start_xmit,
8966 .ndo_set_mac_address = ice_set_mac_address,
8967 .ndo_validate_addr = eth_validate_addr,
8968 .ndo_change_mtu = ice_change_mtu,
8969 .ndo_get_stats64 = ice_get_stats64,
8970 .ndo_tx_timeout = ice_tx_timeout,
8971 .ndo_bpf = ice_xdp_safe_mode,
8972 };
8973
8974 static const struct net_device_ops ice_netdev_ops = {
8975 .ndo_open = ice_open,
8976 .ndo_stop = ice_stop,
8977 .ndo_start_xmit = ice_start_xmit,
8978 .ndo_select_queue = ice_select_queue,
8979 .ndo_features_check = ice_features_check,
8980 .ndo_fix_features = ice_fix_features,
8981 .ndo_set_rx_mode = ice_set_rx_mode,
8982 .ndo_set_mac_address = ice_set_mac_address,
8983 .ndo_validate_addr = eth_validate_addr,
8984 .ndo_change_mtu = ice_change_mtu,
8985 .ndo_get_stats64 = ice_get_stats64,
8986 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
8987 .ndo_eth_ioctl = ice_eth_ioctl,
8988 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
8989 .ndo_set_vf_mac = ice_set_vf_mac,
8990 .ndo_get_vf_config = ice_get_vf_cfg,
8991 .ndo_set_vf_trust = ice_set_vf_trust,
8992 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
8993 .ndo_set_vf_link_state = ice_set_vf_link_state,
8994 .ndo_get_vf_stats = ice_get_vf_stats,
8995 .ndo_set_vf_rate = ice_set_vf_bw,
8996 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
8997 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
8998 .ndo_setup_tc = ice_setup_tc,
8999 .ndo_set_features = ice_set_features,
9000 .ndo_bridge_getlink = ice_bridge_getlink,
9001 .ndo_bridge_setlink = ice_bridge_setlink,
9002 .ndo_fdb_add = ice_fdb_add,
9003 .ndo_fdb_del = ice_fdb_del,
9004 #ifdef CONFIG_RFS_ACCEL
9005 .ndo_rx_flow_steer = ice_rx_flow_steer,
9006 #endif
9007 .ndo_tx_timeout = ice_tx_timeout,
9008 .ndo_bpf = ice_xdp,
9009 .ndo_xdp_xmit = ice_xdp_xmit,
9010 .ndo_xsk_wakeup = ice_xsk_wakeup,
9011 .ndo_get_devlink_port = ice_get_devlink_port,
9012 };