0001
0002
0003
0004 #include "ice.h"
0005 #include "ice_lib.h"
0006 #include "ice_eswitch.h"
0007 #include "ice_fltr.h"
0008 #include "ice_repr.h"
0009 #include "ice_devlink.h"
0010 #include "ice_tc_lib.h"
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021 int
0022 ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
0023 {
0024 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
0025 struct ice_adv_rule_info rule_info = { 0 };
0026 struct ice_adv_lkup_elem *list;
0027 struct ice_hw *hw = &pf->hw;
0028 const u16 lkups_cnt = 1;
0029 int err;
0030
0031 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
0032 if (!list)
0033 return -ENOMEM;
0034
0035 list[0].type = ICE_MAC_OFOS;
0036 ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
0037 eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
0038
0039 rule_info.sw_act.flag |= ICE_FLTR_TX;
0040 rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
0041 rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
0042 rule_info.rx = false;
0043 rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
0044 ctrl_vsi->rxq_map[vf->vf_id];
0045 rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
0046 rule_info.flags_info.act_valid = true;
0047 rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
0048
0049 err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
0050 vf->repr->mac_rule);
0051 if (err)
0052 dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
0053 vf->vf_id);
0054 else
0055 vf->repr->rule_added = true;
0056
0057 kfree(list);
0058 return err;
0059 }
0060
0061
0062
0063
0064
0065
0066
0067 void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
0068 {
0069 int err;
0070
0071 if (!ice_is_switchdev_running(vf->pf))
0072 return;
0073
0074 if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
0075 err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
0076 vf->hw_lan_addr.addr);
0077 if (err) {
0078 dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
0079 vf->hw_lan_addr.addr, vf->vf_id, err);
0080 return;
0081 }
0082 vf->num_mac++;
0083
0084 ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
0085 }
0086 }
0087
0088
0089
0090
0091
0092
0093
0094
0095 void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
0096 {
0097 if (!ice_is_switchdev_running(vf->pf))
0098 return;
0099
0100 if (!vf->repr->rule_added)
0101 return;
0102
0103 ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
0104 vf->repr->rule_added = false;
0105 }
0106
0107
0108
0109
0110
0111
0112
0113
/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * Prepare the uplink and control VSIs for switchdev mode: disable VLAN
 * stripping on the control VSI, drop all uplink filters and synced MAC
 * addresses, make the uplink the default (catch-all) VSI, and allow
 * security override on both VSIs. Unwinds in reverse order on failure.
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
	if (vlan_ops->dis_stripping(ctrl_vsi))
		return -ENODEV;

	/* drop every existing switch filter on the uplink VSI */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	/* forget all synced unicast/multicast addresses, under the
	 * netdev address lock
	 */
	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	/* make the uplink the default VSI unless some VSI already is;
	 * remember whether we added the rule so the error path only
	 * clears what we set
	 */
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	return 0;

err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	/* restore the uplink MAC/broadcast filter removed above */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
/**
 * ice_eswitch_remap_rings_to_vectors - remap ctrl VSI rings to VF vectors
 * @pf: pointer to PF struct
 *
 * Attach each Tx/Rx ring pair of the switchdev control VSI to the q_vector
 * of the port representor with the matching id, and point the rings at the
 * representor's netdev. Indexes rx_rings[] by the Tx queue id — assumes one
 * Rx ring per Tx ring on the control VSI.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		/* ring index == VF id; takes a reference, released below */
		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		/* exactly one Tx ring per representor vector */
		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* from the stack's perspective each representor netdev has a
		 * single queue, so it must be indexed as 0
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		ice_put_vf(vf);
	}
}
0216
0217
0218
0219
0220
0221
/**
 * ice_eswitch_release_reprs - clear port representors' VSI configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI (currently unused here)
 *
 * For every configured VF representor: re-enable antispoof on the source
 * VSI, free the metadata dst, restore the VF's MAC/broadcast filter and
 * delete the representor's NAPI instance. Must be called with the VF table
 * lock held.
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* skip representors that were never fully set up */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}
0246
0247
0248
0249
0250
/**
 * ice_eswitch_setup_reprs - configure port representors
 * @pf: pointer to PF struct
 *
 * For each VF: drop its VSI filters, allocate a metadata dst for HW port
 * muxing, disable antispoof and add VLAN 0, then register NAPI on the
 * representor. A second pass fills in the dst port info and redirects the
 * representor's traffic to the control VSI. Each error path restores the
 * VF's MAC filter and undoes this VF's partial setup before bailing out to
 * ice_eswitch_release_reprs() for the already-completed VFs. Must be called
 * with the VF table lock held.
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			/* restore the MAC filter removed above */
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			/* antispoof was cleared above — re-enable it */
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

	/* second pass: all representors allocated, publish dst port info and
	 * point traffic at the control VSI
	 */
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}
0319
0320
0321
0322
0323
0324 void ice_eswitch_update_repr(struct ice_vsi *vsi)
0325 {
0326 struct ice_pf *pf = vsi->back;
0327 struct ice_repr *repr;
0328 struct ice_vf *vf;
0329 int ret;
0330
0331 if (!ice_is_switchdev_running(pf))
0332 return;
0333
0334 vf = vsi->vf;
0335 repr = vf->repr;
0336 repr->src_vsi = vsi;
0337 repr->dst->u.port_info.port_id = vsi->vsi_num;
0338
0339 ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
0340 if (ret) {
0341 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
0342 dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
0343 vsi->vf->vf_id);
0344 }
0345 }
0346
0347
0348
0349
0350
0351
0352
0353
0354 netdev_tx_t
0355 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
0356 {
0357 struct ice_netdev_priv *np;
0358 struct ice_repr *repr;
0359 struct ice_vsi *vsi;
0360
0361 np = netdev_priv(netdev);
0362 vsi = np->vsi;
0363
0364 if (ice_is_reset_in_progress(vsi->back->state) ||
0365 test_bit(ICE_VF_DIS, vsi->back->state))
0366 return NETDEV_TX_BUSY;
0367
0368 repr = ice_netdev_to_repr(netdev);
0369 skb_dst_drop(skb);
0370 dst_hold((struct dst_entry *)repr->dst);
0371 skb_dst_set(skb, (struct dst_entry *)repr->dst);
0372 skb->queue_mapping = repr->vf->vf_id;
0373
0374 return ice_start_xmit(skb, netdev);
0375 }
0376
0377
0378
0379
0380
0381
0382 void
0383 ice_eswitch_set_target_vsi(struct sk_buff *skb,
0384 struct ice_tx_offload_params *off)
0385 {
0386 struct metadata_dst *dst = skb_metadata_dst(skb);
0387 u64 cd_cmd, dst_vsi;
0388
0389 if (!dst) {
0390 cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
0391 off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
0392 } else {
0393 cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
0394 dst_vsi = ((u64)dst->u.port_info.port_id <<
0395 ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
0396 off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
0397 }
0398 }
0399
0400
0401
0402
0403
0404
0405
0406
/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * Undo ice_eswitch_setup_env(): clear allow-override on both VSIs, drop the
 * default-VSI rule and restore the uplink MAC/broadcast filter.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_clear_dflt_vsi(uplink_vsi);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
0419
0420
0421
0422
0423
0424
/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 *
 * Return: pointer to the new control VSI, or NULL on failure.
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
}
0430
0431
0432
0433
0434
/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 *
 * Must be called with the VF table lock held.
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}
0445
0446
0447
0448
0449
/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 *
 * Must be called with the VF table lock held.
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}
0460
0461
0462
0463
0464
/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 *
 * Must be called with the VF table lock held.
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}
0475
0476
0477
0478
0479
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Create the control VSI, set up the switchdev environment and all port
 * representors, remap control-VSI rings to the representors' vectors, open
 * the control VSI and enable NAPI. Error paths unwind in reverse order.
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	/* setup_reprs succeeded, so its artifacts are also torn down via
	 * err_setup_reprs if the VSI fails to open
	 */
	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
0519
0520
0521
0522
0523
/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 *
 * Tear down switchdev mode: quiesce NAPI, release the HW filter
 * environment, remove advanced rules tied to the control VSI, clear the
 * representors' configuration, release the control VSI and finally remove
 * the representors themselves.
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}
0535
0536
0537
0538
0539
0540
0541
0542 int
0543 ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
0544 struct netlink_ext_ack *extack)
0545 {
0546 struct ice_pf *pf = devlink_priv(devlink);
0547
0548 if (pf->eswitch_mode == mode)
0549 return 0;
0550
0551 if (ice_has_vfs(pf)) {
0552 dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
0553 NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
0554 return -EOPNOTSUPP;
0555 }
0556
0557 switch (mode) {
0558 case DEVLINK_ESWITCH_MODE_LEGACY:
0559 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
0560 pf->hw.pf_id);
0561 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
0562 break;
0563 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
0564 {
0565 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
0566 pf->hw.pf_id);
0567 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
0568 break;
0569 }
0570 default:
0571 NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
0572 return -EINVAL;
0573 }
0574
0575 pf->eswitch_mode = mode;
0576 return 0;
0577 }
0578
0579
0580
0581
0582
0583
0584 int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
0585 {
0586 struct ice_pf *pf = devlink_priv(devlink);
0587
0588 *mode = pf->eswitch_mode;
0589 return 0;
0590 }
0591
0592
0593
0594
0595
0596
0597
0598
/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Return: true when eswitch mode is DEVLINK_ESWITCH_MODE_SWITCHDEV, false
 * otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
0603
0604
0605
0606
0607
0608 void ice_eswitch_release(struct ice_pf *pf)
0609 {
0610 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
0611 return;
0612
0613 ice_eswitch_disable_switchdev(pf);
0614 pf->switchdev.is_running = false;
0615 }
0616
0617
0618
0619
0620
0621 int ice_eswitch_configure(struct ice_pf *pf)
0622 {
0623 int status;
0624
0625 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
0626 return 0;
0627
0628 status = ice_eswitch_enable_switchdev(pf);
0629 if (status)
0630 return status;
0631
0632 pf->switchdev.is_running = true;
0633 return 0;
0634 }
0635
0636
0637
0638
0639
0640 static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
0641 {
0642 struct ice_vf *vf;
0643 unsigned int bkt;
0644
0645 lockdep_assert_held(&pf->vfs.table_lock);
0646
0647 if (test_bit(ICE_DOWN, pf->state))
0648 return;
0649
0650 ice_for_each_vf(pf, bkt, vf) {
0651 if (vf->repr)
0652 ice_repr_start_tx_queues(vf->repr);
0653 }
0654 }
0655
0656
0657
0658
0659
0660 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
0661 {
0662 struct ice_vf *vf;
0663 unsigned int bkt;
0664
0665 lockdep_assert_held(&pf->vfs.table_lock);
0666
0667 if (test_bit(ICE_DOWN, pf->state))
0668 return;
0669
0670 ice_for_each_vf(pf, bkt, vf) {
0671 if (vf->repr)
0672 ice_repr_stop_tx_queues(vf->repr);
0673 }
0674 }
0675
0676
0677
0678
0679
/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 *
 * Rebuild the switchdev configuration after a reset: tear down NAPI, redo
 * the HW environment and representors, remap rings, replay TC filters, then
 * reopen the control VSI and restart NAPI and the representors' Tx queues.
 *
 * Return: 0 on success, error code from the first failing step otherwise
 * (no unwind is performed here).
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}