// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

0004 #include "ice.h"
0005 #include "ice_base.h"
0006 #include "ice_flow.h"
0007 #include "ice_lib.h"
0008 #include "ice_fltr.h"
0009 #include "ice_dcb_lib.h"
0010 #include "ice_devlink.h"
0011 #include "ice_vsi_vlan_ops.h"
0012
0013
0014
0015
0016
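/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */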
0017 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
0018 {
0019 switch (vsi_type) {
0020 case ICE_VSI_PF:
0021 return "ICE_VSI_PF";
0022 case ICE_VSI_VF:
0023 return "ICE_VSI_VF";
0024 case ICE_VSI_CTRL:
0025 return "ICE_VSI_CTRL";
0026 case ICE_VSI_CHNL:
0027 return "ICE_VSI_CHNL";
0028 case ICE_VSI_LB:
0029 return "ICE_VSI_LB";
0030 case ICE_VSI_SWITCHDEV_CTRL:
0031 return "ICE_VSI_SWITCHDEV_CTRL";
0032 default:
0033 return "unknown";
0034 }
0035 }
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
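/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This lets
 * all of the register writes complete before waiting for the change to finish
 * in hardware on each ring.
 */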
0047 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
0048 {
0049 int ret = 0;
0050 u16 i;
0051
0052 ice_for_each_rxq(vsi, i)
0053 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
0054
0055 ice_flush(&vsi->back->hw);
0056
0057 ice_for_each_rxq(vsi, i) {
0058 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
0059 if (ret)
0060 break;
0061 }
0062
0063 return ret;
0064 }
0065
0066
0067
0068
0069
0070
0071
0072
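/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */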
0073 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
0074 {
0075 struct ice_pf *pf = vsi->back;
0076 struct device *dev;
0077
0078 dev = ice_pf_to_dev(pf);
0079 if (vsi->type == ICE_VSI_CHNL)
0080 return 0;
0081
0082
0083 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
0084 sizeof(*vsi->tx_rings), GFP_KERNEL);
0085 if (!vsi->tx_rings)
0086 return -ENOMEM;
0087
0088 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
0089 sizeof(*vsi->rx_rings), GFP_KERNEL);
0090 if (!vsi->rx_rings)
0091 goto err_rings;
0092
0093
0094
0095
0096
0097
0098
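/* txq_map is sized for vsi->alloc_txq plus one entry per possible CPU so
 * that XDP Tx rings (allocated per CPU later) can be tracked in the same
 * map as the stack Tx rings.
 */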
0099 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
0100 sizeof(*vsi->txq_map), GFP_KERNEL);
0101
0102 if (!vsi->txq_map)
0103 goto err_txq_map;
0104
0105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
0106 sizeof(*vsi->rxq_map), GFP_KERNEL);
0107 if (!vsi->rxq_map)
0108 goto err_rxq_map;
0109
0110
0111 if (vsi->type == ICE_VSI_LB)
0112 return 0;
0113
0114
0115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
0116 sizeof(*vsi->q_vectors), GFP_KERNEL);
0117 if (!vsi->q_vectors)
0118 goto err_vectors;
0119
0120 vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
0121 if (!vsi->af_xdp_zc_qps)
0122 goto err_zc_qps;
0123
0124 return 0;
0125
0126 err_zc_qps:
0127 devm_kfree(dev, vsi->q_vectors);
0128 err_vectors:
0129 devm_kfree(dev, vsi->rxq_map);
0130 err_rxq_map:
0131 devm_kfree(dev, vsi->txq_map);
0132 err_txq_map:
0133 devm_kfree(dev, vsi->rx_rings);
0134 err_rings:
0135 devm_kfree(dev, vsi->tx_rings);
0136 return -ENOMEM;
0137 }
0138
0139
0140
0141
0142
0143 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
0144 {
0145 switch (vsi->type) {
0146 case ICE_VSI_PF:
0147 case ICE_VSI_SWITCHDEV_CTRL:
0148 case ICE_VSI_CTRL:
0149 case ICE_VSI_LB:
0150
0151
0152
0153
0154 if (!vsi->num_rx_desc)
0155 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
0156 if (!vsi->num_tx_desc)
0157 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
0158 break;
0159 default:
0160 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
0161 vsi->type);
0162 break;
0163 }
0164 }
0165
0166
0167
0168
0169
0170
0171
0172
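/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf: the VF this VSI is associated with, if any
 */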
0173 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
0174 {
0175 enum ice_vsi_type vsi_type = vsi->type;
0176 struct ice_pf *pf = vsi->back;
0177
0178 if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
0179 return;
0180
0181 switch (vsi_type) {
0182 case ICE_VSI_PF:
0183 if (vsi->req_txq) {
0184 vsi->alloc_txq = vsi->req_txq;
0185 vsi->num_txq = vsi->req_txq;
0186 } else {
0187 vsi->alloc_txq = min3(pf->num_lan_msix,
0188 ice_get_avail_txq_count(pf),
0189 (u16)num_online_cpus());
0190 }
0191
0192 pf->num_lan_tx = vsi->alloc_txq;
0193
0194
0195 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
0196 vsi->alloc_rxq = 1;
0197 } else {
0198 if (vsi->req_rxq) {
0199 vsi->alloc_rxq = vsi->req_rxq;
0200 vsi->num_rxq = vsi->req_rxq;
0201 } else {
0202 vsi->alloc_rxq = min3(pf->num_lan_msix,
0203 ice_get_avail_rxq_count(pf),
0204 (u16)num_online_cpus());
0205 }
0206 }
0207
0208 pf->num_lan_rx = vsi->alloc_rxq;
0209
0210 vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
0211 max_t(int, vsi->alloc_rxq,
0212 vsi->alloc_txq));
0213 break;
0214 case ICE_VSI_SWITCHDEV_CTRL:
0215
0216
0217
0218 vsi->alloc_txq = ice_get_num_vfs(pf);
0219 vsi->alloc_rxq = vsi->alloc_txq;
0220 vsi->num_q_vectors = 1;
0221 break;
0222 case ICE_VSI_VF:
0223 if (vf->num_req_qs)
0224 vf->num_vf_qs = vf->num_req_qs;
0225 vsi->alloc_txq = vf->num_vf_qs;
0226 vsi->alloc_rxq = vf->num_vf_qs;
0227
0228
0229
0230
0231
0232 vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
0233 break;
0234 case ICE_VSI_CTRL:
0235 vsi->alloc_txq = 1;
0236 vsi->alloc_rxq = 1;
0237 vsi->num_q_vectors = 1;
0238 break;
0239 case ICE_VSI_CHNL:
0240 vsi->alloc_txq = 0;
0241 vsi->alloc_rxq = 0;
0242 break;
0243 case ICE_VSI_LB:
0244 vsi->alloc_txq = 1;
0245 vsi->alloc_rxq = 1;
0246 break;
0247 default:
0248 dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
0249 break;
0250 }
0251
0252 ice_vsi_set_num_desc(vsi);
0253 }
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
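/**
 * ice_get_free_slot - get the next free (NULL) slot index in an array
 * @array: array of pointers to search
 * @size: size of the array
 * @curr: last known occupied index, used as a search hint
 *
 * Returns the index of the next free slot, or ICE_NO_VSI if the array is full.
 * void * keeps the helper generic so it works on any array of pointers.
 */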
0264 static int ice_get_free_slot(void *array, int size, int curr)
0265 {
0266 int **tmp_array = (int **)array;
0267 int next;
0268
0269 if (curr < (size - 1) && !tmp_array[curr + 1]) {
0270 next = curr + 1;
0271 } else {
0272 int i = 0;
0273
0274 while ((i < size) && (tmp_array[i]))
0275 i++;
0276 if (i == size)
0277 next = ICE_NO_VSI;
0278 else
0279 next = i;
0280 }
0281 return next;
0282 }
0283
0284
0285
0286
0287
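/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */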
0288 void ice_vsi_delete(struct ice_vsi *vsi)
0289 {
0290 struct ice_pf *pf = vsi->back;
0291 struct ice_vsi_ctx *ctxt;
0292 int status;
0293
0294 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
0295 if (!ctxt)
0296 return;
0297
0298 if (vsi->type == ICE_VSI_VF)
0299 ctxt->vf_num = vsi->vf->vf_id;
0300 ctxt->vsi_num = vsi->vsi_num;
0301
0302 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
0303
0304 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
0305 if (status)
0306 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
0307 vsi->vsi_num, status);
0308
0309 kfree(ctxt);
0310 }
0311
0312
0313
0314
0315
0316 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
0317 {
0318 struct ice_pf *pf = vsi->back;
0319 struct device *dev;
0320
0321 dev = ice_pf_to_dev(pf);
0322
0323 if (vsi->af_xdp_zc_qps) {
0324 bitmap_free(vsi->af_xdp_zc_qps);
0325 vsi->af_xdp_zc_qps = NULL;
0326 }
0327
0328 if (vsi->q_vectors) {
0329 devm_kfree(dev, vsi->q_vectors);
0330 vsi->q_vectors = NULL;
0331 }
0332 if (vsi->tx_rings) {
0333 devm_kfree(dev, vsi->tx_rings);
0334 vsi->tx_rings = NULL;
0335 }
0336 if (vsi->rx_rings) {
0337 devm_kfree(dev, vsi->rx_rings);
0338 vsi->rx_rings = NULL;
0339 }
0340 if (vsi->txq_map) {
0341 devm_kfree(dev, vsi->txq_map);
0342 vsi->txq_map = NULL;
0343 }
0344 if (vsi->rxq_map) {
0345 devm_kfree(dev, vsi->rxq_map);
0346 vsi->rxq_map = NULL;
0347 }
0348 }
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
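/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI.
 *
 * Returns 0 on success, negative on failure.
 */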
0359 int ice_vsi_clear(struct ice_vsi *vsi)
0360 {
0361 struct ice_pf *pf = NULL;
0362 struct device *dev;
0363
0364 if (!vsi)
0365 return 0;
0366
0367 if (!vsi->back)
0368 return -EINVAL;
0369
0370 pf = vsi->back;
0371 dev = ice_pf_to_dev(pf);
0372
0373 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
0374 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
0375 return -EINVAL;
0376 }
0377
0378 mutex_lock(&pf->sw_mutex);
0379
0380
0381 pf->vsi[vsi->idx] = NULL;
0382 if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
0383 pf->next_vsi = vsi->idx;
0384 if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
0385 pf->next_vsi = vsi->idx;
0386
0387 ice_vsi_free_arrays(vsi);
0388 mutex_unlock(&pf->sw_mutex);
0389 devm_kfree(dev, vsi);
0390
0391 return 0;
0392 }
0393
0394
0395
0396
0397
0398
0399 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
0400 {
0401 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
0402
0403 if (!q_vector->tx.tx_ring)
0404 return IRQ_HANDLED;
0405
0406 #define FDIR_RX_DESC_CLEAN_BUDGET 64
0407 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
0408 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
0409
0410 return IRQ_HANDLED;
0411 }
0412
0413
0414
0415
0416
0417
0418 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
0419 {
0420 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
0421
0422 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
0423 return IRQ_HANDLED;
0424
0425 q_vector->total_events++;
0426
0427 napi_schedule(&q_vector->napi);
0428
0429 return IRQ_HANDLED;
0430 }
0431
0432 static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
0433 {
0434 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
0435 struct ice_pf *pf = q_vector->vsi->back;
0436 struct ice_vf *vf;
0437 unsigned int bkt;
0438
0439 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
0440 return IRQ_HANDLED;
0441
0442 rcu_read_lock();
0443 ice_for_each_vf_rcu(pf, bkt, vf)
0444 napi_schedule(&vf->repr->q_vector->napi);
0445 rcu_read_unlock();
0446
0447 return IRQ_HANDLED;
0448 }
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
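/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @vsi_type: type of VSI
 * @ch: ptr to channel
 * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
 *
 * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL the
 * VF can be NULL if there is no association with a VF. For ICE_VSI_VF the VF
 * pointer must not be NULL.
 *
 * Returns a pointer to a VSI on success, NULL on failure.
 */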
0463 static struct ice_vsi *
0464 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
0465 struct ice_channel *ch, struct ice_vf *vf)
0466 {
0467 struct device *dev = ice_pf_to_dev(pf);
0468 struct ice_vsi *vsi = NULL;
0469
0470 if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
0471 return NULL;
0472
0473
0474 mutex_lock(&pf->sw_mutex);
0475
0476
0477
0478
0479
0480 if (pf->next_vsi == ICE_NO_VSI) {
0481 dev_dbg(dev, "out of VSI slots!\n");
0482 goto unlock_pf;
0483 }
0484
0485 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
0486 if (!vsi)
0487 goto unlock_pf;
0488
0489 vsi->type = vsi_type;
0490 vsi->back = pf;
0491 set_bit(ICE_VSI_DOWN, vsi->state);
0492
0493 if (vsi_type == ICE_VSI_VF)
0494 ice_vsi_set_num_qs(vsi, vf);
0495 else if (vsi_type != ICE_VSI_CHNL)
0496 ice_vsi_set_num_qs(vsi, NULL);
0497
0498 switch (vsi->type) {
0499 case ICE_VSI_SWITCHDEV_CTRL:
0500 if (ice_vsi_alloc_arrays(vsi))
0501 goto err_rings;
0502
0503
0504 vsi->irq_handler = ice_eswitch_msix_clean_rings;
0505 break;
0506 case ICE_VSI_PF:
0507 if (ice_vsi_alloc_arrays(vsi))
0508 goto err_rings;
0509
0510
0511 vsi->irq_handler = ice_msix_clean_rings;
0512 break;
0513 case ICE_VSI_CTRL:
0514 if (ice_vsi_alloc_arrays(vsi))
0515 goto err_rings;
0516
0517
0518 vsi->irq_handler = ice_msix_clean_ctrl_vsi;
0519
0520
0521
0522
0523 vsi->vf = vf;
0524 break;
0525 case ICE_VSI_VF:
0526 if (ice_vsi_alloc_arrays(vsi))
0527 goto err_rings;
0528 vsi->vf = vf;
0529 break;
0530 case ICE_VSI_CHNL:
0531 if (!ch)
0532 goto err_rings;
0533 vsi->num_rxq = ch->num_rxq;
0534 vsi->num_txq = ch->num_txq;
0535 vsi->next_base_q = ch->base_q;
0536 break;
0537 case ICE_VSI_LB:
0538 if (ice_vsi_alloc_arrays(vsi))
0539 goto err_rings;
0540 break;
0541 default:
0542 dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
0543 goto unlock_pf;
0544 }
0545
0546 if (vsi->type == ICE_VSI_CTRL && !vf) {
0547
0548 vsi->idx = pf->num_alloc_vsi - 1;
0549 pf->ctrl_vsi_idx = vsi->idx;
0550 pf->vsi[vsi->idx] = vsi;
0551 } else {
0552
0553 vsi->idx = pf->next_vsi;
0554 pf->vsi[pf->next_vsi] = vsi;
0555
0556
0557 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
0558 pf->next_vsi);
0559 }
0560
0561 if (vsi->type == ICE_VSI_CTRL && vf)
0562 vf->ctrl_vsi_idx = vsi->idx;
0563 goto unlock_pf;
0564
0565 err_rings:
0566 devm_kfree(dev, vsi);
0567 vsi = NULL;
0568 unlock_pf:
0569 mutex_unlock(&pf->sw_mutex);
0570 return vsi;
0571 }
0572
0573
0574
0575
0576
0577
0578
0579
0580
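/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the flow director guaranteed and best-effort filter counts
 * for the VSI.
 *
 * Returns 0 on success, -EPERM on no-op or -EINVAL on failure.
 */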
0581 static int ice_alloc_fd_res(struct ice_vsi *vsi)
0582 {
0583 struct ice_pf *pf = vsi->back;
0584 u32 g_val, b_val;
0585
0586
0587
0588
0589
0590 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
0591 return -EPERM;
0592
0593 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
0594 vsi->type == ICE_VSI_CHNL))
0595 return -EPERM;
0596
0597
0598 g_val = pf->hw.func_caps.fd_fltr_guar;
0599 if (!g_val)
0600 return -EPERM;
0601
0602
0603 b_val = pf->hw.func_caps.fd_fltr_best_effort;
0604 if (!b_val)
0605 return -EPERM;
0606
0607
0608
0609
0610 #define ICE_PF_VSI_GFLTR 64
0611
0612
0613
0614
0615 if (vsi->type == ICE_VSI_PF) {
0616 vsi->num_gfltr = g_val;
0617
0618
0619
0620 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
0621 if (g_val < ICE_PF_VSI_GFLTR)
0622 return -EPERM;
0623
0624 vsi->num_gfltr = ICE_PF_VSI_GFLTR;
0625 }
0626
0627
0628 vsi->num_bfltr = b_val;
0629 } else if (vsi->type == ICE_VSI_VF) {
0630 vsi->num_gfltr = 0;
0631
0632
0633 vsi->num_bfltr = b_val;
0634 } else {
0635 struct ice_vsi *main_vsi;
0636 int numtc;
0637
0638 main_vsi = ice_get_main_vsi(pf);
0639 if (!main_vsi)
0640 return -EPERM;
0641
0642 if (!main_vsi->all_numtc)
0643 return -EINVAL;
0644
0645
0646 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
0647
0648
0649
0650
0651 if (numtc < ICE_CHNL_START_TC)
0652 return -EPERM;
0653
0654 g_val -= ICE_PF_VSI_GFLTR;
0655
0656 vsi->num_gfltr = g_val / numtc;
0657
0658
0659 vsi->num_bfltr = b_val;
0660 }
0661
0662 return 0;
0663 }
0664
0665
0666
0667
0668
0669
0670
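/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */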
0671 static int ice_vsi_get_qs(struct ice_vsi *vsi)
0672 {
0673 struct ice_pf *pf = vsi->back;
0674 struct ice_qs_cfg tx_qs_cfg = {
0675 .qs_mutex = &pf->avail_q_mutex,
0676 .pf_map = pf->avail_txqs,
0677 .pf_map_size = pf->max_pf_txqs,
0678 .q_count = vsi->alloc_txq,
0679 .scatter_count = ICE_MAX_SCATTER_TXQS,
0680 .vsi_map = vsi->txq_map,
0681 .vsi_map_offset = 0,
0682 .mapping_mode = ICE_VSI_MAP_CONTIG
0683 };
0684 struct ice_qs_cfg rx_qs_cfg = {
0685 .qs_mutex = &pf->avail_q_mutex,
0686 .pf_map = pf->avail_rxqs,
0687 .pf_map_size = pf->max_pf_rxqs,
0688 .q_count = vsi->alloc_rxq,
0689 .scatter_count = ICE_MAX_SCATTER_RXQS,
0690 .vsi_map = vsi->rxq_map,
0691 .vsi_map_offset = 0,
0692 .mapping_mode = ICE_VSI_MAP_CONTIG
0693 };
0694 int ret;
0695
0696 if (vsi->type == ICE_VSI_CHNL)
0697 return 0;
0698
0699 ret = __ice_vsi_get_qs(&tx_qs_cfg);
0700 if (ret)
0701 return ret;
0702 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
0703
0704 ret = __ice_vsi_get_qs(&rx_qs_cfg);
0705 if (ret)
0706 return ret;
0707 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
0708
0709 return 0;
0710 }
0711
0712
0713
0714
0715
0716 static void ice_vsi_put_qs(struct ice_vsi *vsi)
0717 {
0718 struct ice_pf *pf = vsi->back;
0719 int i;
0720
0721 mutex_lock(&pf->avail_q_mutex);
0722
0723 ice_for_each_alloc_txq(vsi, i) {
0724 clear_bit(vsi->txq_map[i], pf->avail_txqs);
0725 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
0726 }
0727
0728 ice_for_each_alloc_rxq(vsi, i) {
0729 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
0730 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
0731 }
0732
0733 mutex_unlock(&pf->avail_q_mutex);
0734 }
0735
0736
0737
0738
0739
0740
0741
0742 bool ice_is_safe_mode(struct ice_pf *pf)
0743 {
0744 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
0745 }
0746
0747
0748
0749
0750
0751
0752
0753 bool ice_is_rdma_ena(struct ice_pf *pf)
0754 {
0755 return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
0756 }
0757
0758
0759
0760
0761
0762
0763
0764
0765 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
0766 {
0767 struct ice_pf *pf = vsi->back;
0768 int status;
0769
0770 if (ice_is_safe_mode(pf))
0771 return;
0772
0773 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
0774 if (status)
0775 dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
0776 vsi->vsi_num, status);
0777 }
0778
0779
0780
0781
0782
0783 static void ice_rss_clean(struct ice_vsi *vsi)
0784 {
0785 struct ice_pf *pf = vsi->back;
0786 struct device *dev;
0787
0788 dev = ice_pf_to_dev(pf);
0789
0790 if (vsi->rss_hkey_user)
0791 devm_kfree(dev, vsi->rss_hkey_user);
0792 if (vsi->rss_lut_user)
0793 devm_kfree(dev, vsi->rss_lut_user);
0794
0795 ice_vsi_clean_rss_flow_fld(vsi);
0796
0797 if (!ice_is_safe_mode(pf))
0798 ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
0799 }
0800
0801
0802
0803
0804
0805 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
0806 {
0807 struct ice_hw_common_caps *cap;
0808 struct ice_pf *pf = vsi->back;
0809
0810 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
0811 vsi->rss_size = 1;
0812 return;
0813 }
0814
0815 cap = &pf->hw.func_caps.common_cap;
0816 switch (vsi->type) {
0817 case ICE_VSI_CHNL:
0818 case ICE_VSI_PF:
0819
0820 vsi->rss_table_size = (u16)cap->rss_table_size;
0821 if (vsi->type == ICE_VSI_CHNL)
0822 vsi->rss_size = min_t(u16, vsi->num_rxq,
0823 BIT(cap->rss_table_entry_width));
0824 else
0825 vsi->rss_size = min_t(u16, num_online_cpus(),
0826 BIT(cap->rss_table_entry_width));
0827 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
0828 break;
0829 case ICE_VSI_SWITCHDEV_CTRL:
0830 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
0831 vsi->rss_size = min_t(u16, num_online_cpus(),
0832 BIT(cap->rss_table_entry_width));
0833 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
0834 break;
0835 case ICE_VSI_VF:
0836
0837
0838
0839 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
0840 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
0841 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
0842 break;
0843 case ICE_VSI_LB:
0844 break;
0845 default:
0846 dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
0847 ice_vsi_type_str(vsi->type));
0848 break;
0849 }
0850 }
0851
0852
0853
0854
0855
0856
0857
0858
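/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */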
0859 static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
0860 {
0861 u32 table = 0;
0862
0863 memset(&ctxt->info, 0, sizeof(ctxt->info));
0864
0865 ctxt->alloc_from_pool = true;
0866
0867 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
0868
0869 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
0870
0871 ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
0872 ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
0873 ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
0874
0875
0876
0877
0878
0879 if (ice_is_dvm_ena(hw)) {
0880 ctxt->info.inner_vlan_flags |=
0881 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
0882 ctxt->info.outer_vlan_flags =
0883 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
0884 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
0885 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
0886 ctxt->info.outer_vlan_flags |=
0887 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
0888 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
0889 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
0890 ctxt->info.outer_vlan_flags |=
0891 FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
0892 ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
0893 }
0894
0895 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
0896 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
0897 table |= ICE_UP_TABLE_TRANSLATE(2, 2);
0898 table |= ICE_UP_TABLE_TRANSLATE(3, 3);
0899 table |= ICE_UP_TABLE_TRANSLATE(4, 4);
0900 table |= ICE_UP_TABLE_TRANSLATE(5, 5);
0901 table |= ICE_UP_TABLE_TRANSLATE(6, 6);
0902 table |= ICE_UP_TABLE_TRANSLATE(7, 7);
0903 ctxt->info.ingress_table = cpu_to_le32(table);
0904 ctxt->info.egress_table = cpu_to_le32(table);
0905
0906 ctxt->info.outer_up_table = cpu_to_le32(table);
0907
0908 }
0909
0910
0911
0912
0913
0914
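/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */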
0915 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
0916 {
0917 u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
0918 u16 num_txq_per_tc, num_rxq_per_tc;
0919 u16 qcount_tx = vsi->alloc_txq;
0920 u16 qcount_rx = vsi->alloc_rxq;
0921 u8 netdev_tc = 0;
0922 int i;
0923
0924 if (!vsi->tc_cfg.numtc) {
0925
0926 vsi->tc_cfg.numtc = 1;
0927 vsi->tc_cfg.ena_tc = 1;
0928 }
0929
0930 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
0931 if (!num_rxq_per_tc)
0932 num_rxq_per_tc = 1;
0933 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
0934 if (!num_txq_per_tc)
0935 num_txq_per_tc = 1;
0936
0937
0938 pow = (u16)order_base_2(num_rxq_per_tc);
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
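/* TC queue mapping: each tc_mapping[] entry packs the Rx queue offset for
 * the TC into the low bits and the queue count, expressed as a power of 2
 * (hence order_base_2() above), into the adjacent field. A disabled TC is
 * reported with offset 0 and a single queue so traffic classified to it
 * still lands on a valid queue.
 */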
0951 ice_for_each_traffic_class(i) {
0952 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
0953
0954 vsi->tc_cfg.tc_info[i].qoffset = 0;
0955 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
0956 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
0957 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
0958 ctxt->info.tc_mapping[i] = 0;
0959 continue;
0960 }
0961
0962
0963 vsi->tc_cfg.tc_info[i].qoffset = offset;
0964 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
0965 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
0966 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
0967
0968 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
0969 ICE_AQ_VSI_TC_Q_OFFSET_M) |
0970 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
0971 ICE_AQ_VSI_TC_Q_NUM_M);
0972 offset += num_rxq_per_tc;
0973 tx_count += num_txq_per_tc;
0974 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
0975 }
0976
0977
0978
0979
0980
0981
0982
0983 if (offset)
0984 rx_count = offset;
0985 else
0986 rx_count = num_rxq_per_tc;
0987
0988 if (rx_count > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
rx_count, vsi->alloc_rxq);
0991 return -EINVAL;
0992 }
0993
0994 if (tx_count > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
tx_count, vsi->alloc_txq);
0997 return -EINVAL;
0998 }
0999
1000 vsi->num_txq = tx_count;
1001 vsi->num_rxq = rx_count;
1002
1003 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
1004 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
1005
1006
1007
1008 vsi->num_txq = vsi->num_rxq;
1009 }
1010
1011
1012 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1013
1014
1015
1016
1017 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
1018 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1019
1020 return 0;
1021 }
1022
1023
1024
1025
1026
1027
1028 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1029 {
1030 u8 dflt_q_group, dflt_q_prio;
1031 u16 dflt_q, report_q, val;
1032
1033 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
1034 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1035 return;
1036
1037 val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1038 ctxt->info.valid_sections |= cpu_to_le16(val);
1039 dflt_q = 0;
1040 dflt_q_group = 0;
1041 report_q = 0;
1042 dflt_q_prio = 0;
1043
1044
1045 val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
1046 ctxt->info.fd_options = cpu_to_le16(val);
1047
1048 ctxt->info.max_fd_fltr_dedicated =
1049 cpu_to_le16(vsi->num_gfltr);
1050
1051 ctxt->info.max_fd_fltr_shared =
1052 cpu_to_le16(vsi->num_bfltr);
1053
1054 val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
1055 ICE_AQ_VSI_FD_DEF_Q_M);
1056
1057 val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
1058 ICE_AQ_VSI_FD_DEF_GRP_M);
1059 ctxt->info.fd_def_q = cpu_to_le16(val);
1060
1061 val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
1062 ICE_AQ_VSI_FD_REPORT_Q_M);
1063
1064 val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
1065 ICE_AQ_VSI_FD_DEF_PRIORITY_M);
1066 ctxt->info.fd_report_opt = cpu_to_le16(val);
1067 }
1068
1069
1070
1071
1072
1073
1074 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1075 {
1076 u8 lut_type, hash_type;
1077 struct device *dev;
1078 struct ice_pf *pf;
1079
1080 pf = vsi->back;
1081 dev = ice_pf_to_dev(pf);
1082
1083 switch (vsi->type) {
1084 case ICE_VSI_CHNL:
1085 case ICE_VSI_PF:
1086
1087 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
1088 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1089 break;
1090 case ICE_VSI_VF:
1091
1092 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
1093 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1094 break;
1095 default:
1096 dev_dbg(dev, "Unsupported VSI type %s\n",
1097 ice_vsi_type_str(vsi->type));
1098 return;
1099 }
1100
1101 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
1102 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
1103 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
1104 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
1105 }
1106
1107 static void
1108 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1109 {
1110 struct ice_pf *pf = vsi->back;
1111 u16 qcount, qmap;
1112 u8 offset = 0;
1113 int pow;
1114
1115 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
1116
1117 pow = order_base_2(qcount);
1118 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
1119 ICE_AQ_VSI_TC_Q_OFFSET_M) |
1120 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
1121 ICE_AQ_VSI_TC_Q_NUM_M);
1122
1123 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1124 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1125 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
1126 ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
1127 }
1128
1129
1130
1131
1132
1133
1134
1135
1136
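/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @init_vsi: is this call creating a VSI
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 *
 * Returns 0 on success and a negative value on error.
 */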
1137 static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
1138 {
1139 struct ice_pf *pf = vsi->back;
1140 struct ice_hw *hw = &pf->hw;
1141 struct ice_vsi_ctx *ctxt;
1142 struct device *dev;
1143 int ret = 0;
1144
1145 dev = ice_pf_to_dev(pf);
1146 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1147 if (!ctxt)
1148 return -ENOMEM;
1149
1150 switch (vsi->type) {
1151 case ICE_VSI_CTRL:
1152 case ICE_VSI_LB:
1153 case ICE_VSI_PF:
1154 ctxt->flags = ICE_AQ_VSI_TYPE_PF;
1155 break;
1156 case ICE_VSI_SWITCHDEV_CTRL:
1157 case ICE_VSI_CHNL:
1158 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1159 break;
1160 case ICE_VSI_VF:
1161 ctxt->flags = ICE_AQ_VSI_TYPE_VF;
1162
1163 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
1164 break;
1165 default:
1166 ret = -ENODEV;
1167 goto out;
1168 }
1169
1170
1171
1172
1173 if (vsi->type == ICE_VSI_CHNL) {
1174 struct ice_vsi *main_vsi;
1175
1176 main_vsi = ice_get_main_vsi(pf);
1177 if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
1178 ctxt->info.sw_flags2 |=
1179 ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1180 else
1181 ctxt->info.sw_flags2 &=
1182 ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1183 }
1184
1185 ice_set_dflt_vsi_ctx(hw, ctxt);
1186 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1187 ice_set_fd_vsi_ctx(ctxt, vsi);
1188
1189 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1190 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
1191
1192
1193 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1194 vsi->type != ICE_VSI_CTRL) {
1195 ice_set_rss_vsi_ctx(ctxt, vsi);
1196
1197
1198
1199 if (!init_vsi)
1200 ctxt->info.valid_sections |=
1201 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1202 }
1203
1204 ctxt->info.sw_id = vsi->port_info->sw_id;
1205 if (vsi->type == ICE_VSI_CHNL) {
1206 ice_chnl_vsi_setup_q_map(vsi, ctxt);
1207 } else {
1208 ret = ice_vsi_setup_q_map(vsi, ctxt);
1209 if (ret)
1210 goto out;
1211
1212 if (!init_vsi)
1213
1214
1215
1216 ctxt->info.valid_sections |=
1217 cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
1218 }
1219
1220
1221 if (vsi->type == ICE_VSI_PF) {
1222 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1223 ctxt->info.valid_sections |=
1224 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1225 }
1226
1227 if (init_vsi) {
1228 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1229 if (ret) {
1230 dev_err(dev, "Add VSI failed, err %d\n", ret);
1231 ret = -EIO;
1232 goto out;
1233 }
1234 } else {
1235 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1236 if (ret) {
1237 dev_err(dev, "Update VSI failed, err %d\n", ret);
1238 ret = -EIO;
1239 goto out;
1240 }
1241 }
1242
1243
1244 vsi->info = ctxt->info;
1245
1246
1247 vsi->vsi_num = ctxt->vsi_num;
1248
1249 out:
1250 kfree(ctxt);
1251 return ret;
1252 }
1253
1254
1255
1256
1257
1258
1259
1260
1261
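/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */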
1262 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
1263 {
1264 int count = 0;
1265 int i;
1266
1267 if (!res || index >= res->end)
1268 return -EINVAL;
1269
1270 id |= ICE_RES_VALID_BIT;
1271 for (i = index; i < res->end && res->list[i] == id; i++) {
1272 res->list[i] = 0;
1273 count++;
1274 }
1275
1276 return count;
1277 }
1278
1279
1280
1281
1282
1283
1284
1285
1286
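/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */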
1287 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
1288 {
1289 u16 start = 0, end = 0;
1290
1291 if (needed > res->end)
1292 return -ENOMEM;
1293
1294 id |= ICE_RES_VALID_BIT;
1295
1296 do {
1297
1298 if (res->list[end++] & ICE_RES_VALID_BIT) {
1299 start = end;
1300 if ((start + needed) > res->end)
1301 break;
1302 }
1303
1304 if (end == (start + needed)) {
1305 int i = start;
1306
1307
1308 while (i != end)
1309 res->list[i++] = id;
1310
1311 return start;
1312 }
1313 } while (end < res->end);
1314
1315 return -ENOMEM;
1316 }
1317
1318
1319
1320
1321
1322 static u16 ice_get_free_res_count(struct ice_res_tracker *res)
1323 {
1324 u16 i, count = 0;
1325
1326 for (i = 0; i < res->end; i++)
1327 if (!(res->list[i] & ICE_RES_VALID_BIT))
1328 count++;
1329
1330 return count;
1331 }
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342 int
1343 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
1344 {
1345 if (!res || !pf)
1346 return -EINVAL;
1347
1348 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
1349 dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
1350 needed, res->num_entries, id);
1351 return -EINVAL;
1352 }
1353
1354 return ice_search_res(res, needed, id);
1355 }
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367 static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
1368 {
1369 struct ice_vf *vf;
1370 unsigned int bkt;
1371 int base;
1372
1373 rcu_read_lock();
1374 ice_for_each_vf_rcu(pf, bkt, vf) {
1375 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1376 base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
1377 rcu_read_unlock();
1378 return base;
1379 }
1380 }
1381 rcu_read_unlock();
1382
1383 return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
1384 ICE_RES_VF_CTRL_VEC_ID);
1385 }
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
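/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and should be called only after VSI persists
 * over reset.
 *
 * Returns 0 on success or a negative value on error.
 */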
1397 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
1398 {
1399 struct ice_pf *pf = vsi->back;
1400 struct device *dev;
1401 u16 num_q_vectors;
1402 int base;
1403
1404 dev = ice_pf_to_dev(pf);
1405
1406 if (vsi->type == ICE_VSI_VF)
1407 return 0;
1408 if (vsi->type == ICE_VSI_CHNL)
1409 return 0;
1410
1411 if (vsi->base_vector) {
1412 dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
1413 vsi->vsi_num, vsi->base_vector);
1414 return -EEXIST;
1415 }
1416
1417 num_q_vectors = vsi->num_q_vectors;
1418
1419 if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
1420 base = ice_get_vf_ctrl_res(pf, vsi);
1421 } else {
1422 base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
1423 vsi->idx);
1424 }
1425
1426 if (base < 0) {
1427 dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
1428 ice_get_free_res_count(pf->irq_tracker),
1429 ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
1430 return -ENOENT;
1431 }
1432 vsi->base_vector = (u16)base;
1433 pf->num_avail_sw_msix -= num_q_vectors;
1434
1435 return 0;
1436 }
1437
1438
1439
1440
1441
1442 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1443 {
1444 int i;
1445
1446
1447 if (vsi->q_vectors) {
1448 ice_for_each_q_vector(vsi, i) {
1449 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1450
1451 if (q_vector) {
1452 q_vector->tx.tx_ring = NULL;
1453 q_vector->rx.rx_ring = NULL;
1454 }
1455 }
1456 }
1457
1458 if (vsi->tx_rings) {
1459 ice_for_each_alloc_txq(vsi, i) {
1460 if (vsi->tx_rings[i]) {
1461 kfree_rcu(vsi->tx_rings[i], rcu);
1462 WRITE_ONCE(vsi->tx_rings[i], NULL);
1463 }
1464 }
1465 }
1466 if (vsi->rx_rings) {
1467 ice_for_each_alloc_rxq(vsi, i) {
1468 if (vsi->rx_rings[i]) {
1469 kfree_rcu(vsi->rx_rings[i], rcu);
1470 WRITE_ONCE(vsi->rx_rings[i], NULL);
1471 }
1472 }
1473 }
1474 }
1475
1476
1477
1478
1479
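/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */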
1480 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1481 {
1482 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
1483 struct ice_pf *pf = vsi->back;
1484 struct device *dev;
1485 u16 i;
1486
1487 dev = ice_pf_to_dev(pf);
1488
1489 ice_for_each_alloc_txq(vsi, i) {
1490 struct ice_tx_ring *ring;
1491
1492
1493 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1494
1495 if (!ring)
1496 goto err_out;
1497
1498 ring->q_index = i;
1499 ring->reg_idx = vsi->txq_map[i];
1500 ring->vsi = vsi;
1501 ring->tx_tstamps = &pf->ptp.port.tx;
1502 ring->dev = dev;
1503 ring->count = vsi->num_tx_desc;
1504 ring->txq_teid = ICE_INVAL_TEID;
1505 if (dvm_ena)
1506 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
1507 else
1508 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1509 WRITE_ONCE(vsi->tx_rings[i], ring);
1510 }
1511
1512
1513 ice_for_each_alloc_rxq(vsi, i) {
1514 struct ice_rx_ring *ring;
1515
1516
1517 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1518 if (!ring)
1519 goto err_out;
1520
1521 ring->q_index = i;
1522 ring->reg_idx = vsi->rxq_map[i];
1523 ring->vsi = vsi;
1524 ring->netdev = vsi->netdev;
1525 ring->dev = dev;
1526 ring->count = vsi->num_rx_desc;
1527 WRITE_ONCE(vsi->rx_rings[i], ring);
1528 }
1529
1530 return 0;
1531
1532 err_out:
1533 ice_vsi_clear_rings(vsi);
1534 return -ENOMEM;
1535 }
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1547 {
1548 u8 *lut;
1549
1550 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1551 if (!lut)
1552 return;
1553
1554 if (ena) {
1555 if (vsi->rss_lut_user)
1556 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1557 else
1558 ice_fill_rss_lut(lut, vsi->rss_table_size,
1559 vsi->rss_size);
1560 }
1561
1562 ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1563 kfree(lut);
1564 }
1565
1566
1567
1568
1569
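/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 *
 * Returns 0 on success and a negative value on error.
 */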
1570 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1571 {
1572 struct ice_pf *pf = vsi->back;
1573 struct device *dev;
1574 u8 *lut, *key;
1575 int err;
1576
1577 dev = ice_pf_to_dev(pf);
1578 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
1579 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
1580 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
1581 } else {
1582 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1583
1584
1585
1586
1587
1588
1589
1590 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
1591 vsi->orig_rss_size <= vsi->num_rxq) {
1592 vsi->rss_size = vsi->orig_rss_size;
1593
1594 vsi->orig_rss_size = 0;
1595 }
1596 }
1597
1598 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1599 if (!lut)
1600 return -ENOMEM;
1601
1602 if (vsi->rss_lut_user)
1603 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1604 else
1605 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1606
1607 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1608 if (err) {
1609 dev_err(dev, "set_rss_lut failed, error %d\n", err);
1610 goto ice_vsi_cfg_rss_exit;
1611 }
1612
1613 key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
1614 if (!key) {
1615 err = -ENOMEM;
1616 goto ice_vsi_cfg_rss_exit;
1617 }
1618
1619 if (vsi->rss_hkey_user)
1620 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1621 else
1622 netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1623
1624 err = ice_set_rss_key(vsi, key);
1625 if (err)
1626 dev_err(dev, "set_rss_key failed, error %d\n", err);
1627
1628 kfree(key);
1629 ice_vsi_cfg_rss_exit:
1630 kfree(lut);
1631 return err;
1632 }
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
1643 {
1644 struct ice_pf *pf = vsi->back;
1645 struct device *dev;
1646 int status;
1647
1648 dev = ice_pf_to_dev(pf);
1649 if (ice_is_safe_mode(pf)) {
1650 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1651 vsi->vsi_num);
1652 return;
1653 }
1654
1655 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
1656 if (status)
1657 dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
1658 vsi->vsi_num, status);
1659 }
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1673 {
1674 u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
1675 struct ice_pf *pf = vsi->back;
1676 struct ice_hw *hw = &pf->hw;
1677 struct device *dev;
1678 int status;
1679
1680 dev = ice_pf_to_dev(pf);
1681 if (ice_is_safe_mode(pf)) {
1682 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1683 vsi_num);
1684 return;
1685 }
1686
1687 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1688 ICE_FLOW_SEG_HDR_IPV4);
1689 if (status)
1690 dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
1691 vsi_num, status);
1692
1693
1694 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1695 ICE_FLOW_SEG_HDR_IPV6);
1696 if (status)
1697 dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
1698 vsi_num, status);
1699
1700
1701 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
1702 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1703 if (status)
1704 dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
1705 vsi_num, status);
1706
1707
1708 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
1709 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1710 if (status)
1711 dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
1712 vsi_num, status);
1713
1714
1715 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1716 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1717 if (status)
1718 dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
1719 vsi_num, status);
1720
1721
1722 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
1723 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1724 if (status)
1725 dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
1726 vsi_num, status);
1727
1728
1729 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
1730 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1731 if (status)
1732 dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
1733 vsi_num, status);
1734
1735
1736 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1737 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1738 if (status)
1739 dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
1740 vsi_num, status);
1741
1742 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
1743 ICE_FLOW_SEG_HDR_ESP);
1744 if (status)
1745 dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
1746 vsi_num, status);
1747 }
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759 bool ice_pf_state_is_nominal(struct ice_pf *pf)
1760 {
1761 DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
1762
1763 if (!pf)
1764 return false;
1765
1766 bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
1767 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1768 return false;
1769
1770 return true;
1771 }
1772
1773
1774
1775
1776
1777 void ice_update_eth_stats(struct ice_vsi *vsi)
1778 {
1779 struct ice_eth_stats *prev_es, *cur_es;
1780 struct ice_hw *hw = &vsi->back->hw;
1781 u16 vsi_num = vsi->vsi_num;
1782
1783 prev_es = &vsi->eth_stats_prev;
1784 cur_es = &vsi->eth_stats;
1785
1786 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1787 &prev_es->rx_bytes, &cur_es->rx_bytes);
1788
1789 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1790 &prev_es->rx_unicast, &cur_es->rx_unicast);
1791
1792 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1793 &prev_es->rx_multicast, &cur_es->rx_multicast);
1794
1795 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1796 &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1797
1798 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1799 &prev_es->rx_discards, &cur_es->rx_discards);
1800
1801 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1802 &prev_es->tx_bytes, &cur_es->tx_bytes);
1803
1804 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1805 &prev_es->tx_unicast, &cur_es->tx_unicast);
1806
1807 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1808 &prev_es->tx_multicast, &cur_es->tx_multicast);
1809
1810 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1811 &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1812
1813 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1814 &prev_es->tx_errors, &cur_es->tx_errors);
1815
1816 vsi->stat_offsets_loaded = true;
1817 }
1818
1819
1820
1821
1822
1823 void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
1824 {
1825 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
1826 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1827 vsi->rx_buf_len = ICE_RXBUF_2048;
1828 #if (PAGE_SIZE < 8192)
1829 } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
1830 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
1831 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
1832 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
1833 #endif
1834 } else {
1835 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1836 #if (PAGE_SIZE < 8192)
1837 vsi->rx_buf_len = ICE_RXBUF_3072;
1838 #else
1839 vsi->rx_buf_len = ICE_RXBUF_2048;
1840 #endif
1841 }
1842 }
1843
1844
1845
1846
1847
1848
1849
1850
1851
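/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */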
1852 void
1853 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
1854 bool ena_ts)
1855 {
1856 int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1857
1858
1859 regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1860 QRXFLXP_CNTXT_RXDID_PRIO_M |
1861 QRXFLXP_CNTXT_TS_M);
1862
1863 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
1864 QRXFLXP_CNTXT_RXDID_IDX_M;
1865
1866 regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
1867 QRXFLXP_CNTXT_RXDID_PRIO_M;
1868
1869 if (ena_ts)
1870
1871 regval |= QRXFLXP_CNTXT_TS_M;
1872
1873 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
1874 }
1875
1876 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
1877 {
1878 if (q_idx >= vsi->num_rxq)
1879 return -EINVAL;
1880
1881 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
1882 }
1883
1884 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
1885 {
1886 struct ice_aqc_add_tx_qgrp *qg_buf;
1887 int err;
1888
1889 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
1890 return -EINVAL;
1891
1892 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
1893 if (!qg_buf)
1894 return -ENOMEM;
1895
1896 qg_buf->num_txqs = 1;
1897
1898 err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
1899 kfree(qg_buf);
1900 return err;
1901 }
1902
1903
1904
1905
1906
1907
1908
1909
1910 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
1911 {
1912 u16 i;
1913
1914 if (vsi->type == ICE_VSI_VF)
1915 goto setup_rings;
1916
1917 ice_vsi_cfg_frame_size(vsi);
1918 setup_rings:
1919
1920 ice_for_each_rxq(vsi, i) {
1921 int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
1922
1923 if (err)
1924 return err;
1925 }
1926
1927 return 0;
1928 }
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
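/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */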
1939 static int
1940 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
1941 {
1942 struct ice_aqc_add_tx_qgrp *qg_buf;
1943 u16 q_idx = 0;
1944 int err = 0;
1945
1946 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
1947 if (!qg_buf)
1948 return -ENOMEM;
1949
1950 qg_buf->num_txqs = 1;
1951
1952 for (q_idx = 0; q_idx < count; q_idx++) {
1953 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1954 if (err)
1955 goto err_cfg_txqs;
1956 }
1957
1958 err_cfg_txqs:
1959 kfree(qg_buf);
1960 return err;
1961 }
1962
1963
1964
1965
1966
1967
1968
1969
1970 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1971 {
1972 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
1973 }
1974
1975
1976
1977
1978
1979
1980
1981
1982 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1983 {
1984 int ret;
1985 int i;
1986
1987 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
1988 if (ret)
1989 return ret;
1990
1991 ice_for_each_rxq(vsi, i)
1992 ice_tx_xsk_pool(vsi, i);
1993
1994 return ret;
1995 }
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
2006 {
2007 u32 val = intrl / gran;
2008
2009 if (val)
2010 return val | GLINT_RATE_INTRL_ENA_M;
2011 return 0;
2012 }
2013
2014
2015
2016
2017
2018
2019 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
2020 {
2021 struct ice_hw *hw = &q_vector->vsi->back->hw;
2022
2023 wr32(hw, GLINT_RATE(q_vector->reg_idx),
2024 ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
2025 }
2026
2027 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
2028 {
2029 switch (rc->type) {
2030 case ICE_RX_CONTAINER:
2031 if (rc->rx_ring)
2032 return rc->rx_ring->q_vector;
2033 break;
2034 case ICE_TX_CONTAINER:
2035 if (rc->tx_ring)
2036 return rc->tx_ring->q_vector;
2037 break;
2038 default:
2039 break;
2040 }
2041
2042 return NULL;
2043 }
2044
2045
2046
2047
2048
2049
2050
2051 static void __ice_write_itr(struct ice_q_vector *q_vector,
2052 struct ice_ring_container *rc, u16 itr)
2053 {
2054 struct ice_hw *hw = &q_vector->vsi->back->hw;
2055
2056 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
2057 ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
2058 }
2059
2060
2061
2062
2063
2064
2065 void ice_write_itr(struct ice_ring_container *rc, u16 itr)
2066 {
2067 struct ice_q_vector *q_vector;
2068
2069 q_vector = ice_pull_qvec_from_rc(rc);
2070 if (!q_vector)
2071 return;
2072
2073 __ice_write_itr(q_vector, rc, itr);
2074 }
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
2087 {
2088 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
2089
2090
2091
2092
2093
2094
2095 ice_write_intrl(q_vector, 4);
2096 } else {
2097 ice_write_intrl(q_vector, q_vector->intrl);
2098 }
2099 }
2100
2101
2102
2103
2104
2105
2106
2107
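/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs ITR settings for each queue vector and maps the VSI's Tx and Rx
 * queues to their assigned interrupt vectors.
 */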
2108 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
2109 {
2110 struct ice_pf *pf = vsi->back;
2111 struct ice_hw *hw = &pf->hw;
2112 u16 txq = 0, rxq = 0;
2113 int i, q;
2114
2115 ice_for_each_q_vector(vsi, i) {
2116 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2117 u16 reg_idx = q_vector->reg_idx;
2118
2119 ice_cfg_itr(hw, q_vector);
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132 for (q = 0; q < q_vector->num_ring_tx; q++) {
2133 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
2134 q_vector->tx.itr_idx);
2135 txq++;
2136 }
2137
2138 for (q = 0; q < q_vector->num_ring_rx; q++) {
2139 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
2140 q_vector->rx.itr_idx);
2141 rxq++;
2142 }
2143 }
2144 }
2145
2146
2147
2148
2149
2150
2151
2152 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
2153 {
2154 return ice_vsi_ctrl_all_rx_rings(vsi, true);
2155 }
2156
2157
2158
2159
2160
2161
2162
2163 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
2164 {
2165 return ice_vsi_ctrl_all_rx_rings(vsi, false);
2166 }
2167
2168
2169
2170
2171
2172
2173
2174
2175
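/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 * @count: number of Tx ring array elements
 */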
2176 static int
2177 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2178 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
2179 {
2180 u16 q_idx;
2181
2182 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2183 return -EINVAL;
2184
2185 for (q_idx = 0; q_idx < count; q_idx++) {
2186 struct ice_txq_meta txq_meta = { };
2187 int status;
2188
2189 if (!rings || !rings[q_idx])
2190 return -EINVAL;
2191
2192 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2193 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
2194 rings[q_idx], &txq_meta);
2195
2196 if (status)
2197 return status;
2198 }
2199
2200 return 0;
2201 }
2202
2203
2204
2205
2206
2207
2208
2209 int
2210 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2211 u16 rel_vmvf_num)
2212 {
2213 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
2214 }
2215
2216
2217
2218
2219
2220 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2221 {
2222 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2223 }
2224
2225
2226
2227
2228
2229
2230
2231 bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
2232 {
2233 if (!vsi)
2234 return false;
2235
2236 return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
2237 }
2238
2239 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2240 {
2241 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2242 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2243 vsi->tc_cfg.numtc = 1;
2244 return;
2245 }
2246
2247
2248 ice_vsi_set_dcb_tc_cfg(vsi);
2249 }
2250
2251
2252
2253
2254
2255 static int
2256 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
2257 {
2258 u16 i;
2259
2260 if (!vsi || !vsi->q_vectors)
2261 return -EINVAL;
2262
2263 ice_for_each_q_vector(vsi, i) {
2264 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2265
2266 if (!q_vector) {
2267 dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
2268 i, vsi->vsi_num);
2269 goto clear_reg_idx;
2270 }
2271
2272 if (vsi->type == ICE_VSI_VF) {
2273 struct ice_vf *vf = vsi->vf;
2274
2275 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
2276 } else {
2277 q_vector->reg_idx =
2278 q_vector->v_idx + vsi->base_vector;
2279 }
2280 }
2281
2282 return 0;
2283
2284 clear_reg_idx:
2285 ice_for_each_q_vector(vsi, i) {
2286 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2287
2288 if (q_vector)
2289 q_vector->reg_idx = 0;
2290 }
2291
2292 return -EINVAL;
2293 }
2294
2295
2296
2297
2298
2299
2300
2301 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2302 {
2303 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2304 enum ice_sw_fwd_act_type act);
2305 struct ice_pf *pf = vsi->back;
2306 struct device *dev;
2307 int status;
2308
2309 dev = ice_pf_to_dev(pf);
2310 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2311
2312 if (tx) {
2313 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2314 ICE_DROP_PACKET);
2315 } else {
2316 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
2317 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
2318 create);
2319 } else {
2320 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2321 ICE_FWD_TO_VSI);
2322 }
2323 }
2324
2325 if (status)
2326 dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
2327 create ? "adding" : "removing", tx ? "TX" : "RX",
2328 vsi->vsi_num, status);
2329 }
2330
2331
2332
2333
2334
2335
2336
2337
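/**
 * ice_set_agg_vsi - sets up scheduler aggregator node and moves VSI into it
 * @vsi: the VSI to be moved into an aggregator node
 *
 * Pick an existing (or first unused) aggregator node for this VSI type
 * (PF-owned VSIs and VF VSIs use separate node pools), create the node in
 * the Tx scheduler if it does not exist yet, and move the VSI under it.
 */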
2338 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2339 {
2340 struct device *dev = ice_pf_to_dev(vsi->back);
2341 struct ice_agg_node *agg_node_iter = NULL;
2342 u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2343 struct ice_agg_node *agg_node = NULL;
2344 int node_offset, max_agg_nodes = 0;
2345 struct ice_port_info *port_info;
2346 struct ice_pf *pf = vsi->back;
2347 u32 agg_node_id_start = 0;
2348 int status;
2349
2350
2351
2352
2353
2354
2355 port_info = pf->hw.port_info;
2356 if (!port_info)
2357 return;
2358
2359 switch (vsi->type) {
2360 case ICE_VSI_CTRL:
2361 case ICE_VSI_CHNL:
2362 case ICE_VSI_LB:
2363 case ICE_VSI_PF:
2364 case ICE_VSI_SWITCHDEV_CTRL:
2365 max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2366 agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2367 agg_node_iter = &pf->pf_agg_node[0];
2368 break;
2369 case ICE_VSI_VF:
2370
2371
2372
2373
2374
2375
2376 max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2377 agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2378 agg_node_iter = &pf->vf_agg_node[0];
2379 break;
2380 default:
2381
2382 dev_dbg(dev, "unexpected VSI type %s\n",
2383 ice_vsi_type_str(vsi->type));
2384 return;
2385 }
2386
2387
2388 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2389
2390
2391
2392 if (agg_node_iter->num_vsis &&
2393 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2394 agg_node_iter++;
2395 continue;
2396 }
2397
2398 if (agg_node_iter->valid &&
2399 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2400 agg_id = agg_node_iter->agg_id;
2401 agg_node = agg_node_iter;
2402 break;
2403 }
2404
2405
2406 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2407 agg_id = node_offset + agg_node_id_start;
2408 agg_node = agg_node_iter;
2409 break;
2410 }
2411
2412 agg_node_iter++;
2413 }
2414
2415 if (!agg_node)
2416 return;
2417
2418
2419 if (!agg_node->valid) {
2420 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2421 (u8)vsi->tc_cfg.ena_tc);
2422 if (status) {
2423 dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2424 agg_id);
2425 return;
2426 }
2427
2428 agg_node->valid = true;
2429 agg_node->agg_id = agg_id;
2430 }
2431
2432
2433 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2434 (u8)vsi->tc_cfg.ena_tc);
2435 if (status) {
2436 dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
2437 vsi->idx, agg_id);
2438 return;
2439 }
2440
2441
2442 agg_node->num_vsis++;
2443
2444
2445
2446
2447 vsi->agg_node = agg_node;
dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
vsi->agg_node->num_vsis);
2451 }
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
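/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vsi_type: VSI type
 * @vf: pointer to VF to which this VSI connects; used for ICE_VSI_VF and VF
 *      control VSIs, NULL otherwise
 * @ch: ptr to channel
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, NULL on failure.
 */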
2467 struct ice_vsi *
2468 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2469 enum ice_vsi_type vsi_type, struct ice_vf *vf,
2470 struct ice_channel *ch)
2471 {
2472 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2473 struct device *dev = ice_pf_to_dev(pf);
2474 struct ice_vsi *vsi;
2475 int ret, i;
2476
2477 if (vsi_type == ICE_VSI_CHNL)
2478 vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
2479 else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
2480 vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
2481 else
2482 vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
2483
2484 if (!vsi) {
2485 dev_err(dev, "could not allocate VSI\n");
2486 return NULL;
2487 }
2488
2489 vsi->port_info = pi;
2490 vsi->vsw = pf->first_sw;
2491 if (vsi->type == ICE_VSI_PF)
2492 vsi->ethtype = ETH_P_PAUSE;
2493
2494 ice_alloc_fd_res(vsi);
2495
2496 if (vsi_type != ICE_VSI_CHNL) {
2497 if (ice_vsi_get_qs(vsi)) {
2498 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2499 vsi->idx);
2500 goto unroll_vsi_alloc;
2501 }
2502 }
2503
2504
2505 ice_vsi_set_rss_params(vsi);
2506
2507
2508 ice_vsi_set_tc_cfg(vsi);
2509
2510
2511 ret = ice_vsi_init(vsi, true);
2512 if (ret)
2513 goto unroll_get_qs;
2514
2515 ice_vsi_init_vlan_ops(vsi);
2516
2517 switch (vsi->type) {
2518 case ICE_VSI_CTRL:
2519 case ICE_VSI_SWITCHDEV_CTRL:
2520 case ICE_VSI_PF:
2521 ret = ice_vsi_alloc_q_vectors(vsi);
2522 if (ret)
2523 goto unroll_vsi_init;
2524
2525 ret = ice_vsi_setup_vector_base(vsi);
2526 if (ret)
2527 goto unroll_alloc_q_vector;
2528
2529 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2530 if (ret)
2531 goto unroll_vector_base;
2532
2533 ret = ice_vsi_alloc_rings(vsi);
2534 if (ret)
2535 goto unroll_vector_base;
2536
2537 ice_vsi_map_rings_to_vectors(vsi);
2538
2539
2540 if (vsi->type != ICE_VSI_CTRL)
2541
2542
2543
2544
2545 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2546 ice_vsi_cfg_rss_lut_key(vsi);
2547 ice_vsi_set_rss_flow_fld(vsi);
2548 }
2549 ice_init_arfs(vsi);
2550 break;
2551 case ICE_VSI_CHNL:
2552 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2553 ice_vsi_cfg_rss_lut_key(vsi);
2554 ice_vsi_set_rss_flow_fld(vsi);
2555 }
2556 break;
2557 case ICE_VSI_VF:
2558
2559
2560
2561
2562
2563 ret = ice_vsi_alloc_q_vectors(vsi);
2564 if (ret)
2565 goto unroll_vsi_init;
2566
2567 ret = ice_vsi_alloc_rings(vsi);
2568 if (ret)
2569 goto unroll_alloc_q_vector;
2570
2571 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2572 if (ret)
2573 goto unroll_vector_base;
2574
2575
2576
2577
2578
2579 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2580 ice_vsi_cfg_rss_lut_key(vsi);
2581 ice_vsi_set_vf_rss_flow_fld(vsi);
2582 }
2583 break;
2584 case ICE_VSI_LB:
2585 ret = ice_vsi_alloc_rings(vsi);
2586 if (ret)
2587 goto unroll_vsi_init;
2588 break;
2589 default:
2590
2591 goto unroll_vsi_init;
2592 }
2593
2594
2595 ice_for_each_traffic_class(i) {
2596 if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2597 continue;
2598
2599 if (vsi->type == ICE_VSI_CHNL) {
2600 if (!vsi->alloc_txq && vsi->num_txq)
2601 max_txqs[i] = vsi->num_txq;
2602 else
2603 max_txqs[i] = pf->num_lan_tx;
2604 } else {
2605 max_txqs[i] = vsi->alloc_txq;
2606 }
2607 }
2608
2609 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2610 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2611 max_txqs);
2612 if (ret) {
2613 dev_err(dev, "VSI %d failed lan queue config, error %d\n",
2614 vsi->vsi_num, ret);
2615 goto unroll_clear_rings;
2616 }
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627 if (!ice_is_safe_mode(pf))
2628 if (vsi->type == ICE_VSI_PF) {
2629 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
2630 ICE_DROP_PACKET);
2631 ice_cfg_sw_lldp(vsi, true, true);
2632 }
2633
2634 if (!vsi->agg_node)
2635 ice_set_agg_vsi(vsi);
2636 return vsi;
2637
2638 unroll_clear_rings:
2639 ice_vsi_clear_rings(vsi);
2640 unroll_vector_base:
2641
2642 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2643 pf->num_avail_sw_msix += vsi->num_q_vectors;
2644 unroll_alloc_q_vector:
2645 ice_vsi_free_q_vectors(vsi);
2646 unroll_vsi_init:
2647 ice_vsi_delete(vsi);
2648 unroll_get_qs:
2649 ice_vsi_put_qs(vsi);
2650 unroll_vsi_alloc:
2651 if (vsi_type == ICE_VSI_VF)
2652 ice_enable_lag(pf->lag);
2653 ice_vsi_clear(vsi);
2654
2655 return NULL;
2656 }
2657
2658
2659
2660
2661
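/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */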
2662 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2663 {
2664 struct ice_pf *pf = vsi->back;
2665 struct ice_hw *hw = &pf->hw;
2666 u32 txq = 0;
2667 u32 rxq = 0;
2668 int i, q;
2669
2670 ice_for_each_q_vector(vsi, i) {
2671 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2672
2673 ice_write_intrl(q_vector, 0);
2674 for (q = 0; q < q_vector->num_ring_tx; q++) {
2675 ice_write_itr(&q_vector->tx, 0);
2676 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2677 if (ice_is_xdp_ena_vsi(vsi)) {
2678 u32 xdp_txq = txq + vsi->num_xdp_txq;
2679
2680 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2681 }
2682 txq++;
2683 }
2684
2685 for (q = 0; q < q_vector->num_ring_rx; q++) {
2686 ice_write_itr(&q_vector->rx, 0);
2687 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2688 rxq++;
2689 }
2690 }
2691
2692 ice_flush(hw);
2693 }
2694
2695
2696
2697
2698
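/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */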
2699 void ice_vsi_free_irq(struct ice_vsi *vsi)
2700 {
2701 struct ice_pf *pf = vsi->back;
2702 int base = vsi->base_vector;
2703 int i;
2704
2705 if (!vsi->q_vectors || !vsi->irqs_ready)
2706 return;
2707
2708 ice_vsi_release_msix(vsi);
2709 if (vsi->type == ICE_VSI_VF)
2710 return;
2711
2712 vsi->irqs_ready = false;
2713 ice_free_cpu_rx_rmap(vsi);
2714
2715 ice_for_each_q_vector(vsi, i) {
2716 u16 vector = i + base;
2717 int irq_num;
2718
2719 irq_num = pf->msix_entries[vector].vector;
2720
2721
2722 if (!vsi->q_vectors[i] ||
2723 !(vsi->q_vectors[i]->num_ring_tx ||
2724 vsi->q_vectors[i]->num_ring_rx))
2725 continue;
2726
2727
2728 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2729 irq_set_affinity_notifier(irq_num, NULL);
2730
2731
2732 irq_set_affinity_hint(irq_num, NULL);
2733 synchronize_irq(irq_num);
2734 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2735 }
2736 }
2737
2738
2739
2740
2741
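/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */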
2742 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2743 {
2744 int i;
2745
2746 if (!vsi->tx_rings)
2747 return;
2748
2749 ice_for_each_txq(vsi, i)
2750 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2751 ice_free_tx_ring(vsi->tx_rings[i]);
2752 }
2753
2754
2755
2756
2757
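/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */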
2758 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2759 {
2760 int i;
2761
2762 if (!vsi->rx_rings)
2763 return;
2764
2765 ice_for_each_rxq(vsi, i)
2766 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2767 ice_free_rx_ring(vsi->rx_rings[i]);
2768 }
2769
2770
2771
2772
2773
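/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */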
2774 void ice_vsi_close(struct ice_vsi *vsi)
2775 {
2776 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2777 ice_down(vsi);
2778
2779 ice_vsi_free_irq(vsi);
2780 ice_vsi_free_tx_rings(vsi);
2781 ice_vsi_free_rx_rings(vsi);
2782 }
2783
2784
2785
2786
2787
2788
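/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */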
2789 int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
2790 {
2791 int err = 0;
2792
2793 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2794 return 0;
2795
2796 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2797
2798 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
2799 if (netif_running(vsi->netdev)) {
2800 if (!locked)
2801 rtnl_lock();
2802
2803 err = ice_open_internal(vsi->netdev);
2804
2805 if (!locked)
2806 rtnl_unlock();
2807 }
2808 } else if (vsi->type == ICE_VSI_CTRL) {
2809 err = ice_vsi_open_ctrl(vsi);
2810 }
2811
2812 return err;
2813 }
2814
2815
2816
2817
2818
2819
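/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */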
2820 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
2821 {
2822 if (test_bit(ICE_VSI_DOWN, vsi->state))
2823 return;
2824
2825 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2826
2827 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
2828 if (netif_running(vsi->netdev)) {
2829 if (!locked)
2830 rtnl_lock();
2831
2832 ice_vsi_close(vsi);
2833
2834 if (!locked)
2835 rtnl_unlock();
2836 } else {
2837 ice_vsi_close(vsi);
2838 }
2839 } else if (vsi->type == ICE_VSI_CTRL ||
2840 vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
2841 ice_vsi_close(vsi);
2842 }
2843 }
2844
2845
2846
2847
2848
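/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */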
2849 void ice_vsi_dis_irq(struct ice_vsi *vsi)
2850 {
2851 int base = vsi->base_vector;
2852 struct ice_pf *pf = vsi->back;
2853 struct ice_hw *hw = &pf->hw;
2854 u32 val;
2855 int i;
2856
2857
2858 if (vsi->tx_rings) {
2859 ice_for_each_txq(vsi, i) {
2860 if (vsi->tx_rings[i]) {
2861 u16 reg;
2862
2863 reg = vsi->tx_rings[i]->reg_idx;
2864 val = rd32(hw, QINT_TQCTL(reg));
2865 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2866 wr32(hw, QINT_TQCTL(reg), val);
2867 }
2868 }
2869 }
2870
2871 if (vsi->rx_rings) {
2872 ice_for_each_rxq(vsi, i) {
2873 if (vsi->rx_rings[i]) {
2874 u16 reg;
2875
2876 reg = vsi->rx_rings[i]->reg_idx;
2877 val = rd32(hw, QINT_RQCTL(reg));
2878 val &= ~QINT_RQCTL_CAUSE_ENA_M;
2879 wr32(hw, QINT_RQCTL(reg), val);
2880 }
2881 }
2882 }
2883
2884
2885 ice_for_each_q_vector(vsi, i) {
2886 if (!vsi->q_vectors[i])
2887 continue;
2888 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2889 }
2890
2891 ice_flush(hw);
2892
2893
2894 if (vsi->type == ICE_VSI_VF)
2895 return;
2896
2897 ice_for_each_q_vector(vsi, i)
2898 synchronize_irq(pf->msix_entries[i + base].vector);
2899 }
2900
2901
2902
2903
2904
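/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */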
2905 void ice_napi_del(struct ice_vsi *vsi)
2906 {
2907 int v_idx;
2908
2909 if (!vsi->netdev)
2910 return;
2911
2912 ice_for_each_q_vector(vsi, v_idx)
2913 netif_napi_del(&vsi->q_vectors[v_idx]->napi);
2914 }
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
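/**
 * ice_free_vf_ctrl_res - Free the VF control VSI resource
 * @pf: pointer to PF structure
 * @vsi: the VSI to free resources for
 *
 * Check if the VF control VSI resource is still in use. If no VF is using it
 * any more, release the VSI resource. Otherwise, leave it to be cleaned up
 * once no other VF uses it.
 */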
2925 static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
2926 {
2927 struct ice_vf *vf;
2928 unsigned int bkt;
2929
2930 rcu_read_lock();
2931 ice_for_each_vf_rcu(pf, bkt, vf) {
2932 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
2933 rcu_read_unlock();
2934 return;
2935 }
2936 }
2937 rcu_read_unlock();
2938
2939 /* No other VFs left that have control VSI. It is now safe to reclaim
2940 * SW interrupts back to the common pool
2941 */
2942 ice_free_res(pf->irq_tracker, vsi->base_vector,
2943 ICE_RES_VF_CTRL_VEC_ID);
2944 pf->num_avail_sw_msix += vsi->num_q_vectors;
2945 }
2946
2947
2948
2949
2950
2951
2952
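/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */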
2953 int ice_vsi_release(struct ice_vsi *vsi)
2954 {
2955 struct ice_pf *pf;
2956 int err;
2957
2958 if (!vsi->back)
2959 return -ENODEV;
2960 pf = vsi->back;
2961
2962
2963
2964
2965
2966
2967
2968 if (vsi->netdev && !ice_is_reset_in_progress(pf->state) &&
2969 (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) {
2970 unregister_netdev(vsi->netdev);
2971 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
2972 }
2973
2974 if (vsi->type == ICE_VSI_PF)
2975 ice_devlink_destroy_pf_port(pf);
2976
2977 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2978 ice_rss_clean(vsi);
2979
2980
2981 if (vsi->type != ICE_VSI_LB)
2982 ice_vsi_dis_irq(vsi);
2983 ice_vsi_close(vsi);
2984
2985 /* SR-IOV determines needed MSIX resources all at once instead of per
2986 * VSI since when VFs are spawned we know how many VFs there are and
2987 * how many interrupts each VF needs. SR-IOV MSIX resources are also
2988 * cleared in the same manner
2989 */
2990 if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
2991 ice_free_vf_ctrl_res(pf, vsi);
2992 } else if (vsi->type != ICE_VSI_VF) {
2993
2994 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2995 pf->num_avail_sw_msix += vsi->num_q_vectors;
2996 }
2997
2998 if (!ice_is_safe_mode(pf)) {
2999 if (vsi->type == ICE_VSI_PF) {
3000 ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
3001 ICE_DROP_PACKET);
3002 ice_cfg_sw_lldp(vsi, true, false);
3003 /* The Rx rule will only exist to remove if the LLDP FW
3004 * engine is currently stopped
3005 */
3006 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
3007 ice_cfg_sw_lldp(vsi, false, false);
3008 }
3009 }
3010
3011 if (ice_is_vsi_dflt_vsi(vsi))
3012 ice_clear_dflt_vsi(vsi);
3013 ice_fltr_remove_all(vsi);
3014 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3015 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
3016 if (err)
3017 dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
3018 vsi->vsi_num, err);
3019 ice_vsi_delete(vsi);
3020 ice_vsi_free_q_vectors(vsi);
3021
3022 if (vsi->netdev) {
3023 if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
3024 unregister_netdev(vsi->netdev);
3025 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
3026 }
3027 if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) {
3028 free_netdev(vsi->netdev);
3029 vsi->netdev = NULL;
3030 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3031 }
3032 }
3033
3034 if (vsi->type == ICE_VSI_VF &&
3035 vsi->agg_node && vsi->agg_node->valid)
3036 vsi->agg_node->num_vsis--;
3037 ice_vsi_clear_rings(vsi);
3038
3039 ice_vsi_put_qs(vsi);
3040
3041
3042
3043
3044
3045 if (!ice_is_reset_in_progress(pf->state))
3046 ice_vsi_clear(vsi);
3047
3048 return 0;
3049 }
3050
3051
3052
3053
3054
3055
3056
3057
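/**
 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
 * @vsi: VSI connected with q_vectors
 * @coalesce: array of struct with stored coalesce
 *
 * Returns array size.
 */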
3058 static int
3059 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
3060 struct ice_coalesce_stored *coalesce)
3061 {
3062 int i;
3063
3064 ice_for_each_q_vector(vsi, i) {
3065 struct ice_q_vector *q_vector = vsi->q_vectors[i];
3066
3067 coalesce[i].itr_tx = q_vector->tx.itr_settings;
3068 coalesce[i].itr_rx = q_vector->rx.itr_settings;
3069 coalesce[i].intrl = q_vector->intrl;
3070
3071 if (i < vsi->num_txq)
3072 coalesce[i].tx_valid = true;
3073 if (i < vsi->num_rxq)
3074 coalesce[i].rx_valid = true;
3075 }
3076
3077 return vsi->num_q_vectors;
3078 }
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
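/**
 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
 * @vsi: VSI connected with q_vectors
 * @coalesce: pointer to array of struct with stored coalesce
 * @size: size of coalesce array
 *
 * Reapply the ITR and interrupt rate limit settings saved by
 * ice_vsi_rebuild_get_coalesce() to the rebuilt vectors. Vectors added by
 * the rebuild inherit the settings of vector 0.
 */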
3090 static void
3091 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
3092 struct ice_coalesce_stored *coalesce, int size)
3093 {
3094 struct ice_ring_container *rc;
3095 int i;
3096
3097 if ((size && !coalesce) || !vsi)
3098 return;
3099
3100 /* There are a couple of cases that have to be handled here:
3101 *   1. The case where the number of queue vectors stays the same, but
3102 *      the number of Tx or Rx rings changes (the first for loop)
3103 *   2. The case where the number of queue vectors increased (the
3104 *      second for loop)
3105 */
3106 for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
3123 rc = &vsi->q_vectors[i]->rx;
3124 rc->itr_settings = coalesce[i].itr_rx;
3125 ice_write_itr(rc, rc->itr_setting);
3126 } else if (i < vsi->alloc_rxq) {
3127 rc = &vsi->q_vectors[i]->rx;
3128 rc->itr_settings = coalesce[0].itr_rx;
3129 ice_write_itr(rc, rc->itr_setting);
3130 }
3131
3132 if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
3133 rc = &vsi->q_vectors[i]->tx;
3134 rc->itr_settings = coalesce[i].itr_tx;
3135 ice_write_itr(rc, rc->itr_setting);
3136 } else if (i < vsi->alloc_txq) {
3137 rc = &vsi->q_vectors[i]->tx;
3138 rc->itr_settings = coalesce[0].itr_tx;
3139 ice_write_itr(rc, rc->itr_setting);
3140 }
3141
3142 vsi->q_vectors[i]->intrl = coalesce[i].intrl;
3143 ice_set_q_vector_intrl(vsi->q_vectors[i]);
3144 }
3145
3146 /* the number of queue vectors increased so write whatever is in
3147 * the first element
3148 */
3149 for (; i < vsi->num_q_vectors; i++) {
3150
3151 rc = &vsi->q_vectors[i]->tx;
3152 rc->itr_settings = coalesce[0].itr_tx;
3153 ice_write_itr(rc, rc->itr_setting);
3154
3155
3156 rc = &vsi->q_vectors[i]->rx;
3157 rc->itr_settings = coalesce[0].itr_rx;
3158 ice_write_itr(rc, rc->itr_setting);
3159
3160 vsi->q_vectors[i]->intrl = coalesce[0].intrl;
3161 ice_set_q_vector_intrl(vsi->q_vectors[i]);
3162 }
3163 }
3164
3165
3166
3167
3168
3169
3170
3171
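/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
 * @init_vsi: is this an initialization or a reconfigure of the VSI
 *
 * Returns 0 on success and negative value on failure
 */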
3172 int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
3173 {
3174 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3175 struct ice_coalesce_stored *coalesce;
3176 int prev_num_q_vectors = 0;
3177 enum ice_vsi_type vtype;
3178 struct ice_pf *pf;
3179 int ret, i;
3180
3181 if (!vsi)
3182 return -EINVAL;
3183
3184 pf = vsi->back;
3185 vtype = vsi->type;
3186 if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
3187 return -EINVAL;
3188
3189 ice_vsi_init_vlan_ops(vsi);
3190
3191 coalesce = kcalloc(vsi->num_q_vectors,
3192 sizeof(struct ice_coalesce_stored), GFP_KERNEL);
3193 if (!coalesce)
3194 return -ENOMEM;
3195
3196 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3197
3198 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3199 ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
3200 if (ret)
3201 dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
3202 vsi->vsi_num, ret);
3203 ice_vsi_free_q_vectors(vsi);
3204
3205 /* SR-IOV determines needed MSIX resources all at once instead of per
3206 * VSI since when VFs are spawned we know how many VFs there are and
3207 * how many interrupts each VF needs. SR-IOV MSIX resources are also
3208 * cleared in the same manner
3209 */
3210 if (vtype != ICE_VSI_VF) {
3211
3212 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
3213 pf->num_avail_sw_msix += vsi->num_q_vectors;
3214 vsi->base_vector = 0;
3215 }
3216
3217 if (ice_is_xdp_ena_vsi(vsi))
3218 /* return value check can be skipped here, it always returns
3219 * 0 if reset is in progress
3220 */
3221 ice_destroy_xdp_rings(vsi);
3222 ice_vsi_put_qs(vsi);
3223 ice_vsi_clear_rings(vsi);
3224 ice_vsi_free_arrays(vsi);
3225 if (vtype == ICE_VSI_VF)
3226 ice_vsi_set_num_qs(vsi, vsi->vf);
3227 else
3228 ice_vsi_set_num_qs(vsi, NULL);
3229
3230 ret = ice_vsi_alloc_arrays(vsi);
3231 if (ret < 0)
3232 goto err_vsi;
3233
3234 ice_vsi_get_qs(vsi);
3235
3236 ice_alloc_fd_res(vsi);
3237 ice_vsi_set_tc_cfg(vsi);
3238
3239
3240 ret = ice_vsi_init(vsi, init_vsi);
3241 if (ret < 0)
3242 goto err_vsi;
3243
3244 switch (vtype) {
3245 case ICE_VSI_CTRL:
3246 case ICE_VSI_SWITCHDEV_CTRL:
3247 case ICE_VSI_PF:
3248 ret = ice_vsi_alloc_q_vectors(vsi);
3249 if (ret)
3250 goto err_rings;
3251
3252 ret = ice_vsi_setup_vector_base(vsi);
3253 if (ret)
3254 goto err_vectors;
3255
3256 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3257 if (ret)
3258 goto err_vectors;
3259
3260 ret = ice_vsi_alloc_rings(vsi);
3261 if (ret)
3262 goto err_vectors;
3263
3264 ice_vsi_map_rings_to_vectors(vsi);
3265 if (ice_is_xdp_ena_vsi(vsi)) {
3266 ret = ice_vsi_determine_xdp_res(vsi);
3267 if (ret)
3268 goto err_vectors;
3269 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
3270 if (ret)
3271 goto err_vectors;
3272 }
3273
3274 if (vtype != ICE_VSI_CTRL)
3275 /* Do not exit if configuring RSS had an issue, at
3276 * least receive traffic on first queue. Hence no
3277 * need to capture return value
3278 */
3279 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3280 ice_vsi_cfg_rss_lut_key(vsi);
3281 break;
3282 case ICE_VSI_VF:
3283 ret = ice_vsi_alloc_q_vectors(vsi);
3284 if (ret)
3285 goto err_rings;
3286
3287 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3288 if (ret)
3289 goto err_vectors;
3290
3291 ret = ice_vsi_alloc_rings(vsi);
3292 if (ret)
3293 goto err_vectors;
3294
3295 break;
3296 case ICE_VSI_CHNL:
3297 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3298 ice_vsi_cfg_rss_lut_key(vsi);
3299 ice_vsi_set_rss_flow_fld(vsi);
3300 }
3301 break;
3302 default:
3303 break;
3304 }
3305
3306
3307 for (i = 0; i < vsi->tc_cfg.numtc; i++) {
3308 /* configure VSI nodes based on number of queues and TC's.
3309 * ADQ creates VSIs for each TC/Channel but doesn't
3310 * allocate queues instead it reconfigures the PF queues
3311 * as per the TC command. So max_txqs should point to the
3312 * PF Tx queues
3313 */
3314 if (vtype == ICE_VSI_CHNL)
3315 max_txqs[i] = pf->num_lan_tx;
3316 else
3317 max_txqs[i] = vsi->alloc_txq;
3318
3319 if (ice_is_xdp_ena_vsi(vsi))
3320 max_txqs[i] += vsi->num_xdp_txq;
3321 }
3322
3323 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3324 /* If MQPRIO is set, means channel code path, hence for main
3325 * VSI's, use TC as 1
3326 */
3327 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3328 else
3329 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3330 vsi->tc_cfg.ena_tc, max_txqs);
3331
3332 if (ret) {
3333 dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
3334 vsi->vsi_num, ret);
3335 if (init_vsi) {
3336 ret = -EIO;
3337 goto err_vectors;
3338 } else {
3339 kfree(coalesce);
return ice_schedule_reset(pf, ICE_RESET_PFR);
3340 }
3341 }
3342 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
3343 kfree(coalesce);
3344
3345 return 0;
3346
3347 err_vectors:
3348 ice_vsi_free_q_vectors(vsi);
3349 err_rings:
3350 if (vsi->netdev) {
3351 vsi->current_netdev_flags = 0;
3352 unregister_netdev(vsi->netdev);
3353 free_netdev(vsi->netdev);
3354 vsi->netdev = NULL;
3355 }
3356 err_vsi:
3357 ice_vsi_clear(vsi);
3358 set_bit(ICE_RESET_FAILED, pf->state);
3359 kfree(coalesce);
3360 return ret;
3361 }
3362
3363
3364
3365
3366
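/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */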
3367 bool ice_is_reset_in_progress(unsigned long *state)
3368 {
3369 return test_bit(ICE_RESET_OICR_RECV, state) ||
3370 test_bit(ICE_PFR_REQ, state) ||
3371 test_bit(ICE_CORER_REQ, state) ||
3372 test_bit(ICE_GLOBR_REQ, state);
3373 }
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
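/**
 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
 * @pf: pointer to the PF structure
 * @timeout: length of time to wait, in jiffies
 *
 * Wait (sleep) for a short time until the driver finishes cleaning up from
 * a device reset. The caller must be able to sleep.
 *
 * Returns 0 on success, -EBUSY if the reset is not finished, or the negative
 * value returned by wait_event_interruptible_timeout() if the wait was
 * interrupted.
 */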
3388 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
3389 {
3390 long ret;
3391
3392 ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
3393 !ice_is_reset_in_progress(pf->state),
3394 timeout);
3395 if (ret < 0)
3396 return ret;
3397 else if (!ret)
3398 return -EBUSY;
3399 else
3400 return 0;
3401 }
3402
3403
3404
3405
3406
3407
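/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from the VSI update command
 */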
3408 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3409 {
3410 vsi->info.mapping_flags = ctx->info.mapping_flags;
3411 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3412 sizeof(vsi->info.q_mapping));
3413 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3414 sizeof(vsi->info.tc_mapping));
3415 }
3416
3417
3418
3419
3420
3421
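/**
 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @ena_tc: TC map to be enabled
 */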
3422 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3423 {
3424 struct net_device *netdev = vsi->netdev;
3425 struct ice_pf *pf = vsi->back;
3426 int numtc = vsi->tc_cfg.numtc;
3427 struct ice_dcbx_cfg *dcbcfg;
3428 u8 netdev_tc;
3429 int i;
3430
3431 if (!netdev)
3432 return;
3433
3434
3435 if (vsi->type == ICE_VSI_CHNL)
3436 return;
3437
3438 if (!ena_tc) {
3439 netdev_reset_tc(netdev);
3440 return;
3441 }
3442
3443 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3444 numtc = vsi->all_numtc;
3445
3446 if (netdev_set_num_tc(netdev, numtc))
3447 return;
3448
3449 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
3450
3451 ice_for_each_traffic_class(i)
3452 if (vsi->tc_cfg.ena_tc & BIT(i))
3453 netdev_set_tc_queue(netdev,
3454 vsi->tc_cfg.tc_info[i].netdev_tc,
3455 vsi->tc_cfg.tc_info[i].qcount_tx,
3456 vsi->tc_cfg.tc_info[i].qoffset);
3457
3458 ice_for_each_chnl_tc(i) {
3459 if (!(vsi->all_enatc & BIT(i)))
3460 break;
3461 if (!vsi->mqprio_qopt.qopt.count[i])
3462 break;
3463 netdev_set_tc_queue(netdev, i,
3464 vsi->mqprio_qopt.qopt.count[i],
3465 vsi->mqprio_qopt.qopt.offset[i]);
3466 }
3467
3468 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3469 return;
3470
3471 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3472 u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3473
3474
3475 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3476 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3477 }
3478 }
3479
3480
3481
3482
3483
3484
3485
3486
3487
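/**
 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @ena_tc: TC map to be enabled
 *
 * Returns 0 on success, -EINVAL if the requested queue counts exceed what
 * was allocated for the VSI.
 */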
3488 static int
3489 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
3490 u8 ena_tc)
3491 {
3492 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
3493 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3494 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3495 u16 new_txq, new_rxq;
3496 u8 netdev_tc = 0;
3497 int i;
3498
3499 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3500
3501 pow = order_base_2(tc0_qcount);
3502 qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
3503 ICE_AQ_VSI_TC_Q_OFFSET_M) |
3504 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
3505
3506 ice_for_each_traffic_class(i) {
3507 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3508
3509 vsi->tc_cfg.tc_info[i].qoffset = 0;
3510 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3511 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3512 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3513 ctxt->info.tc_mapping[i] = 0;
3514 continue;
3515 }
3516
3517 offset = vsi->mqprio_qopt.qopt.offset[i];
3518 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3519 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3520 vsi->tc_cfg.tc_info[i].qoffset = offset;
3521 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3522 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3523 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3524 }
3525
3526 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3527 ice_for_each_chnl_tc(i) {
3528 if (!(vsi->all_enatc & BIT(i)))
3529 continue;
3530 offset = vsi->mqprio_qopt.qopt.offset[i];
3531 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3532 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3533 }
3534 }
3535
3536 new_txq = offset + qcount_tx;
3537 if (new_txq > vsi->alloc_txq) {
3538 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
3539 new_txq, vsi->alloc_txq);
3540 return -EINVAL;
3541 }
3542
3543 new_rxq = offset + qcount_rx;
3544 if (new_rxq > vsi->alloc_rxq) {
3545 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
3546 new_rxq, vsi->alloc_rxq);
3547 return -EINVAL;
3548 }
3549
3550
3551 vsi->num_txq = new_txq;
3552 vsi->num_rxq = new_rxq;
3553
3554
3555 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
3556 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3557 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
3558
3559
3560
3561
3562 if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3563 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3564 vsi->next_base_q = tc0_qcount;
3565 }
3566 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
3567 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
3568 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3569 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3570
3571 return 0;
3572 }
3573
3574
3575
3576
3577
3578
3579
3580
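/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * Returns 0 on success and negative value on failure
 */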
3581 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3582 {
3583 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3584 struct ice_pf *pf = vsi->back;
3585 struct ice_tc_cfg old_tc_cfg;
3586 struct ice_vsi_ctx *ctx;
3587 struct device *dev;
3588 int i, ret = 0;
3589 u8 num_tc = 0;
3590
3591 dev = ice_pf_to_dev(pf);
3592 if (vsi->tc_cfg.ena_tc == ena_tc &&
3593 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3594 return ret;
3595
3596 ice_for_each_traffic_class(i) {
3597
3598 if (ena_tc & BIT(i))
3599 num_tc++;
3600
3601 max_txqs[i] = vsi->alloc_txq;
3602
3603
3604
3605 if (vsi->type == ICE_VSI_CHNL &&
3606 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3607 max_txqs[i] = vsi->num_txq;
3608 }
3609
3610 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3611 vsi->tc_cfg.ena_tc = ena_tc;
3612 vsi->tc_cfg.numtc = num_tc;
3613
3614 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3615 if (!ctx)
3616 return -ENOMEM;
3617
3618 ctx->vf_num = 0;
3619 ctx->info = vsi->info;
3620
3621 if (vsi->type == ICE_VSI_PF &&
3622 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3623 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
3624 else
3625 ret = ice_vsi_setup_q_map(vsi, ctx);
3626
3627 if (ret) {
3628 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3629 goto out;
3630 }
3631
3632
3633 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3634 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3635 if (ret) {
3636 dev_info(dev, "Failed VSI Update\n");
3637 goto out;
3638 }
3639
3640 if (vsi->type == ICE_VSI_PF &&
3641 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3642 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3643 else
3644 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3645 vsi->tc_cfg.ena_tc, max_txqs);
3646
3647 if (ret) {
3648 dev_err(dev, "VSI %d failed TC config, error %d\n",
3649 vsi->vsi_num, ret);
3650 goto out;
3651 }
3652 ice_vsi_update_q_map(vsi, ctx);
3653 vsi->info.valid_sections = 0;
3654
3655 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3656 out:
3657 kfree(ctx);
3658 return ret;
3659 }
3660
3661
3662
3663
3664
3665
3666
3667
3668
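/**
 * ice_update_ring_stats - update ring statistics
 * @stats: stats to be updated
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that caller has acquired a u64_stats sync lock.
 */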
3669 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
3670 {
3671 stats->bytes += bytes;
3672 stats->pkts += pkts;
3673 }
3674
3675
3676
3677
3678
3679
3680
3681 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
3682 {
3683 u64_stats_update_begin(&tx_ring->syncp);
3684 ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
3685 u64_stats_update_end(&tx_ring->syncp);
3686 }
3687
3688
3689
3690
3691
3692
3693
3694 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
3695 {
3696 u64_stats_update_begin(&rx_ring->syncp);
3697 ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
3698 u64_stats_update_end(&rx_ring->syncp);
3699 }
3700
3701
3702
3703
3704
3705
3706
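/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @pi: port info of the switch with default VSI
 *
 * Return true if a VSI on this port is set as the default forwarding VSI,
 * false otherwise.
 */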
3707 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3708 {
3709 bool exists = false;
3710
3711 ice_check_if_dflt_vsi(pi, 0, &exists);
3712 return exists;
3713 }
3714
3715
3716
3717
3718
3719
3720
3721
3722 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3723 {
3724 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3725 }
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
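/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI just return success.
 * Otherwise try to set the VSI passed in as the switch's default VSI and
 * return the result.
 */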
3737 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3738 {
3739 struct device *dev;
3740 int status;
3741
3742 if (!vsi)
3743 return -EINVAL;
3744
3745 dev = ice_pf_to_dev(vsi->back);
3746
3747
3748 if (ice_is_vsi_dflt_vsi(vsi)) {
3749 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3750 vsi->vsi_num);
3751 return 0;
3752 }
3753
3754 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3755 if (status) {
3756 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
3757 vsi->vsi_num, status);
3758 return status;
3759 }
3760
3761 return 0;
3762 }
3763
3764
3765
3766
3767
3768
3769
3770
3771
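/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @vsi: VSI to remove from filter list
 *
 * If the switch has no default VSI then return -ENODEV. Otherwise try to
 * clear the default VSI and return the result.
 */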
3772 int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3773 {
3774 struct device *dev;
3775 int status;
3776
3777 if (!vsi)
3778 return -EINVAL;
3779
3780 dev = ice_pf_to_dev(vsi->back);
3781
3782
3783 if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3784 return -ENODEV;
3785
3786 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3787 ICE_FLTR_RX);
3788 if (status) {
3789 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3790 vsi->vsi_num, status);
3791 return -EIO;
3792 }
3793
3794 return 0;
3795 }
3796
3797
3798
3799
3800
3801
3802
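/**
 * ice_get_link_speed_mbps - get link speed in Mbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed, or 0 if the speed is unknown.
 */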
3803 int ice_get_link_speed_mbps(struct ice_vsi *vsi)
3804 {
3805 switch (vsi->port_info->phy.link_info.link_speed) {
3806 case ICE_AQ_LINK_SPEED_100GB:
3807 return SPEED_100000;
3808 case ICE_AQ_LINK_SPEED_50GB:
3809 return SPEED_50000;
3810 case ICE_AQ_LINK_SPEED_40GB:
3811 return SPEED_40000;
3812 case ICE_AQ_LINK_SPEED_25GB:
3813 return SPEED_25000;
3814 case ICE_AQ_LINK_SPEED_20GB:
3815 return SPEED_20000;
3816 case ICE_AQ_LINK_SPEED_10GB:
3817 return SPEED_10000;
3818 case ICE_AQ_LINK_SPEED_5GB:
3819 return SPEED_5000;
3820 case ICE_AQ_LINK_SPEED_2500MB:
3821 return SPEED_2500;
3822 case ICE_AQ_LINK_SPEED_1000MB:
3823 return SPEED_1000;
3824 case ICE_AQ_LINK_SPEED_100MB:
3825 return SPEED_100;
3826 case ICE_AQ_LINK_SPEED_10MB:
3827 return SPEED_10;
3828 case ICE_AQ_LINK_SPEED_UNKNOWN:
3829 default:
3830 return 0;
3831 }
3832 }
3833
3834
3835
3836
3837
3838
3839
3840 int ice_get_link_speed_kbps(struct ice_vsi *vsi)
3841 {
3842 int speed_mbps;
3843
3844 speed_mbps = ice_get_link_speed_mbps(vsi);
3845
3846 return speed_mbps * 1000;
3847 }
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
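/**
 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
 * @vsi: VSI to be configured
 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
 *
 * If the min_tx_rate is specified as 0 that means to clear the minimum BW
 * limit over the Tx rate of the VSI.
 */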
3858 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
3859 {
3860 struct ice_pf *pf = vsi->back;
3861 struct device *dev;
3862 int status;
3863 int speed;
3864
3865 dev = ice_pf_to_dev(pf);
3866 if (!vsi->port_info) {
3867 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3868 vsi->idx, vsi->type);
3869 return -EINVAL;
3870 }
3871
3872 speed = ice_get_link_speed_kbps(vsi);
3873 if (min_tx_rate > (u64)speed) {
3874 dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3875 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3876 speed);
3877 return -EINVAL;
3878 }
3879
3880
3881 if (min_tx_rate) {
3882 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3883 ICE_MIN_BW, min_tx_rate);
3884 if (status) {
3885 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
3886 min_tx_rate, ice_vsi_type_str(vsi->type),
3887 vsi->idx);
3888 return status;
3889 }
3890
3891 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
3892 min_tx_rate, ice_vsi_type_str(vsi->type));
3893 } else {
3894 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3895 vsi->idx, 0,
3896 ICE_MIN_BW);
3897 if (status) {
3898 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
3899 ice_vsi_type_str(vsi->type), vsi->idx);
3900 return status;
3901 }
3902
3903 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
3904 ice_vsi_type_str(vsi->type), vsi->idx);
3905 }
3906
3907 return 0;
3908 }
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
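/**
 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
 *
 * If the max_tx_rate is specified as 0 that means to clear the maximum BW
 * limit over the Tx rate of the VSI.
 */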
3919 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
3920 {
3921 struct ice_pf *pf = vsi->back;
3922 struct device *dev;
3923 int status;
3924 int speed;
3925
3926 dev = ice_pf_to_dev(pf);
3927 if (!vsi->port_info) {
3928 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3929 vsi->idx, vsi->type);
3930 return -EINVAL;
3931 }
3932
3933 speed = ice_get_link_speed_kbps(vsi);
3934 if (max_tx_rate > (u64)speed) {
3935 dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3936 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3937 speed);
3938 return -EINVAL;
3939 }
3940
3941
3942 if (max_tx_rate) {
3943 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3944 ICE_MAX_BW, max_tx_rate);
3945 if (status) {
3946 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
3947 max_tx_rate, ice_vsi_type_str(vsi->type),
3948 vsi->idx);
3949 return status;
3950 }
3951
3952 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
3953 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
3954 } else {
3955 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3956 vsi->idx, 0,
3957 ICE_MAX_BW);
3958 if (status) {
3959 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
3960 ice_vsi_type_str(vsi->type), vsi->idx);
3961 return status;
3962 }
3963
3964 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
3965 ice_vsi_type_str(vsi->type), vsi->idx);
3966 }
3967
3968 return 0;
3969 }
3970
3971
3972
3973
3974
3975
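/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */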
3976 int ice_set_link(struct ice_vsi *vsi, bool ena)
3977 {
3978 struct device *dev = ice_pf_to_dev(vsi->back);
3979 struct ice_port_info *pi = vsi->port_info;
3980 struct ice_hw *hw = pi->hw;
3981 int status;
3982
3983 if (vsi->type != ICE_VSI_PF)
3984 return -EINVAL;
3985
3986 status = ice_aq_set_link_restart_an(pi, ena, NULL);
3987
3988
3989
3990
3991
3992
3993 if (status == -EIO) {
3994 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3995 dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
3996 (ena ? "ON" : "OFF"), status,
3997 ice_aq_str(hw->adminq.sq_last_status));
3998 } else if (status) {
3999 dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
4000 (ena ? "ON" : "OFF"), status,
4001 ice_aq_str(hw->adminq.sq_last_status));
4002 return status;
4003 }
4004
4005 return 0;
4006 }
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
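/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * A VLAN 0 filter with no TPID is always added so untagged and
 * priority-tagged traffic is received. When Double VLAN Mode (DVM) is
 * enabled, a second VLAN 0 filter with the 802.1Q TPID is added as well.
 */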
4025 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
4026 {
4027 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
4028 struct ice_vlan vlan;
4029 int err;
4030
4031 vlan = ICE_VLAN(0, 0, 0);
4032 err = vlan_ops->add_vlan(vsi, &vlan);
4033 if (err && err != -EEXIST)
4034 return err;
4035
4036
4037 if (!ice_is_dvm_ena(&vsi->back->hw))
4038 return 0;
4039
4040 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
4041 err = vlan_ops->add_vlan(vsi, &vlan);
4042 if (err && err != -EEXIST)
4043 return err;
4044
4045 return 0;
4046 }
4047
4048
4049
4050
4051
4052
4053
4054
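/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero, then clear multicast VLAN promiscuous mode.
 */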
4055 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
4056 {
4057 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
4058 struct ice_vlan vlan;
4059 int err;
4060
4061 vlan = ICE_VLAN(0, 0, 0);
4062 err = vlan_ops->del_vlan(vsi, &vlan);
4063 if (err && err != -EEXIST)
4064 return err;
4065
4066
4067 if (!ice_is_dvm_ena(&vsi->back->hw))
4068 return 0;
4069
4070 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
4071 err = vlan_ops->del_vlan(vsi, &vlan);
4072 if (err && err != -EEXIST)
4073 return err;
4074
4075
4076
4077
4078 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
4079 ICE_MCAST_VLAN_PROMISC_BITS, 0);
4080 }
4081
4082
4083
4084
4085
4086
4087
4088
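/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * A VF VSI with a port VLAN uses no VLAN 0 filters. Otherwise, two filters
 * are used when DVM is enabled and one when SVM is enabled.
 */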
4089 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
4090 {
4091 #define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2
4092 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1
4093
4094 if (vsi->type == ICE_VSI_VF) {
4095 if (WARN_ON(!vsi->vf))
4096 return 0;
4097
4098 if (ice_vf_is_port_vlan_ena(vsi->vf))
4099 return 0;
4100 }
4101
4102 if (ice_is_dvm_ena(&vsi->back->hw))
4103 return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
4104 else
4105 return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
4106 }
4107
4108
4109
4110
4111
4112 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
4113 {
4114 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
4115 }
4116
4117
4118
4119
4120
4121 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
4122 {
4123 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
4124 }
4125
4126
4127
4128
4129
4130
4131
4132
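/**
 * ice_is_feature_supported - check if a driver feature flag is set
 * @pf: board private structure to check
 * @f: feature enum to check
 *
 * returns true if feature is supported, false otherwise
 */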
4133 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
4134 {
4135 if (f < 0 || f >= ICE_F_MAX)
4136 return false;
4137
4138 return test_bit(f, pf->features);
4139 }
4140
4141
4142
4143
4144
4145
4146 static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
4147 {
4148 if (f < 0 || f >= ICE_F_MAX)
4149 return;
4150
4151 set_bit(f, pf->features);
4152 }
4153
4154
4155
4156
4157
4158
4159 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
4160 {
4161 if (f < 0 || f >= ICE_F_MAX)
4162 return;
4163
4164 clear_bit(f, pf->features);
4165 }
4166
4167
4168
4169
4170
4171
4172
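/**
 * ice_init_feature_support - mark which driver features this device supports
 * @pf: pointer to the struct ice_pf instance
 *
 * Feature flags are set per device ID; E810C parts get DSCP and PTP EXTTS,
 * and E810-T parts additionally get SMA control and GNSS when present.
 */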
4173 void ice_init_feature_support(struct ice_pf *pf)
4174 {
4175 switch (pf->hw.device_id) {
4176 case ICE_DEV_ID_E810C_BACKPLANE:
4177 case ICE_DEV_ID_E810C_QSFP:
4178 case ICE_DEV_ID_E810C_SFP:
4179 ice_set_feature_support(pf, ICE_F_DSCP);
4180 ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
4181 if (ice_is_e810t(&pf->hw)) {
4182 ice_set_feature_support(pf, ICE_F_SMA_CTRL);
4183 if (ice_gnss_is_gps_present(&pf->hw))
4184 ice_set_feature_support(pf, ICE_F_GNSS);
4185 }
4186 break;
4187 default:
4188 break;
4189 }
4190 }
4191
4192
4193
4194
4195
4196
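/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */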
4197 int
4198 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
4199 {
4200 struct ice_vsi_ctx ctx = { 0 };
4201
4202 ctx.info = vsi->info;
4203 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
4204 fill(&ctx);
4205
4206 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4207 return -ENODEV;
4208
4209 vsi->info = ctx.info;
4210 return 0;
4211 }
4212
4213
4214
4215
4216
4217 void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
4218 {
4219 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
4220 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4221 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4222 }
4223
4224
4225
4226
4227
4228 void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
4229 {
4230 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
4231 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4232 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4233 }
4234
4235
4236
4237
4238
4239 void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
4240 {
4241 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4242 }
4243
4244
4245
4246
4247
4248 void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
4249 {
4250 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4251 }