#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_sriov.h"
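
/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */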
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}
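
/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */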
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
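
/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */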
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}
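
/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default value for ITR setting associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */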
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;

	if (vsi->type == ICE_VSI_VF)
		goto out;

	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* NAPI handler is registered only for VSIs with a netdev */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
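
/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */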
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		tx_ring->q_vector = NULL;
	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		rx_ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}
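
/**
 * ice_cfg_itr_gran - set the ITR granularity to ICE_ITR_GRAN_US if not already set
 * @hw: board specific structure
 */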
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}
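
/**
 * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */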
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* Idea here for calculation is that we subtract the number of queue
	 * count from TC that ring belongs to from it's absolute queue index
	 * and as a result we get the queue's index within TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
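
/**
 * ice_eswitch_calc_txq_handle - calculate the queue handle for switchdev
 * @ring: pointer to ring which unique index is needed
 *
 * To correctly work with many netdevs, ring->q_index of Tx rings on a
 * switchdev VSI can repeat. Hardware ring setup requires a unique q_index,
 * so calculate it here by finding the index of this ring in vsi->tx_rings.
 *
 * Return ICE_INVAL_Q_INDEX when the index wasn't found.
 */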
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	int i;

	ice_for_each_txq(vsi, i) {
		if (vsi->tx_rings[i] == ring)
			return i;
	}

	return ICE_INVAL_Q_INDEX;
}
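
/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * Map the Tx ring to the CPUs in its vector's affinity mask via XPS.
 * XPS is only initialized once so as not to overwrite user settings.
 */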
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}
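
/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */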
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* set the VM/VF type and number based on the VSI type that owns
	 * this queue
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Tx timestamping is only enabled for the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}
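
/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */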
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}
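
/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */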
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* absolute Rx queue number in the PF space */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to
	 * host memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor.
	 * For VFs in a port VLAN (and for non-DVM configurations) keep it set
	 * so the stripped VLAN tag is reported where VF drivers expect it.
	 */
	if (ice_is_dvm_ena(hw))
		if (vsi->type == ICE_VSI_VF &&
		    ice_vf_is_port_vlan_ena(vsi->vf))
			rlan_ctx.l2tsel = 1;
		else
			rlan_ctx.l2tsel = 0;
	else
		rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers */
	rlan_ctx.showiv = 0;

	/* For AF_XDP ZC, we disallow packets to span on
	 * multiple buffers, thus letting us skip that
	 * handling in the fast-path.
	 */
	if (ring->xsk_pool)
		chain_len = 1;

	/* Max packet size for this queue */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 * for non-VF VSIs; VF VSIs keep the legacy descriptor format.
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}
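
/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */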
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index,
					 ring->q_vector->napi.napi_id);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}
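
/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */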
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}
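
/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */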
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}
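
/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring was enabled/disabled after wait
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */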
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}
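
/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */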
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vectors for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}
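
/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vectors as "efficiently" as possible.
 */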
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assigning remaining rings count to VSIs num queue value */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
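
/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */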
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}
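
/**
 * ice_vsi_cfg_txq - Configure a single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */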
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred as
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
			status);
		return status;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}
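
/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */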
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}
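
/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */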
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}
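
/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */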
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}
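
/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */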
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}
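
/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */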
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	int status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
}
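
/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields that
 * are needed for stopping Tx queue
 */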
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}
}