0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (c) 2019, Intel Corporation. */
0003 
0004 #include <net/xdp_sock_drv.h>
0005 #include "ice_base.h"
0006 #include "ice_lib.h"
0007 #include "ice_dcb_lib.h"
0008 #include "ice_sriov.h"
0009 
0010 /**
0011  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
0012  * @qs_cfg: gathered variables needed for PF->VSI queue assignment
0013  *
0014  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
0015  */
0016 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
0017 {
0018     unsigned int offset, i;
0019 
0020     mutex_lock(qs_cfg->qs_mutex);
0021     offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
0022                         0, qs_cfg->q_count, 0);
0023     if (offset >= qs_cfg->pf_map_size) {
0024         mutex_unlock(qs_cfg->qs_mutex);
0025         return -ENOMEM;
0026     }
0027 
0028     bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
0029     for (i = 0; i < qs_cfg->q_count; i++)
0030         qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
0031     mutex_unlock(qs_cfg->qs_mutex);
0032 
0033     return 0;
0034 }
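
/* Illustrative example (not part of the original source): if the PF queue
 * bitmap already has bits 0-3 set and q_count is 4, the first zero area
 * starts at offset 4, so vsi_map[vsi_map_offset + 0..3] receives PF queues
 * 4, 5, 6 and 7.
 */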
0035 
0036 /**
0037  * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
0038  * @qs_cfg: gathered variables needed for PF->VSI queue assignment
0039  *
0040  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
0041  */
0042 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
0043 {
0044     unsigned int i, index = 0;
0045 
0046     mutex_lock(qs_cfg->qs_mutex);
0047     for (i = 0; i < qs_cfg->q_count; i++) {
0048         index = find_next_zero_bit(qs_cfg->pf_map,
0049                        qs_cfg->pf_map_size, index);
0050         if (index >= qs_cfg->pf_map_size)
0051             goto err_scatter;
0052         set_bit(index, qs_cfg->pf_map);
0053         qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
0054     }
0055     mutex_unlock(qs_cfg->qs_mutex);
0056 
0057     return 0;
0058 err_scatter:
0059     for (index = 0; index < i; index++) {
0060         clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
0061         qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
0062     }
0063     mutex_unlock(qs_cfg->qs_mutex);
0064 
0065     return -ENOMEM;
0066 }
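
/* Illustrative example (not part of the original source): with q_count = 3
 * and only bits 1, 4 and 6 clear in pf_map, the scatter path assigns PF
 * queues 1, 4 and 6 to vsi_map. If fewer than q_count free bits exist, the
 * error path releases every bit taken so far and returns -ENOMEM.
 */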
0067 
0068 /**
0069  * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
0070  * @pf: the PF being configured
0071  * @pf_q: the PF queue
0072  * @ena: enable or disable state of the queue
0073  *
0074  * This routine waits for the given Rx queue of the PF to reach the
0075  * enabled or disabled state.
0076  * Returns 0 in case of success, or -ETIMEDOUT if the queue fails to reach
0077  * the requested state after multiple retries.
0078  */
0079 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
0080 {
0081     int i;
0082 
0083     for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
0084         if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
0085                   QRX_CTRL_QENA_STAT_M))
0086             return 0;
0087 
0088         usleep_range(20, 40);
0089     }
0090 
0091     return -ETIMEDOUT;
0092 }
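
/* Worst case, the poll above runs ICE_Q_WAIT_MAX_RETRY iterations of
 * usleep_range(20, 40), i.e. roughly ICE_Q_WAIT_MAX_RETRY * 40 us plus
 * scheduling overhead, before giving up with -ETIMEDOUT.
 */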
0093 
0094 /**
0095  * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
0096  * @vsi: the VSI being configured
0097  * @v_idx: index of the vector in the VSI struct
0098  *
0099  * We allocate one q_vector and set the default values for the ITR settings
0100  * associated with this q_vector. If allocation fails we return -ENOMEM.
0101  */
0102 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
0103 {
0104     struct ice_pf *pf = vsi->back;
0105     struct ice_q_vector *q_vector;
0106 
0107     /* allocate q_vector */
0108     q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
0109                 GFP_KERNEL);
0110     if (!q_vector)
0111         return -ENOMEM;
0112 
0113     q_vector->vsi = vsi;
0114     q_vector->v_idx = v_idx;
0115     q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
0116     q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
0117     q_vector->tx.itr_mode = ITR_DYNAMIC;
0118     q_vector->rx.itr_mode = ITR_DYNAMIC;
0119     q_vector->tx.type = ICE_TX_CONTAINER;
0120     q_vector->rx.type = ICE_RX_CONTAINER;
0121 
0122     if (vsi->type == ICE_VSI_VF)
0123         goto out;
0124     /* only set affinity_mask if the CPU is online */
0125     if (cpu_online(v_idx))
0126         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
0127 
0128     /* This will not be called in the driver load path because the netdev
0129      * will not be created yet. All other cases will register the NAPI
0130      * handler here (i.e. resume, reset/rebuild, etc.)
0131      */
0132     if (vsi->netdev)
0133         netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
0134                    NAPI_POLL_WEIGHT);
0135 
0136 out:
0137     /* tie q_vector and VSI together */
0138     vsi->q_vectors[v_idx] = q_vector;
0139 
0140     return 0;
0141 }
0142 
0143 /**
0144  * ice_free_q_vector - Free memory allocated for a specific interrupt vector
0145  * @vsi: VSI having the memory freed
0146  * @v_idx: index of the vector to be freed
0147  */
0148 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
0149 {
0150     struct ice_q_vector *q_vector;
0151     struct ice_pf *pf = vsi->back;
0152     struct ice_tx_ring *tx_ring;
0153     struct ice_rx_ring *rx_ring;
0154     struct device *dev;
0155 
0156     dev = ice_pf_to_dev(pf);
0157     if (!vsi->q_vectors[v_idx]) {
0158         dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
0159         return;
0160     }
0161     q_vector = vsi->q_vectors[v_idx];
0162 
0163     ice_for_each_tx_ring(tx_ring, q_vector->tx)
0164         tx_ring->q_vector = NULL;
0165     ice_for_each_rx_ring(rx_ring, q_vector->rx)
0166         rx_ring->q_vector = NULL;
0167 
0168     /* only VSI with an associated netdev is set up with NAPI */
0169     if (vsi->netdev)
0170         netif_napi_del(&q_vector->napi);
0171 
0172     devm_kfree(dev, q_vector);
0173     vsi->q_vectors[v_idx] = NULL;
0174 }
0175 
0176 /**
0177  * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
0178  * @hw: board specific structure
0179  */
0180 static void ice_cfg_itr_gran(struct ice_hw *hw)
0181 {
0182     u32 regval = rd32(hw, GLINT_CTL);
0183 
0184     /* no need to update global register if ITR gran is already set */
0185     if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
0186         (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
0187          GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
0188         (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
0189          GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
0190         (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
0191          GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
0192         (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
0193           GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
0194         return;
0195 
0196     regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
0197           GLINT_CTL_ITR_GRAN_200_M) |
0198          ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
0199           GLINT_CTL_ITR_GRAN_100_M) |
0200          ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
0201           GLINT_CTL_ITR_GRAN_50_M) |
0202          ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
0203           GLINT_CTL_ITR_GRAN_25_M);
0204     wr32(hw, GLINT_CTL, regval);
0205 }
0206 
0207 /**
0208  * ice_calc_txq_handle - calculate the queue handle
0209  * @vsi: VSI that ring belongs to
0210  * @ring: ring to get the absolute queue index
0211  * @tc: traffic class number
0212  */
0213 static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
0214 {
0215     WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
0216 
0217     if (ring->ch)
0218         return ring->q_index - ring->ch->base_q;
0219 
0220     /* The idea behind the calculation is to subtract the queue offset of
0221      * the TC that the ring belongs to from the ring's absolute queue index,
0222      * which gives the queue's index within that TC.
0223      */
0224     return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
0225 }
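
/* Illustrative example (not part of the original source): if TC 1 starts at
 * qoffset 8 and this ring's absolute q_index is 10, the handle returned for
 * the ring within TC 1 is 10 - 8 = 2.
 */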
0226 
0227 /**
0228  * ice_eswitch_calc_txq_handle - calculate a unique Tx queue handle
0229  * @ring: pointer to the ring for which a unique index is needed
0230  *
0231  * To work correctly with many netdevs, ring->q_index of Tx rings on a
0232  * switchdev VSI can repeat. Hardware ring setup requires a unique q_index.
0233  * Calculate it here by finding the index of this ring in vsi->tx_rings.
0234  *
0235  * Return ICE_INVAL_Q_INDEX when the index isn't found. This should never
0236  * happen, because the VSI is taken from ring->vsi, so the ring must be in it.
0237  */
0238 static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
0239 {
0240     struct ice_vsi *vsi = ring->vsi;
0241     int i;
0242 
0243     ice_for_each_txq(vsi, i) {
0244         if (vsi->tx_rings[i] == ring)
0245             return i;
0246     }
0247 
0248     return ICE_INVAL_Q_INDEX;
0249 }
0250 
0251 /**
0252  * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
0253  * @ring: The Tx ring to configure
0254  *
0255  * This enables/disables XPS for a given Tx descriptor ring
0256  * based on the TCs enabled for the VSI that ring belongs to.
0257  */
0258 static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
0259 {
0260     if (!ring->q_vector || !ring->netdev)
0261         return;
0262 
0263     /* We only initialize XPS once, so as not to overwrite user settings */
0264     if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
0265         return;
0266 
0267     netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
0268                 ring->q_index);
0269 }
0270 
0271 /**
0272  * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
0273  * @ring: The Tx ring to configure
0274  * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
0275  * @pf_q: queue index in the PF space
0276  *
0277  * Configure the Tx descriptor ring in TLAN context.
0278  */
0279 static void
0280 ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
0281 {
0282     struct ice_vsi *vsi = ring->vsi;
0283     struct ice_hw *hw = &vsi->back->hw;
0284 
0285     tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
0286 
0287     tlan_ctx->port_num = vsi->port_info->lport;
0288 
0289     /* Transmit Queue Length */
0290     tlan_ctx->qlen = ring->count;
0291 
0292     ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
0293 
0294     /* PF number */
0295     tlan_ctx->pf_num = hw->pf_id;
0296 
0297     /* queue belongs to a specific VSI type
0298      * VF / VM index should be programmed per vmvf_type setting:
0299      * for vmvf_type = VF, it is VF number between 0-256
0300      * for vmvf_type = VM, it is VM number between 0-767
0301      * for PF or EMP this field should be set to zero
0302      */
0303     switch (vsi->type) {
0304     case ICE_VSI_LB:
0305     case ICE_VSI_CTRL:
0306     case ICE_VSI_PF:
0307         if (ring->ch)
0308             tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
0309         else
0310             tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
0311         break;
0312     case ICE_VSI_VF:
0313         /* Firmware expects vmvf_num to be absolute VF ID */
0314         tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
0315         tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
0316         break;
0317     case ICE_VSI_SWITCHDEV_CTRL:
0318         tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
0319         break;
0320     default:
0321         return;
0322     }
0323 
0324     /* make sure the context is associated with the right VSI */
0325     if (ring->ch)
0326         tlan_ctx->src_vsi = ring->ch->vsi_num;
0327     else
0328         tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
0329 
0330     /* Restrict Tx timestamps to the PF VSI */
0331     switch (vsi->type) {
0332     case ICE_VSI_PF:
0333         tlan_ctx->tsyn_ena = 1;
0334         break;
0335     default:
0336         break;
0337     }
0338 
0339     tlan_ctx->tso_ena = ICE_TX_LEGACY;
0340     tlan_ctx->tso_qnum = pf_q;
0341 
0342     /* Legacy or Advanced Host Interface:
0343      * 0: Advanced Host Interface
0344      * 1: Legacy Host Interface
0345      */
0346     tlan_ctx->legacy_int = ICE_TX_LEGACY;
0347 }
0348 
0349 /**
0350  * ice_rx_offset - Return expected offset into page to access data
0351  * @rx_ring: Ring we are requesting offset of
0352  *
0353  * Returns the offset value for ring into the data buffer.
0354  */
0355 static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
0356 {
0357     if (ice_ring_uses_build_skb(rx_ring))
0358         return ICE_SKB_PAD;
0359     else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
0360         return XDP_PACKET_HEADROOM;
0361 
0362     return 0;
0363 }
0364 
0365 /**
0366  * ice_setup_rx_ctx - Configure a receive ring context
0367  * @ring: The Rx ring to configure
0368  *
0369  * Configure the Rx descriptor ring in RLAN context.
0370  */
0371 static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
0372 {
0373     int chain_len = ICE_MAX_CHAINED_RX_BUFS;
0374     struct ice_vsi *vsi = ring->vsi;
0375     u32 rxdid = ICE_RXDID_FLEX_NIC;
0376     struct ice_rlan_ctx rlan_ctx;
0377     struct ice_hw *hw;
0378     u16 pf_q;
0379     int err;
0380 
0381     hw = &vsi->back->hw;
0382 
0383     /* Rx queue number in the global space of 2K Rx queues */
0384     pf_q = vsi->rxq_map[ring->q_index];
0385 
0386     /* clear the context structure first */
0387     memset(&rlan_ctx, 0, sizeof(rlan_ctx));
0388 
0389     /* Receive Queue Base Address.
0390      * Indicates the starting address of the descriptor queue defined in
0391      * 128 Byte units.
0392      */
0393     rlan_ctx.base = ring->dma >> 7;
0394 
0395     rlan_ctx.qlen = ring->count;
0396 
0397     /* Receive Packet Data Buffer Size.
0398      * The Packet Data Buffer Size is defined in 128 byte units.
0399      */
0400     rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
0401 
0402     /* use 32 byte descriptors */
0403     rlan_ctx.dsize = 1;
0404 
0405     /* Strip the Ethernet CRC bytes before the packet is posted to host
0406      * memory.
0407      */
0408     rlan_ctx.crcstrip = 1;
0409 
0410     /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
0411      * and it needs to remain 1 for non-DVM capable configurations to not
0412      * break backward compatibility for VF drivers. Setting this field to 0
0413      * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
0414      * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
0415      * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
0416      * check for the tag
0417      */
0418     if (ice_is_dvm_ena(hw))
0419         if (vsi->type == ICE_VSI_VF &&
0420             ice_vf_is_port_vlan_ena(vsi->vf))
0421             rlan_ctx.l2tsel = 1;
0422         else
0423             rlan_ctx.l2tsel = 0;
0424     else
0425         rlan_ctx.l2tsel = 1;
0426 
0427     rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
0428     rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
0429     rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
0430 
0431     /* This controls whether VLAN is stripped from inner headers
0432      * The VLAN in the inner L2 header is stripped to the receive
0433      * descriptor if enabled by this flag.
0434      */
0435     rlan_ctx.showiv = 0;
0436 
0437     /* For AF_XDP ZC, we disallow packets from spanning
0438      * multiple buffers, thus letting us skip that
0439      * handling in the fast path.
0440      */
0441     if (ring->xsk_pool)
0442         chain_len = 1;
0443     /* Max packet size for this queue - must not be set to a larger value
0444      * than 5 x DBUF
0445      */
0446     rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
0447                    chain_len * ring->rx_buf_len);
0448 
0449     /* Rx queue threshold in units of 64 */
0450     rlan_ctx.lrxqthresh = 1;
0451 
0452     /* Enable Flexible Descriptors in the queue context, which
0453      * allows this driver to select a specific receive descriptor format,
0454      * increasing the context priority to pick up the profile ID; the default
0455      * is 0x01; setting it to 0x03 ensures the profile is programmed even if
0456      * the previous context is of the same priority
0457      */
0458     if (vsi->type != ICE_VSI_VF)
0459         ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
0460     else
0461         ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
0462                     false);
0463 
0464     /* Absolute queue number out of 2K needs to be passed */
0465     err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
0466     if (err) {
0467         dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
0468             pf_q, err);
0469         return -EIO;
0470     }
0471 
0472     if (vsi->type == ICE_VSI_VF)
0473         return 0;
0474 
0475     /* configure Rx buffer alignment */
0476     if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
0477         ice_clear_ring_build_skb_ena(ring);
0478     else
0479         ice_set_ring_build_skb_ena(ring);
0480 
0481     ring->rx_offset = ice_rx_offset(ring);
0482 
0483     /* init queue specific tail register */
0484     ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
0485     writel(0, ring->tail);
0486 
0487     return 0;
0488 }
0489 
0490 /**
0491  * ice_vsi_cfg_rxq - Configure an Rx queue
0492  * @ring: the ring being configured
0493  *
0494  * Return 0 on success and a negative value on error.
0495  */
0496 int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
0497 {
0498     struct device *dev = ice_pf_to_dev(ring->vsi->back);
0499     u16 num_bufs = ICE_DESC_UNUSED(ring);
0500     int err;
0501 
0502     ring->rx_buf_len = ring->vsi->rx_buf_len;
0503 
0504     if (ring->vsi->type == ICE_VSI_PF) {
0505         if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
0506             /* coverity[check_return] */
0507             xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
0508                      ring->q_index, ring->q_vector->napi.napi_id);
0509 
0510         ring->xsk_pool = ice_xsk_pool(ring);
0511         if (ring->xsk_pool) {
0512             xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
0513 
0514             ring->rx_buf_len =
0515                 xsk_pool_get_rx_frame_size(ring->xsk_pool);
0516             err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
0517                              MEM_TYPE_XSK_BUFF_POOL,
0518                              NULL);
0519             if (err)
0520                 return err;
0521             xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
0522 
0523             dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
0524                  ring->q_index);
0525         } else {
0526             if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
0527                 /* coverity[check_return] */
0528                 xdp_rxq_info_reg(&ring->xdp_rxq,
0529                          ring->netdev,
0530                          ring->q_index, ring->q_vector->napi.napi_id);
0531 
0532             err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
0533                              MEM_TYPE_PAGE_SHARED,
0534                              NULL);
0535             if (err)
0536                 return err;
0537         }
0538     }
0539 
0540     err = ice_setup_rx_ctx(ring);
0541     if (err) {
0542         dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
0543             ring->q_index, err);
0544         return err;
0545     }
0546 
0547     if (ring->xsk_pool) {
0548         bool ok;
0549 
0550         if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
0551             dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
0552                  num_bufs, ring->q_index);
0553             dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
0554 
0555             return 0;
0556         }
0557 
0558         ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
0559         if (!ok) {
0560             u16 pf_q = ring->vsi->rxq_map[ring->q_index];
0561 
0562             dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
0563                  ring->q_index, pf_q);
0564         }
0565 
0566         return 0;
0567     }
0568 
0569     ice_alloc_rx_bufs(ring, num_bufs);
0570 
0571     return 0;
0572 }
0573 
0574 /**
0575  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
0576  * @qs_cfg: gathered variables needed for PF->VSI queue assignment
0577  *
0578  * This function first tries to find contiguous space. If it is not successful,
0579  * it tries with the scatter approach.
0580  *
0581  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
0582  */
0583 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
0584 {
0585     int ret = 0;
0586 
0587     ret = __ice_vsi_get_qs_contig(qs_cfg);
0588     if (ret) {
0589         /* contig failed, so try with scatter approach */
0590         qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
0591         qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
0592                     qs_cfg->scatter_count);
0593         ret = __ice_vsi_get_qs_sc(qs_cfg);
0594     }
0595     return ret;
0596 }
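
/* Minimal usage sketch (illustrative only; the field values below are
 * assumptions, not taken from this file):
 *
 *	struct ice_qs_cfg qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = vsi->tx_mapping_mode,
 *	};
 *
 *	if (__ice_vsi_get_qs(&qs_cfg))
 *		return -ENOMEM;
 */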
0597 
0598 /**
0599  * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
0600  * @vsi: the VSI being configured
0601  * @ena: start or stop the Rx ring
0602  * @rxq_idx: 0-based Rx queue index for the VSI passed in
0603  * @wait: wait or don't wait for configuration to finish in hardware
0604  *
0605  * Return 0 on success and negative on error.
0606  */
0607 int
0608 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
0609 {
0610     int pf_q = vsi->rxq_map[rxq_idx];
0611     struct ice_pf *pf = vsi->back;
0612     struct ice_hw *hw = &pf->hw;
0613     u32 rx_reg;
0614 
0615     rx_reg = rd32(hw, QRX_CTRL(pf_q));
0616 
0617     /* Skip if the queue is already in the requested state */
0618     if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
0619         return 0;
0620 
0621     /* turn on/off the queue */
0622     if (ena)
0623         rx_reg |= QRX_CTRL_QENA_REQ_M;
0624     else
0625         rx_reg &= ~QRX_CTRL_QENA_REQ_M;
0626     wr32(hw, QRX_CTRL(pf_q), rx_reg);
0627 
0628     if (!wait)
0629         return 0;
0630 
0631     ice_flush(hw);
0632     return ice_pf_rxq_wait(pf, pf_q, ena);
0633 }
0634 
0635 /**
0636  * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
0637  * @vsi: the VSI being configured
0638  * @ena: true/false to verify Rx ring has been enabled/disabled respectively
0639  * @rxq_idx: 0-based Rx queue index for the VSI passed in
0640  *
0641  * This routine waits for the given Rx queue of the VSI to reach the
0642  * enabled or disabled state. Returns 0 in case of success, or -ETIMEDOUT
0643  * if the queue fails to reach the requested state after multiple
0644  * retries.
0645  */
0646 int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
0647 {
0648     int pf_q = vsi->rxq_map[rxq_idx];
0649     struct ice_pf *pf = vsi->back;
0650 
0651     return ice_pf_rxq_wait(pf, pf_q, ena);
0652 }
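
/* Illustrative pairing of the two helpers above (assumed usage, not taken
 * from this file):
 *
 *	err = ice_vsi_ctrl_one_rx_ring(vsi, true, rxq_idx, false);
 *	if (!err)
 *		err = ice_vsi_wait_one_rx_ring(vsi, true, rxq_idx);
 */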
0653 
0654 /**
0655  * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
0656  * @vsi: the VSI being configured
0657  *
0658  * We allocate one q_vector per queue interrupt. If allocation fails we
0659  * return -ENOMEM.
0660  */
0661 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
0662 {
0663     struct device *dev = ice_pf_to_dev(vsi->back);
0664     u16 v_idx;
0665     int err;
0666 
0667     if (vsi->q_vectors[0]) {
0668         dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
0669         return -EEXIST;
0670     }
0671 
0672     for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
0673         err = ice_vsi_alloc_q_vector(vsi, v_idx);
0674         if (err)
0675             goto err_out;
0676     }
0677 
0678     return 0;
0679 
0680 err_out:
0681     while (v_idx--)
0682         ice_free_q_vector(vsi, v_idx);
0683 
0684     dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
0685         vsi->num_q_vectors, vsi->vsi_num, err);
0686     vsi->num_q_vectors = 0;
0687     return err;
0688 }
0689 
0690 /**
0691  * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
0692  * @vsi: the VSI being configured
0693  *
0694  * This function maps descriptor rings to the queue-specific vectors allotted
0695  * through the MSI-X enabling code. On a constrained vector budget, we map Tx
0696  * and Rx rings to the vector as "efficiently" as possible.
0697  */
0698 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
0699 {
0700     int q_vectors = vsi->num_q_vectors;
0701     u16 tx_rings_rem, rx_rings_rem;
0702     int v_id;
0703 
0704     /* initially, all of the VSI's queues remain to be assigned to vectors */
0705     tx_rings_rem = vsi->num_txq;
0706     rx_rings_rem = vsi->num_rxq;
0707 
0708     for (v_id = 0; v_id < q_vectors; v_id++) {
0709         struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
0710         u8 tx_rings_per_v, rx_rings_per_v;
0711         u16 q_id, q_base;
0712 
0713         /* Tx rings mapping to vector */
0714         tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
0715                           q_vectors - v_id);
0716         q_vector->num_ring_tx = tx_rings_per_v;
0717         q_vector->tx.tx_ring = NULL;
0718         q_vector->tx.itr_idx = ICE_TX_ITR;
0719         q_base = vsi->num_txq - tx_rings_rem;
0720 
0721         for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
0722             struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
0723 
0724             tx_ring->q_vector = q_vector;
0725             tx_ring->next = q_vector->tx.tx_ring;
0726             q_vector->tx.tx_ring = tx_ring;
0727         }
0728         tx_rings_rem -= tx_rings_per_v;
0729 
0730         /* Rx rings mapping to vector */
0731         rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
0732                           q_vectors - v_id);
0733         q_vector->num_ring_rx = rx_rings_per_v;
0734         q_vector->rx.rx_ring = NULL;
0735         q_vector->rx.itr_idx = ICE_RX_ITR;
0736         q_base = vsi->num_rxq - rx_rings_rem;
0737 
0738         for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
0739             struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
0740 
0741             rx_ring->q_vector = q_vector;
0742             rx_ring->next = q_vector->rx.rx_ring;
0743             q_vector->rx.rx_ring = rx_ring;
0744         }
0745         rx_rings_rem -= rx_rings_per_v;
0746     }
0747 }
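
/* Illustrative example (not part of the original source): with 8 Tx rings
 * and 3 vectors, vector 0 gets DIV_ROUND_UP(8, 3) = 3 rings, vector 1 gets
 * DIV_ROUND_UP(5, 2) = 3 rings and vector 2 gets DIV_ROUND_UP(2, 1) = 2
 * rings; Rx rings are distributed the same way.
 */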
0748 
0749 /**
0750  * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
0751  * @vsi: the VSI having memory freed
0752  */
0753 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
0754 {
0755     int v_idx;
0756 
0757     ice_for_each_q_vector(vsi, v_idx)
0758         ice_free_q_vector(vsi, v_idx);
0759 }
0760 
0761 /**
0762  * ice_vsi_cfg_txq - Configure single Tx queue
0763  * @vsi: the VSI that queue belongs to
0764  * @ring: Tx ring to be configured
0765  * @qg_buf: queue group buffer
0766  */
0767 int
0768 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
0769         struct ice_aqc_add_tx_qgrp *qg_buf)
0770 {
0771     u8 buf_len = struct_size(qg_buf, txqs, 1);
0772     struct ice_tlan_ctx tlan_ctx = { 0 };
0773     struct ice_aqc_add_txqs_perq *txq;
0774     struct ice_channel *ch = ring->ch;
0775     struct ice_pf *pf = vsi->back;
0776     struct ice_hw *hw = &pf->hw;
0777     int status;
0778     u16 pf_q;
0779     u8 tc;
0780 
0781     /* Configure XPS */
0782     ice_cfg_xps_tx_ring(ring);
0783 
0784     pf_q = ring->reg_idx;
0785     ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
0786     /* copy context contents into the qg_buf */
0787     qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
0788     ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
0789             ice_tlan_ctx_info);
0790 
0791     /* init queue specific tail reg. It is referred to as the
0792      * transmit comm scheduler queue doorbell.
0793      */
0794     ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
0795 
0796     if (IS_ENABLED(CONFIG_DCB))
0797         tc = ring->dcb_tc;
0798     else
0799         tc = 0;
0800 
0801     /* Add unique software queue handle of the Tx queue per
0802      * TC into the VSI Tx ring
0803      */
0804     if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
0805         ring->q_handle = ice_eswitch_calc_txq_handle(ring);
0806 
0807         if (ring->q_handle == ICE_INVAL_Q_INDEX)
0808             return -ENODEV;
0809     } else {
0810         ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
0811     }
0812 
0813     if (ch)
0814         status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
0815                      ring->q_handle, 1, qg_buf, buf_len,
0816                      NULL);
0817     else
0818         status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
0819                      ring->q_handle, 1, qg_buf, buf_len,
0820                      NULL);
0821     if (status) {
0822         dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
0823             status);
0824         return status;
0825     }
0826 
0827     /* Add Tx Queue TEID into the VSI Tx ring from the
0828      * response. This will complete configuring and
0829      * enabling the queue.
0830      */
0831     txq = &qg_buf->txqs[0];
0832     if (pf_q == le16_to_cpu(txq->txq_id))
0833         ring->txq_teid = le32_to_cpu(txq->q_teid);
0834 
0835     return 0;
0836 }
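
/* Illustrative caller sketch (assumptions: qg_buf points to a buffer sized
 * for one queue, e.g. kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL)):
 *
 *	ice_for_each_txq(vsi, i) {
 *		err = ice_vsi_cfg_txq(vsi, vsi->tx_rings[i], qg_buf);
 *		if (err)
 *			break;
 *	}
 */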
0837 
0838 /**
0839  * ice_cfg_itr - configure the initial interrupt throttle values
0840  * @hw: pointer to the HW structure
0841  * @q_vector: interrupt vector that's being configured
0842  *
0843  * Configure interrupt throttling values for the ring containers that are
0844  * associated with the interrupt vector passed in.
0845  */
0846 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
0847 {
0848     ice_cfg_itr_gran(hw);
0849 
0850     if (q_vector->num_ring_rx)
0851         ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
0852 
0853     if (q_vector->num_ring_tx)
0854         ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
0855 
0856     ice_write_intrl(q_vector, q_vector->intrl);
0857 }
0858 
0859 /**
0860  * ice_cfg_txq_interrupt - configure interrupt on Tx queue
0861  * @vsi: the VSI being configured
0862  * @txq: Tx queue being mapped to MSI-X vector
0863  * @msix_idx: MSI-X vector index within the function
0864  * @itr_idx: ITR index of the interrupt cause
0865  *
0866  * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
0867  * within the function space.
0868  */
0869 void
0870 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
0871 {
0872     struct ice_pf *pf = vsi->back;
0873     struct ice_hw *hw = &pf->hw;
0874     u32 val;
0875 
0876     itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
0877 
0878     val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
0879           ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
0880 
0881     wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
0882     if (ice_is_xdp_ena_vsi(vsi)) {
0883         u32 xdp_txq = txq + vsi->num_xdp_txq;
0884 
0885         wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
0886              val);
0887     }
0888     ice_flush(hw);
0889 }
0890 
0891 /**
0892  * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
0893  * @vsi: the VSI being configured
0894  * @rxq: Rx queue being mapped to MSI-X vector
0895  * @msix_idx: MSI-X vector index within the function
0896  * @itr_idx: ITR index of the interrupt cause
0897  *
0898  * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
0899  * within the function space.
0900  */
0901 void
0902 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
0903 {
0904     struct ice_pf *pf = vsi->back;
0905     struct ice_hw *hw = &pf->hw;
0906     u32 val;
0907 
0908     itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
0909 
0910     val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
0911           ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
0912 
0913     wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
0914 
0915     ice_flush(hw);
0916 }
0917 
0918 /**
0919  * ice_trigger_sw_intr - trigger a software interrupt
0920  * @hw: pointer to the HW structure
0921  * @q_vector: interrupt vector to trigger the software interrupt for
0922  */
0923 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
0924 {
0925     wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
0926          (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
0927          GLINT_DYN_CTL_SWINT_TRIG_M |
0928          GLINT_DYN_CTL_INTENA_M);
0929 }
0930 
0931 /**
0932  * ice_vsi_stop_tx_ring - Disable single Tx ring
0933  * @vsi: the VSI being configured
0934  * @rst_src: reset source
0935  * @rel_vmvf_num: Relative ID of VF/VM
0936  * @ring: Tx ring to be stopped
0937  * @txq_meta: Meta data of Tx ring to be stopped
0938  */
0939 int
0940 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
0941              u16 rel_vmvf_num, struct ice_tx_ring *ring,
0942              struct ice_txq_meta *txq_meta)
0943 {
0944     struct ice_pf *pf = vsi->back;
0945     struct ice_q_vector *q_vector;
0946     struct ice_hw *hw = &pf->hw;
0947     int status;
0948     u32 val;
0949 
0950     /* clear cause_ena bit for disabled queues */
0951     val = rd32(hw, QINT_TQCTL(ring->reg_idx));
0952     val &= ~QINT_TQCTL_CAUSE_ENA_M;
0953     wr32(hw, QINT_TQCTL(ring->reg_idx), val);
0954 
0955     /* software is expected to wait for 100 ns */
0956     ndelay(100);
0957 
0958     /* trigger a software interrupt for the vector
0959      * associated to the queue to schedule NAPI handler
0960      */
0961     q_vector = ring->q_vector;
0962     if (q_vector)
0963         ice_trigger_sw_intr(hw, q_vector);
0964 
0965     status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
0966                  txq_meta->tc, 1, &txq_meta->q_handle,
0967                  &txq_meta->q_id, &txq_meta->q_teid, rst_src,
0968                  rel_vmvf_num, NULL);
0969 
0970     /* if the disable queue command was exercised during an
0971      * active reset flow, -EBUSY is returned.
0972      * This is not an error as the reset operation disables
0973      * queues at the hardware level anyway.
0974      */
0975     if (status == -EBUSY) {
0976         dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
0977     } else if (status == -ENOENT) {
0978         dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
0979     } else if (status) {
0980         dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
0981             status);
0982         return status;
0983     }
0984 
0985     return 0;
0986 }
0987 
0988 /**
0989  * ice_fill_txq_meta - Prepare the Tx queue's meta data
0990  * @vsi: VSI that ring belongs to
0991  * @ring: ring that txq_meta will be based on
0992  * @txq_meta: a helper struct that wraps Tx queue's information
0993  *
0994  * Set up a helper struct that will contain all the necessary fields
0995  * needed for stopping the Tx queue
0996  */
0997 void
0998 ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
0999           struct ice_txq_meta *txq_meta)
1000 {
1001     struct ice_channel *ch = ring->ch;
1002     u8 tc;
1003 
1004     if (IS_ENABLED(CONFIG_DCB))
1005         tc = ring->dcb_tc;
1006     else
1007         tc = 0;
1008 
1009     txq_meta->q_id = ring->reg_idx;
1010     txq_meta->q_teid = ring->txq_teid;
1011     txq_meta->q_handle = ring->q_handle;
1012     if (ch) {
1013         txq_meta->vsi_idx = ch->ch_vsi->idx;
1014         txq_meta->tc = 0;
1015     } else {
1016         txq_meta->vsi_idx = vsi->idx;
1017         txq_meta->tc = tc;
1018     }
1019 }
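
/* Illustrative pairing (assumed usage, not part of this file): callers first
 * fill the meta data for a ring and then stop it:
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, ring, &txq_meta);
 */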