0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright(c) 1999 - 2018 Intel Corporation. */
0003 
0004 #include "ixgbe.h"
0005 #include "ixgbe_sriov.h"
0006 
0007 #ifdef CONFIG_IXGBE_DCB
0008 /**
0009  * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
0010  * @adapter: board private structure to initialize
0011  *
0012  * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
0013  * will also try to cache the proper offsets if RSS/FCoE are enabled along
0014  * with VMDq.
0015  *
0016  **/
0017 static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
0018 {
0019 #ifdef IXGBE_FCOE
0020     struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
0021 #endif /* IXGBE_FCOE */
0022     struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
0023     int i;
0024     u16 reg_idx, pool;
0025     u8 tcs = adapter->hw_tcs;
0026 
0027     /* verify we have DCB queueing enabled before proceeding */
0028     if (tcs <= 1)
0029         return false;
0030 
0031     /* verify we have VMDq enabled before proceeding */
0032     if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
0033         return false;
0034 
0035     /* start at VMDq register offset for SR-IOV enabled setups */
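    /*
     * __ALIGN_MASK(x, m) is ((x) + (m)) & ~(m), so __ALIGN_MASK(1, ~vmdq->mask)
     * rounds 1 up to the pool stride, i.e. the number of queue registers per
     * VMDq pool; reg_idx below therefore starts at the first queue register
     * of the first pool owned by the PF.
     */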
0036     reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
0037     for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
0038         /* If the per-pool index reaches the TC count, move to the next pool */
0039         if ((reg_idx & ~vmdq->mask) >= tcs) {
0040             pool++;
0041             reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
0042         }
0043         adapter->rx_ring[i]->reg_idx = reg_idx;
0044         adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
0045     }
0046 
0047     reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
0048     for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
0049         /* If we are greater than indices move to next pool */
0050         if ((reg_idx & ~vmdq->mask) >= tcs)
0051             reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
0052         adapter->tx_ring[i]->reg_idx = reg_idx;
0053     }
0054 
0055 #ifdef IXGBE_FCOE
0056     /* nothing to do if FCoE is disabled */
0057     if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
0058         return true;
0059 
0060     /* The work is already done if the FCoE ring is shared */
0061     if (fcoe->offset < tcs)
0062         return true;
0063 
0064     /* The FCoE rings exist separately, so we need to move their reg_idx */
0065     if (fcoe->indices) {
0066         u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
0067         u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
0068 
0069         reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
0070         for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
0071             reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
0072             adapter->rx_ring[i]->reg_idx = reg_idx;
0073             adapter->rx_ring[i]->netdev = adapter->netdev;
0074             reg_idx++;
0075         }
0076 
0077         reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
0078         for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
0079             reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
0080             adapter->tx_ring[i]->reg_idx = reg_idx;
0081             reg_idx++;
0082         }
0083     }
0084 
0085 #endif /* IXGBE_FCOE */
0086     return true;
0087 }
0088 
0089 /* ixgbe_get_first_reg_idx - Return first register index associated with ring */
0090 static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
0091                     unsigned int *tx, unsigned int *rx)
0092 {
0093     struct ixgbe_hw *hw = &adapter->hw;
0094     u8 num_tcs = adapter->hw_tcs;
0095 
0096     *tx = 0;
0097     *rx = 0;
0098 
0099     switch (hw->mac.type) {
0100     case ixgbe_mac_82598EB:
0101         /* TxQs/TC: 4   RxQs/TC: 8 */
0102         *tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
0103         *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
0104         break;
0105     case ixgbe_mac_82599EB:
0106     case ixgbe_mac_X540:
0107     case ixgbe_mac_X550:
0108     case ixgbe_mac_X550EM_x:
0109     case ixgbe_mac_x550em_a:
0110         if (num_tcs > 4) {
0111             /*
0112              * TCs    : TC0/1 TC2/3 TC4-7
0113              * TxQs/TC:    32    16     8
0114              * RxQs/TC:    16    16    16
0115              */
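            /* e.g. for tc = 5: *rx = 5 << 4 = 80 and *tx = (5 + 8) << 3 = 104 */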
0116             *rx = tc << 4;
0117             if (tc < 3)
0118                 *tx = tc << 5;      /*   0,  32,  64 */
0119             else if (tc < 5)
0120                 *tx = (tc + 2) << 4;    /*  80,  96 */
0121             else
0122                 *tx = (tc + 8) << 3;    /* 104, 112, 120 */
0123         } else {
0124             /*
0125              * TCs    : TC0 TC1 TC2/3
0126              * TxQs/TC:  64  32    16
0127              * RxQs/TC:  32  32    32
0128              */
0129             *rx = tc << 5;
0130             if (tc < 2)
0131                 *tx = tc << 6;      /*  0,  64 */
0132             else
0133                 *tx = (tc + 4) << 4;    /* 96, 112 */
0134         }
0135         break;
0136     default:
0137         break;
0138     }
0139 }
0140 
0141 /**
0142  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
0143  * @adapter: board private structure to initialize
0144  *
0145  * Cache the descriptor ring offsets for DCB to the assigned rings.
0146  *
0147  **/
0148 static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
0149 {
0150     u8 num_tcs = adapter->hw_tcs;
0151     unsigned int tx_idx, rx_idx;
0152     int tc, offset, rss_i, i;
0153 
0154     /* verify we have DCB queueing enabled before proceeding */
0155     if (num_tcs <= 1)
0156         return false;
0157 
0158     rss_i = adapter->ring_feature[RING_F_RSS].indices;
0159 
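    /*
     * Rings are laid out TC-major: array entries [tc * rss_i, (tc + 1) * rss_i)
     * belong to traffic class "tc", and their register indices count up from
     * the per-TC bases returned by ixgbe_get_first_reg_idx().
     */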
0160     for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
0161         ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
0162         for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
0163             adapter->tx_ring[offset + i]->reg_idx = tx_idx;
0164             adapter->rx_ring[offset + i]->reg_idx = rx_idx;
0165             adapter->rx_ring[offset + i]->netdev = adapter->netdev;
0166             adapter->tx_ring[offset + i]->dcb_tc = tc;
0167             adapter->rx_ring[offset + i]->dcb_tc = tc;
0168         }
0169     }
0170 
0171     return true;
0172 }
0173 
0174 #endif
0175 /**
0176  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
0177  * @adapter: board private structure to initialize
0178  *
0179  * SR-IOV doesn't use any descriptor rings but changes the default if
0180  * no other mapping is used.
0181  *
0182  */
0183 static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
0184 {
0185 #ifdef IXGBE_FCOE
0186     struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
0187 #endif /* IXGBE_FCOE */
0188     struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
0189     struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
0190     u16 reg_idx, pool;
0191     int i;
0192 
0193     /* only proceed if VMDq is enabled */
0194     if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
0195         return false;
0196 
0197     /* start at VMDq register offset for SR-IOV enabled setups */
0198     pool = 0;
0199     reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
0200     for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
0201 #ifdef IXGBE_FCOE
0202         /* Allow first FCoE queue to be mapped as RSS */
0203         if (fcoe->offset && (i > fcoe->offset))
0204             break;
0205 #endif
0206         /* If the per-pool index reaches the RSS count, move to the next pool */
0207         if ((reg_idx & ~vmdq->mask) >= rss->indices) {
0208             pool++;
0209             reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
0210         }
0211         adapter->rx_ring[i]->reg_idx = reg_idx;
0212         adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
0213     }
0214 
0215 #ifdef IXGBE_FCOE
0216     /* FCoE uses a linear block of queues, so just assign them 1:1 */
0217     for (; i < adapter->num_rx_queues; i++, reg_idx++) {
0218         adapter->rx_ring[i]->reg_idx = reg_idx;
0219         adapter->rx_ring[i]->netdev = adapter->netdev;
0220     }
0221 
0222 #endif
0223     reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
0224     for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
0225 #ifdef IXGBE_FCOE
0226         /* Allow first FCoE queue to be mapped as RSS */
0227         if (fcoe->offset && (i > fcoe->offset))
0228             break;
0229 #endif
0230         /* If the per-pool index reaches the RSS count, move to the next pool */
0231         if ((reg_idx & rss->mask) >= rss->indices)
0232             reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
0233         adapter->tx_ring[i]->reg_idx = reg_idx;
0234     }
0235 
0236 #ifdef IXGBE_FCOE
0237     /* FCoE uses a linear block of queues, so just assign them 1:1 */
0238     for (; i < adapter->num_tx_queues; i++, reg_idx++)
0239         adapter->tx_ring[i]->reg_idx = reg_idx;
0240 
0241 #endif
0242 
0243     return true;
0244 }
0245 
0246 /**
0247  * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
0248  * @adapter: board private structure to initialize
0249  *
0250  * Cache the descriptor ring offsets for RSS to the assigned rings.
0251  *
0252  **/
0253 static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
0254 {
0255     int i, reg_idx;
0256 
0257     for (i = 0; i < adapter->num_rx_queues; i++) {
0258         adapter->rx_ring[i]->reg_idx = i;
0259         adapter->rx_ring[i]->netdev = adapter->netdev;
0260     }
0261     for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
0262         adapter->tx_ring[i]->reg_idx = reg_idx;
0263     for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
0264         adapter->xdp_ring[i]->reg_idx = reg_idx;
0265 
0266     return true;
0267 }
0268 
0269 /**
0270  * ixgbe_cache_ring_register - Descriptor ring to register mapping
0271  * @adapter: board private structure to initialize
0272  *
0273  * Once we know the feature-set enabled for the device, we'll cache
0274  * the register offset the descriptor ring is assigned to.
0275  *
0276  * Note, the order of the various feature calls is important.  It must start
0277  * with the "most" features enabled at the same time, then trickle down to
0278  * the fewest features enabled at once.
0279  **/
0280 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
0281 {
0282     /* start with default case */
0283     adapter->rx_ring[0]->reg_idx = 0;
0284     adapter->tx_ring[0]->reg_idx = 0;
0285 
0286 #ifdef CONFIG_IXGBE_DCB
0287     if (ixgbe_cache_ring_dcb_sriov(adapter))
0288         return;
0289 
0290     if (ixgbe_cache_ring_dcb(adapter))
0291         return;
0292 
0293 #endif
0294     if (ixgbe_cache_ring_sriov(adapter))
0295         return;
0296 
0297     ixgbe_cache_ring_rss(adapter);
0298 }
0299 
0300 static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
0301 {
0302     int queues;
0303 
0304     queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
0305     return adapter->xdp_prog ? queues : 0;
0306 }
0307 
0308 #define IXGBE_RSS_64Q_MASK  0x3F
0309 #define IXGBE_RSS_16Q_MASK  0xF
0310 #define IXGBE_RSS_8Q_MASK   0x7
0311 #define IXGBE_RSS_4Q_MASK   0x3
0312 #define IXGBE_RSS_2Q_MASK   0x1
0313 #define IXGBE_RSS_DISABLED_MASK 0x0
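/* each non-zero RSS mask above is (queue count - 1), used to wrap per-pool queue indices */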
0314 
0315 #ifdef CONFIG_IXGBE_DCB
0316 /**
0317  * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
0318  * @adapter: board private structure to initialize
0319  *
0320  * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
0321  * and VM pools where appropriate.  Also assign queues based on DCB
0322  * priorities and map accordingly.
0323  *
0324  **/
0325 static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
0326 {
0327     int i;
0328     u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
0329     u16 vmdq_m = 0;
0330 #ifdef IXGBE_FCOE
0331     u16 fcoe_i = 0;
0332 #endif
0333     u8 tcs = adapter->hw_tcs;
0334 
0335     /* verify we have DCB queueing enabled before proceeding */
0336     if (tcs <= 1)
0337         return false;
0338 
0339     /* verify we have VMDq enabled before proceeding */
0340     if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
0341         return false;
0342 
0343     /* limit VMDq instances on the PF by number of Tx queues */
0344     vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
0345 
0346     /* Add starting offset to total pool count */
0347     vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
0348 
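    /*
     * The pool limits below follow from the 128 queue registers on these
     * devices: 16 pools * 8 TCs = 128 queues, or 32 pools * 4 TCs = 128.
     */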
0349     /* 16 pools w/ 8 TC per pool */
0350     if (tcs > 4) {
0351         vmdq_i = min_t(u16, vmdq_i, 16);
0352         vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
0353     /* 32 pools w/ 4 TC per pool */
0354     } else {
0355         vmdq_i = min_t(u16, vmdq_i, 32);
0356         vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
0357     }
0358 
0359 #ifdef IXGBE_FCOE
0360     /* queues in the remaining pools are available for FCoE */
0361     fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
0362 
0363 #endif
0364     /* remove the starting offset from the pool count */
0365     vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
0366 
0367     /* save features for later use */
0368     adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
0369     adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
0370 
0371     /*
0372      * We do not support DCB, VMDq, and RSS all simultaneously
0373      * so we will disable RSS since it is the lowest priority
0374      */
0375     adapter->ring_feature[RING_F_RSS].indices = 1;
0376     adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
0377 
0378     /* disable ATR as it is not supported when VMDq is enabled */
0379     adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
0380 
0381     adapter->num_rx_pools = vmdq_i;
0382     adapter->num_rx_queues_per_pool = tcs;
0383 
0384     adapter->num_tx_queues = vmdq_i * tcs;
0385     adapter->num_xdp_queues = 0;
0386     adapter->num_rx_queues = vmdq_i * tcs;
0387 
0388 #ifdef IXGBE_FCOE
0389     if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
0390         struct ixgbe_ring_feature *fcoe;
0391 
0392         fcoe = &adapter->ring_feature[RING_F_FCOE];
0393 
0394         /* limit ourselves based on feature limits */
0395         fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
0396 
0397         if (fcoe_i) {
0398             /* alloc queues for FCoE separately */
0399             fcoe->indices = fcoe_i;
0400             fcoe->offset = vmdq_i * tcs;
0401 
0402             /* add queues to adapter */
0403             adapter->num_tx_queues += fcoe_i;
0404             adapter->num_rx_queues += fcoe_i;
0405         } else if (tcs > 1) {
0406             /* use the queue belonging to the FCoE TC */
0407             fcoe->indices = 1;
0408             fcoe->offset = ixgbe_fcoe_get_tc(adapter);
0409         } else {
0410             adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
0411 
0412             fcoe->indices = 0;
0413             fcoe->offset = 0;
0414         }
0415     }
0416 
0417 #endif /* IXGBE_FCOE */
0418     /* configure TC to queue mapping */
0419     for (i = 0; i < tcs; i++)
0420         netdev_set_tc_queue(adapter->netdev, i, 1, i);
0421 
0422     return true;
0423 }
0424 
0425 static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
0426 {
0427     struct net_device *dev = adapter->netdev;
0428     struct ixgbe_ring_feature *f;
0429     int rss_i, rss_m, i;
0430     int tcs;
0431 
0432     /* Map queue offset and counts onto allocated tx queues */
0433     tcs = adapter->hw_tcs;
0434 
0435     /* verify we have DCB queueing enabled before proceeding */
0436     if (tcs <= 1)
0437         return false;
0438 
0439     /* determine the upper limit for our current DCB mode */
0440     rss_i = dev->num_tx_queues / tcs;
0441     if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
0442         /* 8 TC w/ 4 queues per TC */
0443         rss_i = min_t(u16, rss_i, 4);
0444         rss_m = IXGBE_RSS_4Q_MASK;
0445     } else if (tcs > 4) {
0446         /* 8 TC w/ 8 queues per TC */
0447         rss_i = min_t(u16, rss_i, 8);
0448         rss_m = IXGBE_RSS_8Q_MASK;
0449     } else {
0450         /* 4 TC w/ 16 queues per TC */
0451         rss_i = min_t(u16, rss_i, 16);
0452         rss_m = IXGBE_RSS_16Q_MASK;
0453     }
0454 
0455     /* set RSS mask and indices */
0456     f = &adapter->ring_feature[RING_F_RSS];
0457     rss_i = min_t(int, rss_i, f->limit);
0458     f->indices = rss_i;
0459     f->mask = rss_m;
0460 
0461     /* disable ATR as it is not supported when multiple TCs are enabled */
0462     adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
0463 
0464 #ifdef IXGBE_FCOE
0465     /* FCoE enabled queues require special configuration indexed
0466      * by feature specific indices and offset. Here we map FCoE
0467      * indices onto the DCB queue pairs allowing FCoE to own
0468      * configuration later.
0469      */
0470     if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
0471         u8 tc = ixgbe_fcoe_get_tc(adapter);
0472 
0473         f = &adapter->ring_feature[RING_F_FCOE];
0474         f->indices = min_t(u16, rss_i, f->limit);
0475         f->offset = rss_i * tc;
0476     }
0477 
0478 #endif /* IXGBE_FCOE */
0479     for (i = 0; i < tcs; i++)
0480         netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
0481 
0482     adapter->num_tx_queues = rss_i * tcs;
0483     adapter->num_xdp_queues = 0;
0484     adapter->num_rx_queues = rss_i * tcs;
0485 
0486     return true;
0487 }
0488 
0489 #endif
0490 /**
0491  * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
0492  * @adapter: board private structure to initialize
0493  *
0494  * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
0495  * and VM pools where appropriate.  If RSS is available, then also try to
0496  * enable RSS and map accordingly.
0497  *
0498  **/
0499 static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
0500 {
0501     u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
0502     u16 vmdq_m = 0;
0503     u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
0504     u16 rss_m = IXGBE_RSS_DISABLED_MASK;
0505 #ifdef IXGBE_FCOE
0506     u16 fcoe_i = 0;
0507 #endif
0508 
0509     /* only proceed if SR-IOV is enabled */
0510     if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
0511         return false;
0512 
0513     /* limit l2fwd RSS based on total Tx queue limit */
0514     rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
0515 
0516     /* Add starting offset to total pool count */
0517     vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
0518 
0519     /* double check we are limited to maximum pools */
0520     vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
0521 
0522     /* 64 pool mode with 2 queues per pool */
0523     if (vmdq_i > 32) {
0524         vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
0525         rss_m = IXGBE_RSS_2Q_MASK;
0526         rss_i = min_t(u16, rss_i, 2);
0527     /* 32 pool mode with up to 4 queues per pool */
0528     } else {
0529         vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
0530         rss_m = IXGBE_RSS_4Q_MASK;
0531         /* We can support 4, 2, or 1 queues */
0532         rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
0533     }
0534 
0535 #ifdef IXGBE_FCOE
0536     /* queues in the remaining pools are available for FCoE */
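    /* e.g. with 4 queues per pool and 20 pools in use, fcoe_i = 128 - 80 = 48 */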
0537     fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
0538 
0539 #endif
0540     /* remove the starting offset from the pool count */
0541     vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
0542 
0543     /* save features for later use */
0544     adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
0545     adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
0546 
0547     /* limit RSS based on user input and save for later use */
0548     adapter->ring_feature[RING_F_RSS].indices = rss_i;
0549     adapter->ring_feature[RING_F_RSS].mask = rss_m;
0550 
0551     adapter->num_rx_pools = vmdq_i;
0552     adapter->num_rx_queues_per_pool = rss_i;
0553 
0554     adapter->num_rx_queues = vmdq_i * rss_i;
0555     adapter->num_tx_queues = vmdq_i * rss_i;
0556     adapter->num_xdp_queues = 0;
0557 
0558     /* disable ATR as it is not supported when VMDq is enabled */
0559     adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
0560 
0561 #ifdef IXGBE_FCOE
0562     /*
0563      * FCoE can use rings from adjacent buffers to allow RSS
0564      * like behavior.  To account for this we need to add the
0565      * FCoE indices to the total ring count.
0566      */
0567     if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
0568         struct ixgbe_ring_feature *fcoe;
0569 
0570         fcoe = &adapter->ring_feature[RING_F_FCOE];
0571 
0572         /* limit ourselves based on feature limits */
0573         fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
0574 
0575         if (vmdq_i > 1 && fcoe_i) {
0576             /* alloc queues for FCoE separately */
0577             fcoe->indices = fcoe_i;
0578             fcoe->offset = vmdq_i * rss_i;
0579         } else {
0580             /* merge FCoE queues with RSS queues */
0581             fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
0582 
0583             /* limit indices to rss_i if MSI-X is disabled */
0584             if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
0585                 fcoe_i = rss_i;
0586 
0587             /* attempt to reserve some queues for just FCoE */
0588             fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
0589             fcoe->offset = fcoe_i - fcoe->indices;
0590 
0591             fcoe_i -= rss_i;
0592         }
0593 
0594         /* add queues to adapter */
0595         adapter->num_tx_queues += fcoe_i;
0596         adapter->num_rx_queues += fcoe_i;
0597     }
0598 
0599 #endif
0600     /* To support macvlan offload we have to use num_tc to
0601      * restrict the queues that can be used by the device.
0602      * By doing this we can avoid reporting a false number of
0603      * queues.
0604      */
0605     if (vmdq_i > 1)
0606         netdev_set_num_tc(adapter->netdev, 1);
0607 
0608     /* populate TC0 for use by pool 0 */
0609     netdev_set_tc_queue(adapter->netdev, 0,
0610                 adapter->num_rx_queues_per_pool, 0);
0611 
0612     return true;
0613 }
0614 
0615 /**
0616  * ixgbe_set_rss_queues - Allocate queues for RSS
0617  * @adapter: board private structure to initialize
0618  *
0619  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
0620  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
0621  *
0622  **/
0623 static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
0624 {
0625     struct ixgbe_hw *hw = &adapter->hw;
0626     struct ixgbe_ring_feature *f;
0627     u16 rss_i;
0628 
0629     /* set the RSS mask for the device's 16 or 64 queue RSS limit */
0630     f = &adapter->ring_feature[RING_F_RSS];
0631     rss_i = f->limit;
0632 
0633     f->indices = rss_i;
0634 
0635     if (hw->mac.type < ixgbe_mac_X550)
0636         f->mask = IXGBE_RSS_16Q_MASK;
0637     else
0638         f->mask = IXGBE_RSS_64Q_MASK;
0639 
0640     /* disable ATR by default; it will be configured below */
0641     adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
0642 
0643     /*
0644      * Use Flow Director in addition to RSS to ensure the best
0645      * distribution of flows across cores, even when an FDIR flow
0646      * isn't matched.
0647      */
0648     if (rss_i > 1 && adapter->atr_sample_rate) {
0649         f = &adapter->ring_feature[RING_F_FDIR];
0650 
0651         rss_i = f->indices = f->limit;
0652 
0653         if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
0654             adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
0655     }
0656 
0657 #ifdef IXGBE_FCOE
0658     /*
0659      * FCoE can exist on the same rings as standard network traffic
0660      * however it is preferred to avoid that if possible.  In order
0661      * to get the best performance we allocate as many FCoE queues
0662      * as we can and we place them at the end of the ring array to
0663      * avoid sharing queues with standard RSS on systems with 24 or
0664      * more CPUs.
0665      */
0666     if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
0667         struct net_device *dev = adapter->netdev;
0668         u16 fcoe_i;
0669 
0670         f = &adapter->ring_feature[RING_F_FCOE];
0671 
0672         /* merge FCoE queues with RSS queues */
0673         fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
0674         fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
0675 
0676         /* limit indices to rss_i if MSI-X is disabled */
0677         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
0678             fcoe_i = rss_i;
0679 
0680         /* attempt to reserve some queues for just FCoE */
0681         f->indices = min_t(u16, fcoe_i, f->limit);
0682         f->offset = fcoe_i - f->indices;
0683         rss_i = max_t(u16, fcoe_i, rss_i);
0684     }
0685 
0686 #endif /* IXGBE_FCOE */
0687     adapter->num_rx_queues = rss_i;
0688     adapter->num_tx_queues = rss_i;
0689     adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
0690 
0691     return true;
0692 }
0693 
0694 /**
0695  * ixgbe_set_num_queues - Allocate queues for device, feature dependent
0696  * @adapter: board private structure to initialize
0697  *
0698  * This is the top level queue allocation routine.  The order here is very
0699  * important, starting with the "most" number of features turned on at once,
0700  * and ending with the smallest set of features.  This way large combinations
0701  * can be allocated if they're turned on, and smaller combinations are the
0702  * fallthrough conditions.
0703  *
0704  **/
0705 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
0706 {
0707     /* Start with base case */
0708     adapter->num_rx_queues = 1;
0709     adapter->num_tx_queues = 1;
0710     adapter->num_xdp_queues = 0;
0711     adapter->num_rx_pools = 1;
0712     adapter->num_rx_queues_per_pool = 1;
0713 
0714 #ifdef CONFIG_IXGBE_DCB
0715     if (ixgbe_set_dcb_sriov_queues(adapter))
0716         return;
0717 
0718     if (ixgbe_set_dcb_queues(adapter))
0719         return;
0720 
0721 #endif
0722     if (ixgbe_set_sriov_queues(adapter))
0723         return;
0724 
0725     ixgbe_set_rss_queues(adapter);
0726 }
0727 
0728 /**
0729  * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
0730  * @adapter: board private structure
0731  *
0732  * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
0733  * return a negative error code if unable to acquire MSI-X vectors for any
0734  * reason.
0735  */
0736 static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
0737 {
0738     struct ixgbe_hw *hw = &adapter->hw;
0739     int i, vectors, vector_threshold;
0740 
0741     /* We start by asking for one vector per queue pair with XDP queues
0742      * being stacked with TX queues.
0743      */
0744     vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
0745     vectors = max(vectors, adapter->num_xdp_queues);
0746 
0747     /* It is easy to be greedy for MSI-X vectors. However, it really
0748      * doesn't do much good if we have a lot more vectors than CPUs. We'll
0749      * be somewhat conservative and only ask for (roughly) the same number
0750      * of vectors as there are CPUs.
0751      */
0752     vectors = min_t(int, vectors, num_online_cpus());
0753 
0754     /* Some vectors are necessary for non-queue interrupts */
0755     vectors += NON_Q_VECTORS;
0756 
0757     /* Hardware can only support a maximum of hw->mac.max_msix_vectors.
0758      * With features such as RSS and VMDq, we can easily surpass the
0759      * number of Rx and Tx descriptor queues supported by our device.
0760      * Thus, we cap the maximum in the rare cases where the CPU count also
0761      * exceeds our vector limit.
0762      */
0763     vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
0764 
0765     /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
0766      * handler, and (2) an Other (Link Status Change, etc.) handler.
0767      */
0768     vector_threshold = MIN_MSIX_COUNT;
0769 
0770     adapter->msix_entries = kcalloc(vectors,
0771                     sizeof(struct msix_entry),
0772                     GFP_KERNEL);
0773     if (!adapter->msix_entries)
0774         return -ENOMEM;
0775 
0776     for (i = 0; i < vectors; i++)
0777         adapter->msix_entries[i].entry = i;
0778 
0779     vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
0780                     vector_threshold, vectors);
0781 
0782     if (vectors < 0) {
0783         /* A negative count of allocated vectors indicates an error in
0784          * acquiring MSI-X vectors within the specified range.
0785          */
0786         e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
0787                vectors);
0788 
0789         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
0790         kfree(adapter->msix_entries);
0791         adapter->msix_entries = NULL;
0792 
0793         return vectors;
0794     }
0795 
0796     /* we successfully allocated some number of vectors within our
0797      * requested range.
0798      */
0799     adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
0800 
0801     /* Adjust for only the vectors we'll use, which is the minimum
0802      * of max_q_vectors and the number of vectors we were allocated.
0803      */
0804     vectors -= NON_Q_VECTORS;
0805     adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
0806 
0807     return 0;
0808 }
0809 
0810 static void ixgbe_add_ring(struct ixgbe_ring *ring,
0811                struct ixgbe_ring_container *head)
0812 {
0813     ring->next = head->ring;
0814     head->ring = ring;
0815     head->count++;
0816     head->next_update = jiffies + 1;
0817 }
0818 
0819 /**
0820  * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
0821  * @adapter: board private structure to initialize
0822  * @v_count: q_vectors allocated on adapter, used for ring interleaving
0823  * @v_idx: index of vector in adapter struct
0824  * @txr_count: total number of Tx rings to allocate
0825  * @txr_idx: index of first Tx ring to allocate
0826  * @xdp_count: total number of XDP rings to allocate
0827  * @xdp_idx: index of first XDP ring to allocate
0828  * @rxr_count: total number of Rx rings to allocate
0829  * @rxr_idx: index of first Rx ring to allocate
0830  *
0831  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
0832  **/
0833 static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
0834                 int v_count, int v_idx,
0835                 int txr_count, int txr_idx,
0836                 int xdp_count, int xdp_idx,
0837                 int rxr_count, int rxr_idx)
0838 {
0839     int node = dev_to_node(&adapter->pdev->dev);
0840     struct ixgbe_q_vector *q_vector;
0841     struct ixgbe_ring *ring;
0842     int cpu = -1;
0843     int ring_count;
0844     u8 tcs = adapter->hw_tcs;
0845 
0846     ring_count = txr_count + rxr_count + xdp_count;
0847 
0848     /* customize cpu for Flow Director mapping */
0849     if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
0850         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
0851         if (rss_i > 1 && adapter->atr_sample_rate) {
0852             cpu = cpumask_local_spread(v_idx, node);
0853             node = cpu_to_node(cpu);
0854         }
0855     }
0856 
0857     /* allocate q_vector and rings */
0858     q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
0859                 GFP_KERNEL, node);
0860     if (!q_vector)
0861         q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
0862                    GFP_KERNEL);
0863     if (!q_vector)
0864         return -ENOMEM;
0865 
0866     /* setup affinity mask and node */
0867     if (cpu != -1)
0868         cpumask_set_cpu(cpu, &q_vector->affinity_mask);
0869     q_vector->numa_node = node;
0870 
0871 #ifdef CONFIG_IXGBE_DCA
0872     /* initialize CPU for DCA */
0873     q_vector->cpu = -1;
0874 
0875 #endif
0876     /* initialize NAPI */
0877     netif_napi_add(adapter->netdev, &q_vector->napi,
0878                ixgbe_poll, 64);
0879 
0880     /* tie q_vector and adapter together */
0881     adapter->q_vector[v_idx] = q_vector;
0882     q_vector->adapter = adapter;
0883     q_vector->v_idx = v_idx;
0884 
0885     /* initialize work limits */
0886     q_vector->tx.work_limit = adapter->tx_work_limit;
0887 
0888     /* Initialize setting for adaptive ITR */
0889     q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
0890                IXGBE_ITR_ADAPTIVE_LATENCY;
0891     q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
0892                IXGBE_ITR_ADAPTIVE_LATENCY;
0893 
0894     /* initialize ITR */
0895     if (txr_count && !rxr_count) {
0896         /* tx only vector */
0897         if (adapter->tx_itr_setting == 1)
0898             q_vector->itr = IXGBE_12K_ITR;
0899         else
0900             q_vector->itr = adapter->tx_itr_setting;
0901     } else {
0902         /* rx or rx/tx vector */
0903         if (adapter->rx_itr_setting == 1)
0904             q_vector->itr = IXGBE_20K_ITR;
0905         else
0906             q_vector->itr = adapter->rx_itr_setting;
0907     }
0908 
0909     /* initialize pointer to rings */
0910     ring = q_vector->ring;
0911 
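    /*
     * All rings owned by this vector are stored back to back in its ring[]
     * flexible array.  Tx and Rx queue indices step by v_count between rings,
     * interleaving rings across the allocated q_vectors, while XDP queue
     * indices step by one.
     */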
0912     while (txr_count) {
0913         /* assign generic ring traits */
0914         ring->dev = &adapter->pdev->dev;
0915         ring->netdev = adapter->netdev;
0916 
0917         /* configure backlink on ring */
0918         ring->q_vector = q_vector;
0919 
0920         /* update q_vector Tx values */
0921         ixgbe_add_ring(ring, &q_vector->tx);
0922 
0923         /* apply Tx specific ring traits */
0924         ring->count = adapter->tx_ring_count;
0925         ring->queue_index = txr_idx;
0926 
0927         /* assign ring to adapter */
0928         WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
0929 
0930         /* update count and index */
0931         txr_count--;
0932         txr_idx += v_count;
0933 
0934         /* push pointer to next ring */
0935         ring++;
0936     }
0937 
0938     while (xdp_count) {
0939         /* assign generic ring traits */
0940         ring->dev = &adapter->pdev->dev;
0941         ring->netdev = adapter->netdev;
0942 
0943         /* configure backlink on ring */
0944         ring->q_vector = q_vector;
0945 
0946         /* update q_vector Tx values */
0947         ixgbe_add_ring(ring, &q_vector->tx);
0948 
0949         /* apply Tx specific ring traits */
0950         ring->count = adapter->tx_ring_count;
0951         ring->queue_index = xdp_idx;
0952         set_ring_xdp(ring);
0953         spin_lock_init(&ring->tx_lock);
0954 
0955         /* assign ring to adapter */
0956         WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
0957 
0958         /* update count and index */
0959         xdp_count--;
0960         xdp_idx++;
0961 
0962         /* push pointer to next ring */
0963         ring++;
0964     }
0965 
0966     while (rxr_count) {
0967         /* assign generic ring traits */
0968         ring->dev = &adapter->pdev->dev;
0969         ring->netdev = adapter->netdev;
0970 
0971         /* configure backlink on ring */
0972         ring->q_vector = q_vector;
0973 
0974         /* update q_vector Rx values */
0975         ixgbe_add_ring(ring, &q_vector->rx);
0976 
0977         /*
0978          * 82599 errata, UDP frames with a 0 checksum
0979          * can be marked as checksum errors.
0980          */
0981         if (adapter->hw.mac.type == ixgbe_mac_82599EB)
0982             set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
0983 
0984 #ifdef IXGBE_FCOE
0985         if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
0986             struct ixgbe_ring_feature *f;
0987             f = &adapter->ring_feature[RING_F_FCOE];
0988             if ((rxr_idx >= f->offset) &&
0989                 (rxr_idx < f->offset + f->indices))
0990                 set_bit(__IXGBE_RX_FCOE, &ring->state);
0991         }
0992 
0993 #endif /* IXGBE_FCOE */
0994         /* apply Rx specific ring traits */
0995         ring->count = adapter->rx_ring_count;
0996         ring->queue_index = rxr_idx;
0997 
0998         /* assign ring to adapter */
0999         WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
1000 
1001         /* update count and index */
1002         rxr_count--;
1003         rxr_idx += v_count;
1004 
1005         /* push pointer to next ring */
1006         ring++;
1007     }
1008 
1009     return 0;
1010 }
1011 
1012 /**
1013  * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
1014  * @adapter: board private structure to initialize
1015  * @v_idx: Index of vector to be freed
1016  *
1017  * This function frees the memory allocated to the q_vector.  In addition, if
1018  * NAPI is enabled, it will delete any references to the NAPI struct prior
1019  * to freeing the q_vector.
1020  **/
1021 static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
1022 {
1023     struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
1024     struct ixgbe_ring *ring;
1025 
1026     ixgbe_for_each_ring(ring, q_vector->tx) {
1027         if (ring_is_xdp(ring))
1028             WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
1029         else
1030             WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
1031     }
1032 
1033     ixgbe_for_each_ring(ring, q_vector->rx)
1034         WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
1035 
1036     adapter->q_vector[v_idx] = NULL;
1037     __netif_napi_del(&q_vector->napi);
1038 
1039     if (static_key_enabled(&ixgbe_xdp_locking_key))
1040         static_branch_dec(&ixgbe_xdp_locking_key);
1041 
1042     /*
1043      * after a call to __netif_napi_del() the napi struct may still be in use
1044      * and ixgbe_get_stats64() might access the rings on this vector, so
1045      * we must wait an RCU grace period before freeing it.
1046      */
1047     kfree_rcu(q_vector, rcu);
1048 }
1049 
1050 /**
1051  * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
1052  * @adapter: board private structure to initialize
1053  *
1054  * We allocate one q_vector per queue interrupt.  If allocation fails we
1055  * return -ENOMEM.
1056  **/
1057 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
1058 {
1059     int q_vectors = adapter->num_q_vectors;
1060     int rxr_remaining = adapter->num_rx_queues;
1061     int txr_remaining = adapter->num_tx_queues;
1062     int xdp_remaining = adapter->num_xdp_queues;
1063     int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
1064     int err, i;
1065 
1066     /* only one q_vector if MSI-X is disabled. */
1067     if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1068         q_vectors = 1;
1069 
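    /*
     * If there are enough vectors for every ring to get its own, hand each
     * Rx ring a dedicated (Rx-only) vector first; Tx and XDP rings are then
     * distributed by the loop below.
     */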
1070     if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
1071         for (; rxr_remaining; v_idx++) {
1072             err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1073                            0, 0, 0, 0, 1, rxr_idx);
1074 
1075             if (err)
1076                 goto err_out;
1077 
1078             /* update counts and index */
1079             rxr_remaining--;
1080             rxr_idx++;
1081         }
1082     }
1083 
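    /*
     * Spread the remaining rings as evenly as possible over the remaining
     * vectors, e.g. 10 Rx rings over 4 vectors gives 3, 3, 2 and 2 rings.
     */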
1084     for (; v_idx < q_vectors; v_idx++) {
1085         int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1086         int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1087         int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
1088 
1089         err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1090                        tqpv, txr_idx,
1091                        xqpv, xdp_idx,
1092                        rqpv, rxr_idx);
1093 
1094         if (err)
1095             goto err_out;
1096 
1097         /* update counts and index */
1098         rxr_remaining -= rqpv;
1099         txr_remaining -= tqpv;
1100         xdp_remaining -= xqpv;
1101         rxr_idx++;
1102         txr_idx++;
1103         xdp_idx += xqpv;
1104     }
1105 
1106     for (i = 0; i < adapter->num_rx_queues; i++) {
1107         if (adapter->rx_ring[i])
1108             adapter->rx_ring[i]->ring_idx = i;
1109     }
1110 
1111     for (i = 0; i < adapter->num_tx_queues; i++) {
1112         if (adapter->tx_ring[i])
1113             adapter->tx_ring[i]->ring_idx = i;
1114     }
1115 
1116     for (i = 0; i < adapter->num_xdp_queues; i++) {
1117         if (adapter->xdp_ring[i])
1118             adapter->xdp_ring[i]->ring_idx = i;
1119     }
1120 
1121     return 0;
1122 
1123 err_out:
1124     adapter->num_tx_queues = 0;
1125     adapter->num_xdp_queues = 0;
1126     adapter->num_rx_queues = 0;
1127     adapter->num_q_vectors = 0;
1128 
1129     while (v_idx--)
1130         ixgbe_free_q_vector(adapter, v_idx);
1131 
1132     return -ENOMEM;
1133 }
1134 
1135 /**
1136  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
1137  * @adapter: board private structure to initialize
1138  *
1139  * This function frees the memory allocated to the q_vectors.  In addition, if
1140  * NAPI is enabled, it will delete any references to the NAPI struct prior
1141  * to freeing the q_vector.
1142  **/
1143 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
1144 {
1145     int v_idx = adapter->num_q_vectors;
1146 
1147     adapter->num_tx_queues = 0;
1148     adapter->num_xdp_queues = 0;
1149     adapter->num_rx_queues = 0;
1150     adapter->num_q_vectors = 0;
1151 
1152     while (v_idx--)
1153         ixgbe_free_q_vector(adapter, v_idx);
1154 }
1155 
1156 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
1157 {
1158     if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1159         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1160         pci_disable_msix(adapter->pdev);
1161         kfree(adapter->msix_entries);
1162         adapter->msix_entries = NULL;
1163     } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1164         adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1165         pci_disable_msi(adapter->pdev);
1166     }
1167 }
1168 
1169 /**
1170  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
1171  * @adapter: board private structure to initialize
1172  *
1173  * Attempt to configure the interrupts using the best available
1174  * capabilities of the hardware and the kernel.
1175  **/
1176 static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1177 {
1178     int err;
1179 
1180     /* We will try to get MSI-X interrupts first */
1181     if (!ixgbe_acquire_msix_vectors(adapter))
1182         return;
1183 
1184     /* At this point, we do not have MSI-X capabilities. We need to
1185      * reconfigure or disable various features which require MSI-X
1186      * capability.
1187      */
1188 
1189     /* Disable DCB unless we only have a single traffic class */
1190     if (adapter->hw_tcs > 1) {
1191         e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
1192         netdev_reset_tc(adapter->netdev);
1193 
1194         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1195             adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1196 
1197         adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1198         adapter->temp_dcb_cfg.pfc_mode_enable = false;
1199         adapter->dcb_cfg.pfc_mode_enable = false;
1200     }
1201 
1202     adapter->hw_tcs = 0;
1203     adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1204     adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1205 
1206     /* Disable SR-IOV support */
1207     e_dev_warn("Disabling SR-IOV support\n");
1208     ixgbe_disable_sriov(adapter);
1209 
1210     /* Disable RSS */
1211     e_dev_warn("Disabling RSS support\n");
1212     adapter->ring_feature[RING_F_RSS].limit = 1;
1213 
1214     /* recalculate number of queues now that many features have been
1215      * changed or disabled.
1216      */
1217     ixgbe_set_num_queues(adapter);
1218     adapter->num_q_vectors = 1;
1219 
1220     err = pci_enable_msi(adapter->pdev);
1221     if (err)
1222         e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
1223                err);
1224     else
1225         adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
1226 }
1227 
1228 /**
1229  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
1230  * @adapter: board private structure to initialize
1231  *
1232  * We determine which interrupt scheme to use based on...
1233  * - Kernel support (MSI, MSI-X)
1234  *   - which can be user-defined (via MODULE_PARAM)
1235  * - Hardware queue count (num_*_queues)
1236  *   - defined by miscellaneous hardware support/features (RSS, etc.)
1237  **/
1238 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
1239 {
1240     int err;
1241 
1242     /* Number of supported queues */
1243     ixgbe_set_num_queues(adapter);
1244 
1245     /* Set interrupt mode */
1246     ixgbe_set_interrupt_capability(adapter);
1247 
1248     err = ixgbe_alloc_q_vectors(adapter);
1249     if (err) {
1250         e_dev_err("Unable to allocate memory for queue vectors\n");
1251         goto err_alloc_q_vectors;
1252     }
1253 
1254     ixgbe_cache_ring_register(adapter);
1255 
1256     e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
1257            (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
1258            adapter->num_rx_queues, adapter->num_tx_queues,
1259            adapter->num_xdp_queues);
1260 
1261     set_bit(__IXGBE_DOWN, &adapter->state);
1262 
1263     return 0;
1264 
1265 err_alloc_q_vectors:
1266     ixgbe_reset_interrupt_capability(adapter);
1267     return err;
1268 }
1269 
1270 /**
1271  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
1272  * @adapter: board private structure to clear interrupt scheme on
1273  *
1274  * We go through and clear interrupt-specific resources and reset the structure
1275  * to pre-load conditions.
1276  **/
1277 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
1278 {
1279     adapter->num_tx_queues = 0;
1280     adapter->num_xdp_queues = 0;
1281     adapter->num_rx_queues = 0;
1282 
1283     ixgbe_free_q_vectors(adapter);
1284     ixgbe_reset_interrupt_capability(adapter);
1285 }
1286 
1287 void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
1288                u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
1289 {
1290     struct ixgbe_adv_tx_context_desc *context_desc;
1291     u16 i = tx_ring->next_to_use;
1292 
1293     context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
1294 
1295     i++;
1296     tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1297 
1298     /* set bits to identify this as an advanced context descriptor */
1299     type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1300 
1301     context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
1302     context_desc->fceof_saidx   = cpu_to_le32(fceof_saidx);
1303     context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
1304     context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1305 }
1306