// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared with a TC */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif
	return true;
}
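
/*
 * Illustrative layout (example values, not from the original file): with
 * four traffic classes the 4-queues-per-pool VMDq mask is used, so
 * __ALIGN_MASK(1, ~vmdq->mask) evaluates to 4 and the ring for traffic
 * class t of pool p is cached at register index (vmdq->offset + p) * 4 + t.
 * Only rings in the first pool keep a netdev pointer; rings in the other
 * pools are left without one so the stack does not use them directly.
 */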

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif

/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for VMDq/SR-IOV to the assigned rings,
 * mapping any FCoE rings into the queues that follow the VMDq pools.
 *
 **/
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then work down to the least
 * amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	int queues;

	queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
	return adapter->xdp_prog ? queues : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
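
/* Each RSS mask above is one less than the size of the queue region it
 * describes (64, 16, 8, 4 or 2 queues), so "index & mask" yields a queue's
 * offset within that region; the disabled mask collapses the region to a
 * single queue.
 */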

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif

/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 queue configuration */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}
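
/*
 * Summary of the split chosen above (illustrative): requesting more than
 * 32 pools selects the 64-pool mode with at most 2 RSS queues per pool,
 * while 32 pools or fewer selects the 32-pool mode with 4, 2 or 1 queues
 * per pool depending on the configured RSS limit.
 */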

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts.  Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
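
/*
 * Example budget (illustrative numbers): a PF with 16 Rx/Tx queue pairs on
 * an 8-CPU system requests min(16, 8) + NON_Q_VECTORS MSI-X vectors, further
 * clamped to hw->mac.max_msix_vectors, and accepts any allocation down to
 * MIN_MSIX_COUNT; on failure the caller falls back to MSI or legacy
 * interrupts.
 */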

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);
		spin_lock_init(&ring->tx_lock);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
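
/*
 * Indexing note (summary, not in the original): Tx and Rx ring indices
 * advance by v_count inside a vector while XDP indices advance by one, so
 * consecutive netdev queue indices are spread across different q_vectors
 * and XDP rings are packed onto the earliest vectors.
 */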

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	__netif_napi_del(&q_vector->napi);

	if (static_key_enabled(&ixgbe_xdp_locking_key))
		static_branch_dec(&ixgbe_xdp_locking_key);

	/*
	 * after a call to __netif_napi_del() napi may still be used and
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}