0001
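// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */
/* (License header reconstructed from MODULE_LICENSE("GPL v2") and the
 * i40e_copyright string below; the exact original header year range is
 * an assumption.)
 */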
0002
0003
0004 #include <linux/etherdevice.h>
0005 #include <linux/of_net.h>
0006 #include <linux/pci.h>
0007 #include <linux/bpf.h>
0008 #include <generated/utsrelease.h>
0009 #include <linux/crash_dump.h>
0010
0011
0012 #include "i40e.h"
0013 #include "i40e_diag.h"
0014 #include "i40e_xsk.h"
0015 #include <net/udp_tunnel.h>
0016 #include <net/xdp_sock_drv.h>
0017
0018
0019
0020
0021 #define CREATE_TRACE_POINTS
0022 #include "i40e_trace.h"
0023
0024 const char i40e_driver_name[] = "i40e";
0025 static const char i40e_driver_string[] =
0026 "Intel(R) Ethernet Connection XL710 Network Driver";
0027
0028 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
0029
0030
0031 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
0032 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
0033 static int i40e_add_vsi(struct i40e_vsi *vsi);
0034 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
0035 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
0036 static int i40e_setup_misc_vector(struct i40e_pf *pf);
0037 static void i40e_determine_queue_usage(struct i40e_pf *pf);
0038 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
0039 static void i40e_prep_for_reset(struct i40e_pf *pf);
0040 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
0041 bool lock_acquired);
0042 static int i40e_reset(struct i40e_pf *pf);
0043 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
0044 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
0045 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
0046 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
0047 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
0048 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
0049 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
0050 static int i40e_get_capabilities(struct i40e_pf *pf,
0051 enum i40e_admin_queue_opc list_type);
0052 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
0053
0054
0055
0056
0057
0058
0059
0060
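/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */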
0061 static const struct pci_device_id i40e_pci_tbl[] = {
0062 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
0063 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
0064 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
0065 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
0066 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
0067 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
0068 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
0069 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
0070 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
0071 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
0072 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
0073 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
0074 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
0075 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
0076 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
0077 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
0078 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
0079 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
0080 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
0081 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
0082 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
0083 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
0084 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
0085 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
0086 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
0087
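/* required last entry */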
0088 {0, }
0089 };
0090 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
0091
0092 #define I40E_MAX_VF_COUNT 128
0093 static int debug = -1;
0094 module_param(debug, uint, 0);
0095 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
0096
0097 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
0098 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
0099 MODULE_LICENSE("GPL v2");
0100
0101 static struct workqueue_struct *i40e_wq;
0102
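/**
 * netdev_hw_addr_refcnt - adjust the netdev_hw_addr refcount for a filter
 * @f: the MAC filter whose address is being synced
 * @netdev: netdev whose multicast address list is searched
 * @delta: amount to add to the matching entry's refcount
 *
 * Keeps the refcount of the matching multicast list entry in step with the
 * driver's own filter bookkeeping, clamping it so it never drops below 1.
 **/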
0103 static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
0104 struct net_device *netdev, int delta)
0105 {
0106 struct netdev_hw_addr *ha;
0107
0108 if (!f || !netdev)
0109 return;
0110
0111 netdev_for_each_mc_addr(ha, netdev) {
0112 if (ether_addr_equal(ha->addr, f->macaddr)) {
0113 ha->refcount += delta;
0114 if (ha->refcount <= 0)
0115 ha->refcount = 1;
0116 break;
0117 }
0118 }
0119 }
0120
0121
0122
0123
0124
0125
0126
0127
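/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/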
0128 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
0129 u64 size, u32 alignment)
0130 {
0131 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
0132
0133 mem->size = ALIGN(size, alignment);
0134 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
0135 GFP_KERNEL);
0136 if (!mem->va)
0137 return -ENOMEM;
0138
0139 return 0;
0140 }
0141
0142
0143
0144
0145
0146
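/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/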
0147 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
0148 {
0149 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
0150
0151 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
0152 mem->va = NULL;
0153 mem->pa = 0;
0154 mem->size = 0;
0155
0156 return 0;
0157 }
0158
0159
0160
0161
0162
0163
0164
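/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/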
0165 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
0166 u32 size)
0167 {
0168 mem->size = size;
0169 mem->va = kzalloc(size, GFP_KERNEL);
0170
0171 if (!mem->va)
0172 return -ENOMEM;
0173
0174 return 0;
0175 }
0176
0177
0178
0179
0180
0181
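/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/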
0182 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
0183 {
0184
0185 kfree(mem->va);
0186 mem->va = NULL;
0187 mem->size = 0;
0188
0189 return 0;
0190 }
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
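/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error.
 **/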
0201 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
0202 u16 needed, u16 id)
0203 {
0204 int ret = -ENOMEM;
0205 int i, j;
0206
0207 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
0208 dev_info(&pf->pdev->dev,
0209 "param err: pile=%s needed=%d id=0x%04x\n",
0210 pile ? "<valid>" : "<null>", needed, id);
0211 return -EINVAL;
0212 }
0213
0214
0215
0216
0217 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
0218 if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
0219 dev_err(&pf->pdev->dev,
0220 "Cannot allocate queue %d for I40E_VSI_FDIR\n",
0221 pile->num_entries - 1);
0222 return -ENOMEM;
0223 }
0224 pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
0225 return pile->num_entries - 1;
0226 }
0227
0228 i = 0;
0229 while (i < pile->num_entries) {
0230
0231 if (pile->list[i] & I40E_PILE_VALID_BIT) {
0232 i++;
0233 continue;
0234 }
0235
0236
0237 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
0238 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
0239 break;
0240 }
0241
0242 if (j == needed) {
0243
0244 for (j = 0; j < needed; j++)
0245 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
0246 ret = i;
0247 break;
0248 }
0249
0250
0251 i += j;
0252 }
0253
0254 return ret;
0255 }
0256
0257
0258
0259
0260
0261
0262
0263
0264
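/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/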
0265 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
0266 {
0267 int valid_id = (id | I40E_PILE_VALID_BIT);
0268 int count = 0;
0269 u16 i;
0270
0271 if (!pile || index >= pile->num_entries)
0272 return -EINVAL;
0273
0274 for (i = index;
0275 i < pile->num_entries && pile->list[i] == valid_id;
0276 i++) {
0277 pile->list[i] = 0;
0278 count++;
0279 }
0280
0281
0282 return count;
0283 }
0284
0285
0286
0287
0288
0289
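/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/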
0290 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
0291 {
0292 int i;
0293
0294 for (i = 0; i < pf->num_alloc_vsi; i++)
0295 if (pf->vsi[i] && (pf->vsi[i]->id == id))
0296 return pf->vsi[i];
0297
0298 return NULL;
0299 }
0300
0301
0302
0303
0304
0305
0306
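/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/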
0307 void i40e_service_event_schedule(struct i40e_pf *pf)
0308 {
0309 if ((!test_bit(__I40E_DOWN, pf->state) &&
0310 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
0311 test_bit(__I40E_RECOVERY_MODE, pf->state))
0312 queue_work(i40e_wq, &pf->service_task);
0313 }
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
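/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/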
0324 static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
0325 {
0326 struct i40e_netdev_priv *np = netdev_priv(netdev);
0327 struct i40e_vsi *vsi = np->vsi;
0328 struct i40e_pf *pf = vsi->back;
0329 struct i40e_ring *tx_ring = NULL;
0330 unsigned int i;
0331 u32 head, val;
0332
0333 pf->tx_timeout_count++;
0334
0335
0336 for (i = 0; i < vsi->num_queue_pairs; i++) {
0337 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
0338 if (txqueue ==
0339 vsi->tx_rings[i]->queue_index) {
0340 tx_ring = vsi->tx_rings[i];
0341 break;
0342 }
0343 }
0344 }
0345
0346 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
0347 pf->tx_timeout_recovery_level = 1;
0348 else if (time_before(jiffies,
0349 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
0350 return;
0351
0352
0353 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
0354 return;
0355
0356 if (tx_ring) {
0357 head = i40e_get_head(tx_ring);
0358
0359 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
0360 val = rd32(&pf->hw,
0361 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
0362 tx_ring->vsi->base_vector - 1));
0363 else
0364 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
0365
0366 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
0367 vsi->seid, txqueue, tx_ring->next_to_clean,
0368 head, tx_ring->next_to_use,
0369 readl(tx_ring->tail), val);
0370 }
0371
0372 pf->tx_timeout_last_recovery = jiffies;
0373 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
0374 pf->tx_timeout_recovery_level, txqueue);
0375
0376 switch (pf->tx_timeout_recovery_level) {
0377 case 1:
0378 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
0379 break;
0380 case 2:
0381 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
0382 break;
0383 case 3:
0384 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
0385 break;
0386 default:
0387 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
0388 set_bit(__I40E_DOWN_REQUESTED, pf->state);
0389 set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
0390 break;
0391 }
0392
0393 i40e_service_event_schedule(pf);
0394 pf->tx_timeout_recovery_level++;
0395 }
0396
0397
0398
0399
0400
0401
0402
0403
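/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/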
0404 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
0405 {
0406 return &vsi->net_stats;
0407 }
0408
0409
0410
0411
0412
0413
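/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to read stats from
 * @stats: rtnl_link_stats64 struct to accumulate into
 **/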
0414 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
0415 struct rtnl_link_stats64 *stats)
0416 {
0417 u64 bytes, packets;
0418 unsigned int start;
0419
0420 do {
0421 start = u64_stats_fetch_begin_irq(&ring->syncp);
0422 packets = ring->stats.packets;
0423 bytes = ring->stats.bytes;
0424 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
0425
0426 stats->tx_packets += packets;
0427 stats->tx_bytes += bytes;
0428 }
0429
0430
0431
0432
0433
0434
0435
0436
0437
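/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/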
0438 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
0439 struct rtnl_link_stats64 *stats)
0440 {
0441 struct i40e_netdev_priv *np = netdev_priv(netdev);
0442 struct i40e_vsi *vsi = np->vsi;
0443 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
0444 struct i40e_ring *ring;
0445 int i;
0446
0447 if (test_bit(__I40E_VSI_DOWN, vsi->state))
0448 return;
0449
0450 if (!vsi->tx_rings)
0451 return;
0452
0453 rcu_read_lock();
0454 for (i = 0; i < vsi->num_queue_pairs; i++) {
0455 u64 bytes, packets;
0456 unsigned int start;
0457
0458 ring = READ_ONCE(vsi->tx_rings[i]);
0459 if (!ring)
0460 continue;
0461 i40e_get_netdev_stats_struct_tx(ring, stats);
0462
0463 if (i40e_enabled_xdp_vsi(vsi)) {
0464 ring = READ_ONCE(vsi->xdp_rings[i]);
0465 if (!ring)
0466 continue;
0467 i40e_get_netdev_stats_struct_tx(ring, stats);
0468 }
0469
0470 ring = READ_ONCE(vsi->rx_rings[i]);
0471 if (!ring)
0472 continue;
0473 do {
0474 start = u64_stats_fetch_begin_irq(&ring->syncp);
0475 packets = ring->stats.packets;
0476 bytes = ring->stats.bytes;
0477 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
0478
0479 stats->rx_packets += packets;
0480 stats->rx_bytes += bytes;
0482 }
0483 rcu_read_unlock();
0484
0485
0486 stats->multicast = vsi_stats->multicast;
0487 stats->tx_errors = vsi_stats->tx_errors;
0488 stats->tx_dropped = vsi_stats->tx_dropped;
0489 stats->rx_errors = vsi_stats->rx_errors;
0490 stats->rx_dropped = vsi_stats->rx_dropped;
0491 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
0492 stats->rx_length_errors = vsi_stats->rx_length_errors;
0493 }
0494
0495
0496
0497
0498
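/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/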
0499 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
0500 {
0501 struct rtnl_link_stats64 *ns;
0502 int i;
0503
0504 if (!vsi)
0505 return;
0506
0507 ns = i40e_get_vsi_stats_struct(vsi);
0508 memset(ns, 0, sizeof(*ns));
0509 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
0510 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
0511 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
0512 if (vsi->rx_rings && vsi->rx_rings[0]) {
0513 for (i = 0; i < vsi->num_queue_pairs; i++) {
0514 memset(&vsi->rx_rings[i]->stats, 0,
0515 sizeof(vsi->rx_rings[i]->stats));
0516 memset(&vsi->rx_rings[i]->rx_stats, 0,
0517 sizeof(vsi->rx_rings[i]->rx_stats));
0518 memset(&vsi->tx_rings[i]->stats, 0,
0519 sizeof(vsi->tx_rings[i]->stats));
0520 memset(&vsi->tx_rings[i]->tx_stats, 0,
0521 sizeof(vsi->tx_rings[i]->tx_stats));
0522 }
0523 }
0524 vsi->stat_offsets_loaded = false;
0525 }
0526
0527
0528
0529
0530
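/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/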
0531 void i40e_pf_reset_stats(struct i40e_pf *pf)
0532 {
0533 int i;
0534
0535 memset(&pf->stats, 0, sizeof(pf->stats));
0536 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
0537 pf->stat_offsets_loaded = false;
0538
0539 for (i = 0; i < I40E_MAX_VEB; i++) {
0540 if (pf->veb[i]) {
0541 memset(&pf->veb[i]->stats, 0,
0542 sizeof(pf->veb[i]->stats));
0543 memset(&pf->veb[i]->stats_offsets, 0,
0544 sizeof(pf->veb[i]->stats_offsets));
0545 memset(&pf->veb[i]->tc_stats, 0,
0546 sizeof(pf->veb[i]->tc_stats));
0547 memset(&pf->veb[i]->tc_stats_offsets, 0,
0548 sizeof(pf->veb[i]->tc_stats_offsets));
0549 pf->veb[i]->stat_offsets_loaded = false;
0550 }
0551 }
0552 pf->hw_csum_rx_error = 0;
0553 }
0554
0555
0556
0557
0558
0559
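/**
 * i40e_compute_pci_to_hw_id - compute index from PCI function
 * @vsi: ptr to the VSI to read from
 * @hw: ptr to the hardware info
 *
 * Used to locate the per-function RXERR1 statistics registers.
 **/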
0560 static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
0561 {
0562 int pf_count = i40e_get_pf_count(hw);
0563
0564 if (vsi->type == I40E_VSI_SRIOV)
0565 return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
0566
0567 return hw->port + BIT(7);
0568 }
0583
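/**
 * i40e_stat_update64 - read and update a 64 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/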
0584 static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
0585 bool offset_loaded, u64 *offset, u64 *stat)
0586 {
0587 u64 new_data;
0588
0589 new_data = rd64(hw, loreg);
0590
0591 if (!offset_loaded || new_data < *offset)
0592 *offset = new_data;
0593 *stat = new_data - *offset;
0594 }
0610
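/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/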
0611 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
0612 bool offset_loaded, u64 *offset, u64 *stat)
0613 {
0614 u64 new_data;
0615
0616 if (hw->device_id == I40E_DEV_ID_QEMU) {
0617 new_data = rd32(hw, loreg);
0618 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
0619 } else {
0620 new_data = rd64(hw, loreg);
0621 }
0622 if (!offset_loaded)
0623 *offset = new_data;
0624 if (likely(new_data >= *offset))
0625 *stat = new_data - *offset;
0626 else
0627 *stat = (new_data + BIT_ULL(48)) - *offset;
0628 *stat &= 0xFFFFFFFFFFFFULL;
0629 }
0630
0631
0632
0633
0634
0635
0636
0637
0638
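/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/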
0639 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
0640 bool offset_loaded, u64 *offset, u64 *stat)
0641 {
0642 u32 new_data;
0643
0644 new_data = rd32(hw, reg);
0645 if (!offset_loaded)
0646 *offset = new_data;
0647 if (likely(new_data >= *offset))
0648 *stat = (u32)(new_data - *offset);
0649 else
0650 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
0651 }
0652
0653
0654
0655
0656
0657
0658
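/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 *
 * Accumulates the register value into *stat; the write of a non-zero value
 * back to the register clears it.
 **/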
0659 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
0660 {
0661 u32 new_data = rd32(hw, reg);
0662
0663 wr32(hw, reg, 1);
0664 *stat += new_data;
0665 }
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
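/**
 * i40e_stats_update_rx_discards - update rx_discards
 * @vsi: ptr to the VSI to be updated
 * @hw: ptr to the hardware info
 * @stat_idx: VSI's stat_counter_idx
 * @offset_loaded: whether the stat offsets have been loaded yet
 * @stat_offset: ptr to stat_offset to store first offset value
 * @stat: ptr to VSI's stat to be updated
 *
 * Combines the RDPC counter with the per-function RXERR1 counter so that
 * rx_discards reflects both queue drops and other receive errors.
 **/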
0676 static void
0677 i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
0678 int stat_idx, bool offset_loaded,
0679 struct i40e_eth_stats *stat_offset,
0680 struct i40e_eth_stats *stat)
0681 {
0682 u64 rx_rdpc, rx_rxerr;
0683
0684 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
0685 &stat_offset->rx_discards, &rx_rdpc);
0686 i40e_stat_update64(hw,
0687 I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
0688 I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
0689 offset_loaded, &stat_offset->rx_discards_other,
0690 &rx_rxerr);
0691
0692 stat->rx_discards = rx_rdpc + rx_rxerr;
0693 }
0694
0695
0696
0697
0698
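/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 **/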
0699 void i40e_update_eth_stats(struct i40e_vsi *vsi)
0700 {
0701 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
0702 struct i40e_pf *pf = vsi->back;
0703 struct i40e_hw *hw = &pf->hw;
0704 struct i40e_eth_stats *oes;
0705 struct i40e_eth_stats *es;
0706
0707 es = &vsi->eth_stats;
0708 oes = &vsi->eth_stats_offsets;
0709
0710
0711 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
0712 vsi->stat_offsets_loaded,
0713 &oes->tx_errors, &es->tx_errors);
0714 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
0715 vsi->stat_offsets_loaded,
0716 &oes->rx_discards, &es->rx_discards);
0717 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
0718 vsi->stat_offsets_loaded,
0719 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
0720
0721 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
0722 I40E_GLV_GORCL(stat_idx),
0723 vsi->stat_offsets_loaded,
0724 &oes->rx_bytes, &es->rx_bytes);
0725 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
0726 I40E_GLV_UPRCL(stat_idx),
0727 vsi->stat_offsets_loaded,
0728 &oes->rx_unicast, &es->rx_unicast);
0729 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
0730 I40E_GLV_MPRCL(stat_idx),
0731 vsi->stat_offsets_loaded,
0732 &oes->rx_multicast, &es->rx_multicast);
0733 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
0734 I40E_GLV_BPRCL(stat_idx),
0735 vsi->stat_offsets_loaded,
0736 &oes->rx_broadcast, &es->rx_broadcast);
0737
0738 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
0739 I40E_GLV_GOTCL(stat_idx),
0740 vsi->stat_offsets_loaded,
0741 &oes->tx_bytes, &es->tx_bytes);
0742 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
0743 I40E_GLV_UPTCL(stat_idx),
0744 vsi->stat_offsets_loaded,
0745 &oes->tx_unicast, &es->tx_unicast);
0746 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
0747 I40E_GLV_MPTCL(stat_idx),
0748 vsi->stat_offsets_loaded,
0749 &oes->tx_multicast, &es->tx_multicast);
0750 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
0751 I40E_GLV_BPTCL(stat_idx),
0752 vsi->stat_offsets_loaded,
0753 &oes->tx_broadcast, &es->tx_broadcast);
0754
0755 i40e_stats_update_rx_discards(vsi, hw, stat_idx,
0756 vsi->stat_offsets_loaded, oes, es);
0757
0758 vsi->stat_offsets_loaded = true;
0759 }
0760
0761
0762
0763
0764
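/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/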
0765 void i40e_update_veb_stats(struct i40e_veb *veb)
0766 {
0767 struct i40e_pf *pf = veb->pf;
0768 struct i40e_hw *hw = &pf->hw;
0769 struct i40e_eth_stats *oes;
0770 struct i40e_eth_stats *es;
0771 struct i40e_veb_tc_stats *veb_oes;
0772 struct i40e_veb_tc_stats *veb_es;
0773 int i, idx = 0;
0774
0775 idx = veb->stats_idx;
0776 es = &veb->stats;
0777 oes = &veb->stats_offsets;
0778 veb_es = &veb->tc_stats;
0779 veb_oes = &veb->tc_stats_offsets;
0780
0781
0782 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
0783 veb->stat_offsets_loaded,
0784 &oes->tx_discards, &es->tx_discards);
0785 if (hw->revision_id > 0)
0786 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
0787 veb->stat_offsets_loaded,
0788 &oes->rx_unknown_protocol,
0789 &es->rx_unknown_protocol);
0790 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
0791 veb->stat_offsets_loaded,
0792 &oes->rx_bytes, &es->rx_bytes);
0793 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
0794 veb->stat_offsets_loaded,
0795 &oes->rx_unicast, &es->rx_unicast);
0796 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
0797 veb->stat_offsets_loaded,
0798 &oes->rx_multicast, &es->rx_multicast);
0799 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
0800 veb->stat_offsets_loaded,
0801 &oes->rx_broadcast, &es->rx_broadcast);
0802
0803 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
0804 veb->stat_offsets_loaded,
0805 &oes->tx_bytes, &es->tx_bytes);
0806 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
0807 veb->stat_offsets_loaded,
0808 &oes->tx_unicast, &es->tx_unicast);
0809 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
0810 veb->stat_offsets_loaded,
0811 &oes->tx_multicast, &es->tx_multicast);
0812 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
0813 veb->stat_offsets_loaded,
0814 &oes->tx_broadcast, &es->tx_broadcast);
0815 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
0816 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
0817 I40E_GLVEBTC_RPCL(i, idx),
0818 veb->stat_offsets_loaded,
0819 &veb_oes->tc_rx_packets[i],
0820 &veb_es->tc_rx_packets[i]);
0821 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
0822 I40E_GLVEBTC_RBCL(i, idx),
0823 veb->stat_offsets_loaded,
0824 &veb_oes->tc_rx_bytes[i],
0825 &veb_es->tc_rx_bytes[i]);
0826 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
0827 I40E_GLVEBTC_TPCL(i, idx),
0828 veb->stat_offsets_loaded,
0829 &veb_oes->tc_tx_packets[i],
0830 &veb_es->tc_tx_packets[i]);
0831 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
0832 I40E_GLVEBTC_TBCL(i, idx),
0833 veb->stat_offsets_loaded,
0834 &veb_oes->tc_tx_bytes[i],
0835 &veb_es->tc_tx_bytes[i]);
0836 }
0837 veb->stat_offsets_loaded = true;
0838 }
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849
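/**
 * i40e_update_vsi_stats - Update the vsi statistics counters
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/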
0850 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
0851 {
0852 u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
0853 struct i40e_pf *pf = vsi->back;
0854 struct rtnl_link_stats64 *ons;
0855 struct rtnl_link_stats64 *ns;
0856 struct i40e_eth_stats *oes;
0857 struct i40e_eth_stats *es;
0858 u64 tx_restart, tx_busy;
0859 struct i40e_ring *p;
0860 u64 bytes, packets;
0861 unsigned int start;
0862 u64 tx_linearize;
0863 u64 tx_force_wb;
0864 u64 tx_stopped;
0865 u64 rx_p, rx_b;
0866 u64 tx_p, tx_b;
0867 u16 q;
0868
0869 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
0870 test_bit(__I40E_CONFIG_BUSY, pf->state))
0871 return;
0872
0873 ns = i40e_get_vsi_stats_struct(vsi);
0874 ons = &vsi->net_stats_offsets;
0875 es = &vsi->eth_stats;
0876 oes = &vsi->eth_stats_offsets;
0877
0878
0879
0880
0881 rx_b = rx_p = 0;
0882 tx_b = tx_p = 0;
0883 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
0884 tx_stopped = 0;
0885 rx_page = 0;
0886 rx_buf = 0;
0887 rx_reuse = 0;
0888 rx_alloc = 0;
0889 rx_waive = 0;
0890 rx_busy = 0;
0891 rcu_read_lock();
0892 for (q = 0; q < vsi->num_queue_pairs; q++) {
0893
0894 p = READ_ONCE(vsi->tx_rings[q]);
0895 if (!p)
0896 continue;
0897
0898 do {
0899 start = u64_stats_fetch_begin_irq(&p->syncp);
0900 packets = p->stats.packets;
0901 bytes = p->stats.bytes;
0902 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
0903 tx_b += bytes;
0904 tx_p += packets;
0905 tx_restart += p->tx_stats.restart_queue;
0906 tx_busy += p->tx_stats.tx_busy;
0907 tx_linearize += p->tx_stats.tx_linearize;
0908 tx_force_wb += p->tx_stats.tx_force_wb;
0909 tx_stopped += p->tx_stats.tx_stopped;
0910
0911
0912 p = READ_ONCE(vsi->rx_rings[q]);
0913 if (!p)
0914 continue;
0915
0916 do {
0917 start = u64_stats_fetch_begin_irq(&p->syncp);
0918 packets = p->stats.packets;
0919 bytes = p->stats.bytes;
0920 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
0921 rx_b += bytes;
0922 rx_p += packets;
0923 rx_buf += p->rx_stats.alloc_buff_failed;
0924 rx_page += p->rx_stats.alloc_page_failed;
0925 rx_reuse += p->rx_stats.page_reuse_count;
0926 rx_alloc += p->rx_stats.page_alloc_count;
0927 rx_waive += p->rx_stats.page_waive_count;
0928 rx_busy += p->rx_stats.page_busy_count;
0929
0930 if (i40e_enabled_xdp_vsi(vsi)) {
0931
0932 p = READ_ONCE(vsi->xdp_rings[q]);
0933 if (!p)
0934 continue;
0935
0936 do {
0937 start = u64_stats_fetch_begin_irq(&p->syncp);
0938 packets = p->stats.packets;
0939 bytes = p->stats.bytes;
0940 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
0941 tx_b += bytes;
0942 tx_p += packets;
0943 tx_restart += p->tx_stats.restart_queue;
0944 tx_busy += p->tx_stats.tx_busy;
0945 tx_linearize += p->tx_stats.tx_linearize;
0946 tx_force_wb += p->tx_stats.tx_force_wb;
0947 }
0948 }
0949 rcu_read_unlock();
0950 vsi->tx_restart = tx_restart;
0951 vsi->tx_busy = tx_busy;
0952 vsi->tx_linearize = tx_linearize;
0953 vsi->tx_force_wb = tx_force_wb;
0954 vsi->tx_stopped = tx_stopped;
0955 vsi->rx_page_failed = rx_page;
0956 vsi->rx_buf_failed = rx_buf;
0957 vsi->rx_page_reuse = rx_reuse;
0958 vsi->rx_page_alloc = rx_alloc;
0959 vsi->rx_page_waive = rx_waive;
0960 vsi->rx_page_busy = rx_busy;
0961
0962 ns->rx_packets = rx_p;
0963 ns->rx_bytes = rx_b;
0964 ns->tx_packets = tx_p;
0965 ns->tx_bytes = tx_b;
0966
0967
0968 i40e_update_eth_stats(vsi);
0969 ons->tx_errors = oes->tx_errors;
0970 ns->tx_errors = es->tx_errors;
0971 ons->multicast = oes->rx_multicast;
0972 ns->multicast = es->rx_multicast;
0973 ons->rx_dropped = oes->rx_discards;
0974 ns->rx_dropped = es->rx_discards;
0975 ons->tx_dropped = oes->tx_discards;
0976 ns->tx_dropped = es->tx_discards;
0977
0978
0979 if (vsi == pf->vsi[pf->lan_vsi]) {
0980 ns->rx_crc_errors = pf->stats.crc_errors;
0981 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
0982 ns->rx_length_errors = pf->stats.rx_length_errors;
0983 }
0984 }
0985
0986
0987
0988
0989
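/**
 * i40e_update_pf_stats - Update the PF statistics counters
 * @pf: the PF to be updated
 **/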
0990 static void i40e_update_pf_stats(struct i40e_pf *pf)
0991 {
0992 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
0993 struct i40e_hw_port_stats *nsd = &pf->stats;
0994 struct i40e_hw *hw = &pf->hw;
0995 u32 val;
0996 int i;
0997
0998 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
0999 I40E_GLPRT_GORCL(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1002 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1003 I40E_GLPRT_GOTCL(hw->port),
1004 pf->stat_offsets_loaded,
1005 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1006 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1007 pf->stat_offsets_loaded,
1008 &osd->eth.rx_discards,
1009 &nsd->eth.rx_discards);
1010 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1011 I40E_GLPRT_UPRCL(hw->port),
1012 pf->stat_offsets_loaded,
1013 &osd->eth.rx_unicast,
1014 &nsd->eth.rx_unicast);
1015 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1016 I40E_GLPRT_MPRCL(hw->port),
1017 pf->stat_offsets_loaded,
1018 &osd->eth.rx_multicast,
1019 &nsd->eth.rx_multicast);
1020 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1021 I40E_GLPRT_BPRCL(hw->port),
1022 pf->stat_offsets_loaded,
1023 &osd->eth.rx_broadcast,
1024 &nsd->eth.rx_broadcast);
1025 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1026 I40E_GLPRT_UPTCL(hw->port),
1027 pf->stat_offsets_loaded,
1028 &osd->eth.tx_unicast,
1029 &nsd->eth.tx_unicast);
1030 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1031 I40E_GLPRT_MPTCL(hw->port),
1032 pf->stat_offsets_loaded,
1033 &osd->eth.tx_multicast,
1034 &nsd->eth.tx_multicast);
1035 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1036 I40E_GLPRT_BPTCL(hw->port),
1037 pf->stat_offsets_loaded,
1038 &osd->eth.tx_broadcast,
1039 &nsd->eth.tx_broadcast);
1040
1041 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->tx_dropped_link_down,
1044 &nsd->tx_dropped_link_down);
1045
1046 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1047 pf->stat_offsets_loaded,
1048 &osd->crc_errors, &nsd->crc_errors);
1049
1050 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1051 pf->stat_offsets_loaded,
1052 &osd->illegal_bytes, &nsd->illegal_bytes);
1053
1054 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
1055 pf->stat_offsets_loaded,
1056 &osd->mac_local_faults,
1057 &nsd->mac_local_faults);
1058 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
1059 pf->stat_offsets_loaded,
1060 &osd->mac_remote_faults,
1061 &nsd->mac_remote_faults);
1062
1063 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
1064 pf->stat_offsets_loaded,
1065 &osd->rx_length_errors,
1066 &nsd->rx_length_errors);
1067
1068 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
1069 pf->stat_offsets_loaded,
1070 &osd->link_xon_rx, &nsd->link_xon_rx);
1071 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1072 pf->stat_offsets_loaded,
1073 &osd->link_xon_tx, &nsd->link_xon_tx);
1074 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1075 pf->stat_offsets_loaded,
1076 &osd->link_xoff_rx, &nsd->link_xoff_rx);
1077 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1078 pf->stat_offsets_loaded,
1079 &osd->link_xoff_tx, &nsd->link_xoff_tx);
1080
1081 for (i = 0; i < 8; i++) {
1082 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1083 pf->stat_offsets_loaded,
1084 &osd->priority_xoff_rx[i],
1085 &nsd->priority_xoff_rx[i]);
1086 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1087 pf->stat_offsets_loaded,
1088 &osd->priority_xon_rx[i],
1089 &nsd->priority_xon_rx[i]);
1090 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1091 pf->stat_offsets_loaded,
1092 &osd->priority_xon_tx[i],
1093 &nsd->priority_xon_tx[i]);
1094 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1095 pf->stat_offsets_loaded,
1096 &osd->priority_xoff_tx[i],
1097 &nsd->priority_xoff_tx[i]);
1098 i40e_stat_update32(hw,
1099 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1100 pf->stat_offsets_loaded,
1101 &osd->priority_xon_2_xoff[i],
1102 &nsd->priority_xon_2_xoff[i]);
1103 }
1104
1105 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1106 I40E_GLPRT_PRC64L(hw->port),
1107 pf->stat_offsets_loaded,
1108 &osd->rx_size_64, &nsd->rx_size_64);
1109 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1110 I40E_GLPRT_PRC127L(hw->port),
1111 pf->stat_offsets_loaded,
1112 &osd->rx_size_127, &nsd->rx_size_127);
1113 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1114 I40E_GLPRT_PRC255L(hw->port),
1115 pf->stat_offsets_loaded,
1116 &osd->rx_size_255, &nsd->rx_size_255);
1117 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1118 I40E_GLPRT_PRC511L(hw->port),
1119 pf->stat_offsets_loaded,
1120 &osd->rx_size_511, &nsd->rx_size_511);
1121 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1122 I40E_GLPRT_PRC1023L(hw->port),
1123 pf->stat_offsets_loaded,
1124 &osd->rx_size_1023, &nsd->rx_size_1023);
1125 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1126 I40E_GLPRT_PRC1522L(hw->port),
1127 pf->stat_offsets_loaded,
1128 &osd->rx_size_1522, &nsd->rx_size_1522);
1129 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1130 I40E_GLPRT_PRC9522L(hw->port),
1131 pf->stat_offsets_loaded,
1132 &osd->rx_size_big, &nsd->rx_size_big);
1133
1134 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1135 I40E_GLPRT_PTC64L(hw->port),
1136 pf->stat_offsets_loaded,
1137 &osd->tx_size_64, &nsd->tx_size_64);
1138 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1139 I40E_GLPRT_PTC127L(hw->port),
1140 pf->stat_offsets_loaded,
1141 &osd->tx_size_127, &nsd->tx_size_127);
1142 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1143 I40E_GLPRT_PTC255L(hw->port),
1144 pf->stat_offsets_loaded,
1145 &osd->tx_size_255, &nsd->tx_size_255);
1146 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1147 I40E_GLPRT_PTC511L(hw->port),
1148 pf->stat_offsets_loaded,
1149 &osd->tx_size_511, &nsd->tx_size_511);
1150 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1151 I40E_GLPRT_PTC1023L(hw->port),
1152 pf->stat_offsets_loaded,
1153 &osd->tx_size_1023, &nsd->tx_size_1023);
1154 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1155 I40E_GLPRT_PTC1522L(hw->port),
1156 pf->stat_offsets_loaded,
1157 &osd->tx_size_1522, &nsd->tx_size_1522);
1158 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1159 I40E_GLPRT_PTC9522L(hw->port),
1160 pf->stat_offsets_loaded,
1161 &osd->tx_size_big, &nsd->tx_size_big);
1162
1163 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1164 pf->stat_offsets_loaded,
1165 &osd->rx_undersize, &nsd->rx_undersize);
1166 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1167 pf->stat_offsets_loaded,
1168 &osd->rx_fragments, &nsd->rx_fragments);
1169 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1170 pf->stat_offsets_loaded,
1171 &osd->rx_oversize, &nsd->rx_oversize);
1172 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1173 pf->stat_offsets_loaded,
1174 &osd->rx_jabber, &nsd->rx_jabber);
1175
1176
1177 i40e_stat_update_and_clear32(hw,
1178 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1179 &nsd->fd_atr_match);
1180 i40e_stat_update_and_clear32(hw,
1181 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1182 &nsd->fd_sb_match);
1183 i40e_stat_update_and_clear32(hw,
1184 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1185 &nsd->fd_atr_tunnel_match);
1186
1187 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1188 nsd->tx_lpi_status =
1189 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1190 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1191 nsd->rx_lpi_status =
1192 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1193 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1194 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1195 pf->stat_offsets_loaded,
1196 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1197 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1198 pf->stat_offsets_loaded,
1199 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1200
1201 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1202 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1203 nsd->fd_sb_status = true;
1204 else
1205 nsd->fd_sb_status = false;
1206
1207 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1208 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1209 nsd->fd_atr_status = true;
1210 else
1211 nsd->fd_atr_status = false;
1212
1213 pf->stat_offsets_loaded = true;
1214 }
1215
1216
1217
1218
1219
1220
1221
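/**
 * i40e_update_stats - Update the various statistics counters
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/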
1222 void i40e_update_stats(struct i40e_vsi *vsi)
1223 {
1224 struct i40e_pf *pf = vsi->back;
1225
1226 if (vsi == pf->vsi[pf->lan_vsi])
1227 i40e_update_pf_stats(pf);
1228
1229 i40e_update_vsi_stats(vsi);
1230 }
1231
1232
1233
1234
1235
1236
1237
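/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/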
1238 int i40e_count_filters(struct i40e_vsi *vsi)
1239 {
1240 struct i40e_mac_filter *f;
1241 struct hlist_node *h;
1242 int bkt;
1243 int cnt = 0;
1244
1245 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
1246 ++cnt;
1247
1248 return cnt;
1249 }
1250
1251
1252
1253
1254
1255
1256
1257
1258
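/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/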
1259 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1260 const u8 *macaddr, s16 vlan)
1261 {
1262 struct i40e_mac_filter *f;
1263 u64 key;
1264
1265 if (!vsi || !macaddr)
1266 return NULL;
1267
1268 key = i40e_addr_to_hkey(macaddr);
1269 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1270 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1271 (vlan == f->vlan))
1272 return f;
1273 }
1274 return NULL;
1275 }
1276
1277
1278
1279
1280
1281
1282
1283
1284
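/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/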
1285 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1286 {
1287 struct i40e_mac_filter *f;
1288 u64 key;
1289
1290 if (!vsi || !macaddr)
1291 return NULL;
1292
1293 key = i40e_addr_to_hkey(macaddr);
1294 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1295 if ((ether_addr_equal(macaddr, f->macaddr)))
1296 return f;
1297 }
1298 return NULL;
1299 }
1300
1301
1302
1303
1304
1305
1306
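/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/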
1307 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1308 {
1309
1310 if (vsi->info.pvid)
1311 return true;
1312
1332
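/* A port VLAN forces all traffic onto that VLAN (handled above). Otherwise
 * report whether any VLAN-tagged filters are present; callers use this to
 * decide between adding filters on VLAN 0 or on I40E_VLAN_ANY.
 */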
1333 return vsi->has_vlan_filter;
1334 }
1364
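/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update all non-VLAN filters to be consistent with the current port VLAN
 * and the number of active VLAN filters: with a PVID every filter must use
 * that VLAN, with active VLAN filters untagged traffic must use VLAN 0, and
 * with no VLAN filters the filters revert to I40E_VLAN_ANY.  Filters that
 * need to change are queued on the add/delete lists for the caller to apply.
 *
 * Returns 0 on success or -ENOMEM if a replacement filter cannot be
 * allocated.  Expects to be called while holding mac_filter_hash_lock.
 **/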
1365 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1366 struct hlist_head *tmp_add_list,
1367 struct hlist_head *tmp_del_list,
1368 int vlan_filters)
1369 {
1370 s16 pvid = le16_to_cpu(vsi->info.pvid);
1371 struct i40e_mac_filter *f, *add_head;
1372 struct i40e_new_mac_filter *new;
1373 struct hlist_node *h;
1374 int bkt, new_vlan;
1375
1390
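/* Update the filters about to be added in place: with a PVID force that
 * VLAN, otherwise switch untagged/any-VLAN filters to match the current
 * VLAN filter count.
 */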
1391 hlist_for_each_entry(new, tmp_add_list, hlist) {
1392 if (pvid && new->f->vlan != pvid)
1393 new->f->vlan = pvid;
1394 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1395 new->f->vlan = 0;
1396 else if (!vlan_filters && new->f->vlan == 0)
1397 new->f->vlan = I40E_VLAN_ANY;
1398 }
1399
1400
1401 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1402
1403
1404
1405
1406
1407 if ((pvid && f->vlan != pvid) ||
1408 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1409 (!vlan_filters && f->vlan == 0)) {
1410
1411 if (pvid)
1412 new_vlan = pvid;
1413 else if (vlan_filters)
1414 new_vlan = 0;
1415 else
1416 new_vlan = I40E_VLAN_ANY;
1417
1418
1419 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1420 if (!add_head)
1421 return -ENOMEM;
1422
1423
1424 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1425 if (!new)
1426 return -ENOMEM;
1427
1428 new->f = add_head;
1429 new->state = add_head->state;
1430
1431
1432 hlist_add_head(&new->hlist, tmp_add_list);
1433
1434
1435 f->state = I40E_FILTER_REMOVE;
1436 hash_del(&f->hlist);
1437 hlist_add_head(&f->hlist, tmp_del_list);
1438 }
1439 }
1440
1441 vsi->has_vlan_filter = !!vlan_filters;
1442
1443 return 0;
1444 }
1459
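/**
 * i40e_get_vf_new_vlan - Get new vlan id on a vf
 * @vsi: the vsi to configure
 * @new_mac: new mac filter to be added
 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Get new VLAN id based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * Returns the value of the new vlan filter or
 * the old value if no new filter is needed.
 */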
1460 static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
1461 struct i40e_new_mac_filter *new_mac,
1462 struct i40e_mac_filter *f,
1463 int vlan_filters,
1464 bool trusted)
1465 {
1466 s16 pvid = le16_to_cpu(vsi->info.pvid);
1467 struct i40e_pf *pf = vsi->back;
1468 bool is_any;
1469
1470 if (new_mac)
1471 f = new_mac->f;
1472
1473 if (pvid && f->vlan != pvid)
1474 return pvid;
1475
1476 is_any = (trusted ||
1477 !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
1478
1479 if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1480 (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1481 (is_any && !vlan_filters && f->vlan == 0)) {
1482 if (is_any)
1483 return I40E_VLAN_ANY;
1484 else
1485 return 0;
1486 }
1487
1488 return f->vlan;
1489 }
1509
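/**
 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
 * @vsi: the vsi to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Correct VF VLAN filters based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */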
1510 static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
1511 struct hlist_head *tmp_add_list,
1512 struct hlist_head *tmp_del_list,
1513 int vlan_filters,
1514 bool trusted)
1515 {
1516 struct i40e_mac_filter *f, *add_head;
1517 struct i40e_new_mac_filter *new_mac;
1518 struct hlist_node *h;
1519 int bkt, new_vlan;
1520
1521 hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
1522 new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
1523 vlan_filters, trusted);
1524 }
1525
1526 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1527 new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
1528 trusted);
1529 if (new_vlan != f->vlan) {
1530 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1531 if (!add_head)
1532 return -ENOMEM;
1533
1534 new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
1535 if (!new_mac)
1536 return -ENOMEM;
1537 new_mac->f = add_head;
1538 new_mac->state = add_head->state;
1539
1540
1541 hlist_add_head(&new_mac->hlist, tmp_add_list);
1542
1543
1544 f->state = I40E_FILTER_REMOVE;
1545 hash_del(&f->hlist);
1546 hlist_add_head(&f->hlist, tmp_del_list);
1547 }
1548 }
1549
1550 vsi->has_vlan_filter = !!vlan_filters;
1551 return 0;
1552 }
1553
1554
1555
1556
1557
1558
1559
1560
1561
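/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/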
1562 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1563 {
1564 struct i40e_aqc_remove_macvlan_element_data element;
1565 struct i40e_pf *pf = vsi->back;
1566
1567
1568 if (vsi->type != I40E_VSI_MAIN)
1569 return;
1570
1571 memset(&element, 0, sizeof(element));
1572 ether_addr_copy(element.mac_addr, macaddr);
1573 element.vlan_tag = 0;
1574
1575 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1576 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1577
1578 memset(&element, 0, sizeof(element));
1579 ether_addr_copy(element.mac_addr, macaddr);
1580 element.vlan_tag = 0;
1581
1582 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1583 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1584 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1585 }
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
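/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/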
1598 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1599 const u8 *macaddr, s16 vlan)
1600 {
1601 struct i40e_mac_filter *f;
1602 u64 key;
1603
1604 if (!vsi || !macaddr)
1605 return NULL;
1606
1607 f = i40e_find_filter(vsi, macaddr, vlan);
1608 if (!f) {
1609 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1610 if (!f)
1611 return NULL;
1612
1613
1614
1615
1616 if (vlan >= 0)
1617 vsi->has_vlan_filter = true;
1618
1619 ether_addr_copy(f->macaddr, macaddr);
1620 f->vlan = vlan;
1621 f->state = I40E_FILTER_NEW;
1622 INIT_HLIST_NODE(&f->hlist);
1623
1624 key = i40e_addr_to_hkey(macaddr);
1625 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1626
1627 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1628 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1629 }
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639 if (f->state == I40E_FILTER_REMOVE)
1640 f->state = I40E_FILTER_ACTIVE;
1641
1642 return f;
1643 }
1659
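/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held, and from within a "safe" list iterator.
 **/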
1660 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1661 {
1662 if (!f)
1663 return;
1664
1665
1666
1667
1668
1669 if ((f->state == I40E_FILTER_FAILED) ||
1670 (f->state == I40E_FILTER_NEW)) {
1671 hash_del(&f->hlist);
1672 kfree(f);
1673 } else {
1674 f->state = I40E_FILTER_REMOVE;
1675 }
1676
1677 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1678 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1679 }
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
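/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/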
1693 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1694 {
1695 struct i40e_mac_filter *f;
1696
1697 if (!vsi || !macaddr)
1698 return;
1699
1700 f = i40e_find_filter(vsi, macaddr, vlan);
1701 __i40e_del_filter(vsi, f);
1702 }
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
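/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/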
1716 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1717 const u8 *macaddr)
1718 {
1719 struct i40e_mac_filter *f, *add = NULL;
1720 struct hlist_node *h;
1721 int bkt;
1722
1723 if (vsi->info.pvid)
1724 return i40e_add_filter(vsi, macaddr,
1725 le16_to_cpu(vsi->info.pvid));
1726
1727 if (!i40e_is_vsi_in_vlan(vsi))
1728 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1729
1730 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1731 if (f->state == I40E_FILTER_REMOVE)
1732 continue;
1733 add = i40e_add_filter(vsi, macaddr, f->vlan);
1734 if (!add)
1735 return NULL;
1736 }
1737
1738 return add;
1739 }
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
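/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/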
1751 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1752 {
1753 struct i40e_mac_filter *f;
1754 struct hlist_node *h;
1755 bool found = false;
1756 int bkt;
1757
1758 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1759 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1760 if (ether_addr_equal(macaddr, f->macaddr)) {
1761 __i40e_del_filter(vsi, f);
1762 found = true;
1763 }
1764 }
1765
1766 if (found)
1767 return 0;
1768 else
1769 return -ENOENT;
1770 }
1771
1772
1773
1774
1775
1776
1777
1778
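/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/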
1779 static int i40e_set_mac(struct net_device *netdev, void *p)
1780 {
1781 struct i40e_netdev_priv *np = netdev_priv(netdev);
1782 struct i40e_vsi *vsi = np->vsi;
1783 struct i40e_pf *pf = vsi->back;
1784 struct i40e_hw *hw = &pf->hw;
1785 struct sockaddr *addr = p;
1786
1787 if (!is_valid_ether_addr(addr->sa_data))
1788 return -EADDRNOTAVAIL;
1789
1790 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1791 netdev_info(netdev, "already using mac address %pM\n",
1792 addr->sa_data);
1793 return 0;
1794 }
1795
1796 if (test_bit(__I40E_DOWN, pf->state) ||
1797 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1798 return -EADDRNOTAVAIL;
1799
1800 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1801 netdev_info(netdev, "returning to hw mac address %pM\n",
1802 hw->mac.addr);
1803 else
1804 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1805
1806
1807
1808
1809
1810
1811
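/* Copy the address first, so that we avoid a possible race with
 * .set_rx_mode().
 * - Remove old address from MAC filter
 * - Copy new address
 * - Add new address to MAC filter
 */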
1812 spin_lock_bh(&vsi->mac_filter_hash_lock);
1813 i40e_del_mac_filter(vsi, netdev->dev_addr);
1814 eth_hw_addr_set(netdev, addr->sa_data);
1815 i40e_add_mac_filter(vsi, netdev->dev_addr);
1816 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1817
1818 if (vsi->type == I40E_VSI_MAIN) {
1819 i40e_status ret;
1820
1821 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1822 addr->sa_data, NULL);
1823 if (ret)
1824 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1825 i40e_stat_str(hw, ret),
1826 i40e_aq_str(hw, hw->aq.asq_last_status));
1827 }
1828
1829
1830
1831
1832 i40e_service_event_schedule(pf);
1833 return 0;
1834 }
1835
1836
1837
1838
1839
1840
1841
1842
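/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/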
1843 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1844 u8 *lut, u16 lut_size)
1845 {
1846 struct i40e_pf *pf = vsi->back;
1847 struct i40e_hw *hw = &pf->hw;
1848 int ret = 0;
1849
1850 if (seed) {
1851 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1852 (struct i40e_aqc_get_set_rss_key_data *)seed;
1853 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1854 if (ret) {
1855 dev_info(&pf->pdev->dev,
1856 "Cannot set RSS key, err %s aq_err %s\n",
1857 i40e_stat_str(hw, ret),
1858 i40e_aq_str(hw, hw->aq.asq_last_status));
1859 return ret;
1860 }
1861 }
1862 if (lut) {
1863 bool pf_lut = vsi->type == I40E_VSI_MAIN;
1864
1865 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1866 if (ret) {
1867 dev_info(&pf->pdev->dev,
1868 "Cannot set RSS lut, err %s aq_err %s\n",
1869 i40e_stat_str(hw, ret),
1870 i40e_aq_str(hw, hw->aq.asq_last_status));
1871 return ret;
1872 }
1873 }
1874 return ret;
1875 }
1876
1877
1878
1879
1880
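/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/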
1881 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1882 {
1883 struct i40e_pf *pf = vsi->back;
1884 u8 seed[I40E_HKEY_ARRAY_SIZE];
1885 u8 *lut;
1886 int ret;
1887
1888 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1889 return 0;
1890 if (!vsi->rss_size)
1891 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1892 vsi->num_queue_pairs);
1893 if (!vsi->rss_size)
1894 return -EINVAL;
1895 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1896 if (!lut)
1897 return -ENOMEM;
1898
1899
1900
1901
1902 if (vsi->rss_lut_user)
1903 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1904 else
1905 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1906 if (vsi->rss_hkey_user)
1907 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1908 else
1909 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1910 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1911 kfree(lut);
1912 return ret;
1913 }
1914
1915
1916
1917
1918
1919
1920
1921
1922
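/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO
 * options.
 **/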
1923 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1924 struct i40e_vsi_context *ctxt,
1925 u8 enabled_tc)
1926 {
1927 u16 qcount = 0, max_qcount, qmap, sections = 0;
1928 int i, override_q, pow, num_qps, ret;
1929 u8 netdev_tc = 0, offset = 0;
1930
1931 if (vsi->type != I40E_VSI_MAIN)
1932 return -EINVAL;
1933 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1934 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1935 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1936 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1937 num_qps = vsi->mqprio_qopt.qopt.count[0];
1938
1939
1940 pow = ilog2(num_qps);
1941 if (!is_power_of_2(num_qps))
1942 pow++;
1943 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1944 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1945
1946
1947 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1948 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1949
1950 if (vsi->tc_config.enabled_tc & BIT(i)) {
1951 offset = vsi->mqprio_qopt.qopt.offset[i];
1952 qcount = vsi->mqprio_qopt.qopt.count[i];
1953 if (qcount > max_qcount)
1954 max_qcount = qcount;
1955 vsi->tc_config.tc_info[i].qoffset = offset;
1956 vsi->tc_config.tc_info[i].qcount = qcount;
1957 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1958 } else {
1959
1960
1961
1962
1963 vsi->tc_config.tc_info[i].qoffset = 0;
1964 vsi->tc_config.tc_info[i].qcount = 1;
1965 vsi->tc_config.tc_info[i].netdev_tc = 0;
1966 }
1967 }
1968
1969
1970 vsi->num_queue_pairs = offset + qcount;
1971
1972
1973 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1974 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1975 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1976 ctxt->info.valid_sections |= cpu_to_le16(sections);
1977
1978
1979 vsi->rss_size = max_qcount;
1980 ret = i40e_vsi_config_rss(vsi);
1981 if (ret) {
1982 dev_info(&vsi->back->pdev->dev,
1983 "Failed to reconfig rss for num_queues (%u)\n",
1984 max_qcount);
1985 return ret;
1986 }
1987 vsi->reconfig_rss = true;
1988 dev_dbg(&vsi->back->pdev->dev,
1989 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1990
1991
1992
1993
1994 override_q = vsi->mqprio_qopt.qopt.count[0];
1995 if (override_q && override_q < vsi->num_queue_pairs) {
1996 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1997 vsi->next_base_queue = override_q;
1998 }
1999 return 0;
2000 }
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
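/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/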
2011 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2012 struct i40e_vsi_context *ctxt,
2013 u8 enabled_tc,
2014 bool is_add)
2015 {
2016 struct i40e_pf *pf = vsi->back;
2017 u16 num_tc_qps = 0;
2018 u16 sections = 0;
2019 u8 netdev_tc = 0;
2020 u16 numtc = 1;
2021 u16 qcount;
2022 u8 offset;
2023 u16 qmap;
2024 int i;
2025
2026 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2027 offset = 0;
2028
2029 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
2030
2031 if (vsi->type == I40E_VSI_MAIN) {
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041 if (vsi->req_queue_pairs > 0)
2042 vsi->num_queue_pairs = vsi->req_queue_pairs;
2043 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2044 vsi->num_queue_pairs = pf->num_lan_msix;
2045 else
2046 vsi->num_queue_pairs = 1;
2047 }
2048
2049
2050 if (vsi->type == I40E_VSI_MAIN ||
2051 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
2052 num_tc_qps = vsi->num_queue_pairs;
2053 else
2054 num_tc_qps = vsi->alloc_queue_pairs;
2055
2056 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2057
2058 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2059 if (enabled_tc & BIT(i))
2060 numtc++;
2061 }
2062 if (!numtc) {
2063 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
2064 numtc = 1;
2065 }
2066 num_tc_qps = num_tc_qps / numtc;
2067 num_tc_qps = min_t(int, num_tc_qps,
2068 i40e_pf_get_max_q_per_tc(pf));
2069 }
2070
2071 vsi->tc_config.numtc = numtc;
2072 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
2073
2074
2075 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2076 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
2077
2078
2079 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2080
2081 if (vsi->tc_config.enabled_tc & BIT(i)) {
2082
2083 int pow, num_qps;
2084
2085 switch (vsi->type) {
2086 case I40E_VSI_MAIN:
2087 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
2088 I40E_FLAG_FD_ATR_ENABLED)) ||
2089 vsi->tc_config.enabled_tc != 1) {
2090 qcount = min_t(int, pf->alloc_rss_size,
2091 num_tc_qps);
2092 break;
2093 }
2094 fallthrough;
2095 case I40E_VSI_FDIR:
2096 case I40E_VSI_SRIOV:
2097 case I40E_VSI_VMDQ2:
2098 default:
2099 qcount = num_tc_qps;
2100 WARN_ON(i != 0);
2101 break;
2102 }
2103 vsi->tc_config.tc_info[i].qoffset = offset;
2104 vsi->tc_config.tc_info[i].qcount = qcount;
2105
2106
2107 num_qps = qcount;
2108 pow = 0;
2109 while (num_qps && (BIT_ULL(pow) < qcount)) {
2110 pow++;
2111 num_qps >>= 1;
2112 }
2113
2114 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
2115 qmap =
2116 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2117 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
2118
2119 offset += qcount;
2120 } else {
2121
2122
2123
2124
2125 vsi->tc_config.tc_info[i].qoffset = 0;
2126 vsi->tc_config.tc_info[i].qcount = 1;
2127 vsi->tc_config.tc_info[i].netdev_tc = 0;
2128
2129 qmap = 0;
2130 }
2131 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
2132 }
2133
2134 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
2135 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
2136 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
2137 vsi->num_queue_pairs = offset;
2138
2139
2140 if (is_add) {
2141 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
2142
2143 ctxt->info.up_enable_bits = enabled_tc;
2144 }
2145 if (vsi->type == I40E_VSI_SRIOV) {
2146 ctxt->info.mapping_flags |=
2147 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2148 for (i = 0; i < vsi->num_queue_pairs; i++)
2149 ctxt->info.queue_mapping[i] =
2150 cpu_to_le16(vsi->base_queue + i);
2151 } else {
2152 ctxt->info.mapping_flags |=
2153 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2154 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
2155 }
2156 ctxt->info.valid_sections |= cpu_to_le16(sections);
2157 }
2158
2159
2160
2161
2162
2163
2164
2165
2166
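/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */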
2167 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
2168 {
2169 struct i40e_netdev_priv *np = netdev_priv(netdev);
2170 struct i40e_vsi *vsi = np->vsi;
2171
2172 if (i40e_add_mac_filter(vsi, addr))
2173 return 0;
2174 else
2175 return -ENOMEM;
2176 }
2177
2178
2179
2180
2181
2182
2183
2184
2185
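/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */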
2186 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2187 {
2188 struct i40e_netdev_priv *np = netdev_priv(netdev);
2189 struct i40e_vsi *vsi = np->vsi;
2190
2191
2192
2193
2194
2195
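/* Under some circumstances, we might receive a request to delete
 * our own device address from our uc list. Because we store the
 * device address in the VSI's MAC/VLAN filter list, we need to ignore
 * such requests and not delete our device address from this list.
 */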
2196 if (ether_addr_equal(addr, netdev->dev_addr))
2197 return 0;
2198
2199 i40e_del_mac_filter(vsi, addr);
2200
2201 return 0;
2202 }
2203
2204
2205
2206
2207
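/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/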
2208 static void i40e_set_rx_mode(struct net_device *netdev)
2209 {
2210 struct i40e_netdev_priv *np = netdev_priv(netdev);
2211 struct i40e_vsi *vsi = np->vsi;
2212
2213 spin_lock_bh(&vsi->mac_filter_hash_lock);
2214
2215 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2216 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2217
2218 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2219
2220
2221 if (vsi->current_netdev_flags != vsi->netdev->flags) {
2222 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2223 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2224 }
2225 }
2226
2227
2228
2229
2230
2231
2232
2233
2234
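/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/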
2235 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2236 struct hlist_head *from)
2237 {
2238 struct i40e_mac_filter *f;
2239 struct hlist_node *h;
2240
2241 hlist_for_each_entry_safe(f, h, from, hlist) {
2242 u64 key = i40e_addr_to_hkey(f->macaddr);
2243
2244
2245 hlist_del(&f->hlist);
2246 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2247 }
2248 }
2249
2250
2251
2252
2253
2254
2255
2256
2257
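/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/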
2258 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2259 struct hlist_head *from)
2260 {
2261 struct i40e_new_mac_filter *new;
2262 struct hlist_node *h;
2263
2264 hlist_for_each_entry_safe(new, h, from, hlist) {
2265
2266 hlist_del(&new->hlist);
2267 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2268 kfree(new);
2269 }
2270 }
2271
2272
2273
2274
2275
2276
2277
2278
2279
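/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 */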
2280 static
2281 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2282 {
2283 hlist_for_each_entry_continue(next, hlist) {
2284 if (!is_broadcast_ether_addr(next->f->macaddr))
2285 return next;
2286 }
2287
2288 return NULL;
2289 }
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
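/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean that all filters
 * were successful!
 **/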
2301 static int
2302 i40e_update_filter_state(int count,
2303 struct i40e_aqc_add_macvlan_element_data *add_list,
2304 struct i40e_new_mac_filter *add_head)
2305 {
2306 int retval = 0;
2307 int i;
2308
2309 for (i = 0; i < count; i++) {
2310
2311
2312
2313
2314
2315
2316 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2317 add_head->state = I40E_FILTER_FAILED;
2318 } else {
2319 add_head->state = I40E_FILTER_ACTIVE;
2320 retval++;
2321 }
2322
2323 add_head = i40e_next_filter(add_head);
2324 if (!add_head)
2325 break;
2326 }
2327
2328 return retval;
2329 }
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
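/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that callers can batch errors, and
 * explicitly ignores a firmware "not found" response since that just means
 * the filter is already gone.
 **/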
2344 static
2345 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2346 struct i40e_aqc_remove_macvlan_element_data *list,
2347 int num_del, int *retval)
2348 {
2349 struct i40e_hw *hw = &vsi->back->hw;
2350 enum i40e_admin_queue_err aq_status;
2351 i40e_status aq_ret;
2352
2353 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2354 &aq_status);
2355
2356
2357 if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2358 *retval = -EIO;
2359 dev_info(&vsi->back->pdev->dev,
2360 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2361 vsi_name, i40e_stat_str(hw, aq_ret),
2362 i40e_aq_str(hw, aq_status));
2363 }
2364 }
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
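/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out
 * of space for more filters on the main VSI.
 **/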
2378 static
2379 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2380 struct i40e_aqc_add_macvlan_element_data *list,
2381 struct i40e_new_mac_filter *add_head,
2382 int num_add)
2383 {
2384 struct i40e_hw *hw = &vsi->back->hw;
2385 enum i40e_admin_queue_err aq_status;
2386 int fcnt;
2387
2388 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2389 fcnt = i40e_update_filter_state(num_add, list, add_head);
2390
2391 if (fcnt != num_add) {
2392 if (vsi->type == I40E_VSI_MAIN) {
2393 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2394 dev_warn(&vsi->back->pdev->dev,
2395 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2396 i40e_aq_str(hw, aq_status), vsi_name);
2397 } else if (vsi->type == I40E_VSI_SRIOV ||
2398 vsi->type == I40E_VSI_VMDQ1 ||
2399 vsi->type == I40E_VSI_VMDQ2) {
2400 dev_warn(&vsi->back->pdev->dev,
2401 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2402 i40e_aq_str(hw, aq_status), vsi_name,
2403 vsi_name);
2404 } else {
2405 dev_warn(&vsi->back->pdev->dev,
2406 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2407 i40e_aq_str(hw, aq_status), vsi_name,
2408 vsi->type);
2409 }
2410 }
2411 }
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
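/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure;
 **/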
2425 static i40e_status
2426 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2427 struct i40e_mac_filter *f)
2428 {
2429 bool enable = f->state == I40E_FILTER_NEW;
2430 struct i40e_hw *hw = &vsi->back->hw;
2431 i40e_status aq_ret;
2432
2433 if (f->vlan == I40E_VLAN_ANY) {
2434 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2435 vsi->seid,
2436 enable,
2437 NULL);
2438 } else {
2439 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2440 vsi->seid,
2441 enable,
2442 f->vlan,
2443 NULL);
2444 }
2445
2446 if (aq_ret) {
2447 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2448 dev_warn(&vsi->back->pdev->dev,
2449 "Error %s, forcing overflow promiscuous on %s\n",
2450 i40e_aq_str(hw, hw->aq.asq_last_status),
2451 vsi_name);
2452 }
2453
2454 return aq_ret;
2455 }
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
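/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in. This identifies and sets it appropriately.
 * Returns 0 on success.
 **/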
2466 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2467 {
2468 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2469 struct i40e_hw *hw = &pf->hw;
2470 i40e_status aq_ret;
2471
2472 if (vsi->type == I40E_VSI_MAIN &&
2473 pf->lan_veb != I40E_NO_VEB &&
2474 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2475
2476
2477
2478
2479
2480 if (promisc)
2481 aq_ret = i40e_aq_set_default_vsi(hw,
2482 vsi->seid,
2483 NULL);
2484 else
2485 aq_ret = i40e_aq_clear_default_vsi(hw,
2486 vsi->seid,
2487 NULL);
2488 if (aq_ret) {
2489 dev_info(&pf->pdev->dev,
2490 "Set default VSI failed, err %s, aq_err %s\n",
2491 i40e_stat_str(hw, aq_ret),
2492 i40e_aq_str(hw, hw->aq.asq_last_status));
2493 }
2494 } else {
2495 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2496 hw,
2497 vsi->seid,
2498 promisc, NULL,
2499 true);
2500 if (aq_ret) {
2501 dev_info(&pf->pdev->dev,
2502 "set unicast promisc failed, err %s, aq_err %s\n",
2503 i40e_stat_str(hw, aq_ret),
2504 i40e_aq_str(hw, hw->aq.asq_last_status));
2505 }
2506 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2507 hw,
2508 vsi->seid,
2509 promisc, NULL);
2510 if (aq_ret) {
2511 dev_info(&pf->pdev->dev,
2512 "set multicast promisc failed, err %s, aq_err %s\n",
2513 i40e_stat_str(hw, aq_ret),
2514 i40e_aq_str(hw, hw->aq.asq_last_status));
2515 }
2516 }
2517
2518 if (!aq_ret)
2519 pf->cur_promisc = promisc;
2520
2521 return aq_ret;
2522 }
2523
2524
2525
2526
2527
2528
2529
2530
2531
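/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/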
2532 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2533 {
2534 struct hlist_head tmp_add_list, tmp_del_list;
2535 struct i40e_mac_filter *f;
2536 struct i40e_new_mac_filter *new, *add_head = NULL;
2537 struct i40e_hw *hw = &vsi->back->hw;
2538 bool old_overflow, new_overflow;
2539 unsigned int failed_filters = 0;
2540 unsigned int vlan_filters = 0;
2541 char vsi_name[16] = "PF";
2542 int filter_list_len = 0;
2543 i40e_status aq_ret = 0;
2544 u32 changed_flags = 0;
2545 struct hlist_node *h;
2546 struct i40e_pf *pf;
2547 int num_add = 0;
2548 int num_del = 0;
2549 int retval = 0;
2550 u16 cmd_flags;
2551 int list_size;
2552 int bkt;
2553
2554
2555 struct i40e_aqc_add_macvlan_element_data *add_list;
2556 struct i40e_aqc_remove_macvlan_element_data *del_list;
2557
2558 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2559 usleep_range(1000, 2000);
2560 pf = vsi->back;
2561
2562 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2563
2564 if (vsi->netdev) {
2565 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2566 vsi->current_netdev_flags = vsi->netdev->flags;
2567 }
2568
2569 INIT_HLIST_HEAD(&tmp_add_list);
2570 INIT_HLIST_HEAD(&tmp_del_list);
2571
2572 if (vsi->type == I40E_VSI_SRIOV)
2573 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2574 else if (vsi->type != I40E_VSI_MAIN)
2575 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2576
2577 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2578 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2579
2580 spin_lock_bh(&vsi->mac_filter_hash_lock);
2581
2582 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2583 if (f->state == I40E_FILTER_REMOVE) {
2584
2585 hash_del(&f->hlist);
2586 hlist_add_head(&f->hlist, &tmp_del_list);
2587
2588
2589 continue;
2590 }
2591 if (f->state == I40E_FILTER_NEW) {
2592
2593 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2594 if (!new)
2595 goto err_no_memory_locked;
2596
2597
2598 new->f = f;
2599 new->state = f->state;
2600
2601
2602 hlist_add_head(&new->hlist, &tmp_add_list);
2603 }
2604
2605
2606
2607
2608
2609 if (f->vlan > 0)
2610 vlan_filters++;
2611 }
2612
2613 if (vsi->type != I40E_VSI_SRIOV)
2614 retval = i40e_correct_mac_vlan_filters
2615 (vsi, &tmp_add_list, &tmp_del_list,
2616 vlan_filters);
2617 else
2618 retval = i40e_correct_vf_mac_vlan_filters
2619 (vsi, &tmp_add_list, &tmp_del_list,
2620 vlan_filters, pf->vf[vsi->vf_id].trusted);
2621
2622 hlist_for_each_entry(new, &tmp_add_list, hlist)
2623 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2624
2625 if (retval)
2626 goto err_no_memory_locked;
2627
2628 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2629 }
2630
2631
2632 if (!hlist_empty(&tmp_del_list)) {
2633 filter_list_len = hw->aq.asq_buf_size /
2634 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2635 list_size = filter_list_len *
2636 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2637 del_list = kzalloc(list_size, GFP_ATOMIC);
2638 if (!del_list)
2639 goto err_no_memory;
2640
2641 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2642 cmd_flags = 0;
2643
2644
2645
2646
2647 if (is_broadcast_ether_addr(f->macaddr)) {
2648 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2649
2650 hlist_del(&f->hlist);
2651 kfree(f);
2652 continue;
2653 }
2654
2655
2656 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2657 if (f->vlan == I40E_VLAN_ANY) {
2658 del_list[num_del].vlan_tag = 0;
2659 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2660 } else {
2661 del_list[num_del].vlan_tag =
2662 cpu_to_le16((u16)(f->vlan));
2663 }
2664
2665 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2666 del_list[num_del].flags = cmd_flags;
2667 num_del++;
2668
2669
2670 if (num_del == filter_list_len) {
2671 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2672 num_del, &retval);
2673 memset(del_list, 0, list_size);
2674 num_del = 0;
2675 }
2676
2677
2678
2679 hlist_del(&f->hlist);
2680 kfree(f);
2681 }
2682
2683 if (num_del) {
2684 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2685 num_del, &retval);
2686 }
2687
2688 kfree(del_list);
2689 del_list = NULL;
2690 }
2691
2692 if (!hlist_empty(&tmp_add_list)) {
2693
2694 filter_list_len = hw->aq.asq_buf_size /
2695 sizeof(struct i40e_aqc_add_macvlan_element_data);
2696 list_size = filter_list_len *
2697 sizeof(struct i40e_aqc_add_macvlan_element_data);
2698 add_list = kzalloc(list_size, GFP_ATOMIC);
2699 if (!add_list)
2700 goto err_no_memory;
2701
2702 num_add = 0;
2703 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2704
2705
2706
2707 if (is_broadcast_ether_addr(new->f->macaddr)) {
2708 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2709 new->f))
2710 new->state = I40E_FILTER_FAILED;
2711 else
2712 new->state = I40E_FILTER_ACTIVE;
2713 continue;
2714 }
2715
2716
2717 if (num_add == 0)
2718 add_head = new;
2719 cmd_flags = 0;
2720 ether_addr_copy(add_list[num_add].mac_addr,
2721 new->f->macaddr);
2722 if (new->f->vlan == I40E_VLAN_ANY) {
2723 add_list[num_add].vlan_tag = 0;
2724 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2725 } else {
2726 add_list[num_add].vlan_tag =
2727 cpu_to_le16((u16)(new->f->vlan));
2728 }
2729 add_list[num_add].queue_number = 0;
2730
2731 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2732 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2733 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2734 num_add++;
2735
2736
2737 if (num_add == filter_list_len) {
2738 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2739 add_head, num_add);
2740 memset(add_list, 0, list_size);
2741 num_add = 0;
2742 }
2743 }
2744 if (num_add) {
2745 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2746 num_add);
2747 }
2748
2749
2750
2751 spin_lock_bh(&vsi->mac_filter_hash_lock);
2752 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2753
2754 if (new->f->state == I40E_FILTER_NEW)
2755 new->f->state = new->state;
2756 hlist_del(&new->hlist);
2757 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2758 kfree(new);
2759 }
2760 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2761 kfree(add_list);
2762 add_list = NULL;
2763 }
2764
2765
2766 spin_lock_bh(&vsi->mac_filter_hash_lock);
2767 vsi->active_filters = 0;
2768 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2769 if (f->state == I40E_FILTER_ACTIVE)
2770 vsi->active_filters++;
2771 else if (f->state == I40E_FILTER_FAILED)
2772 failed_filters++;
2773 }
2774 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2775
2776
2777
2778
2779
2780 if (old_overflow && !failed_filters &&
2781 vsi->active_filters < vsi->promisc_threshold) {
2782 dev_info(&pf->pdev->dev,
2783 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2784 vsi_name);
2785 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2786 vsi->promisc_threshold = 0;
2787 }
2788
2789
2790 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2791 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2792 goto out;
2793 }
2794
2795 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2796
2797
2798
2799
2800 if (!old_overflow && new_overflow)
2801 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2802
2803
2804 if (changed_flags & IFF_ALLMULTI) {
2805 bool cur_multipromisc;
2806
2807 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2808 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2809 vsi->seid,
2810 cur_multipromisc,
2811 NULL);
2812 if (aq_ret) {
2813 retval = i40e_aq_rc_to_posix(aq_ret,
2814 hw->aq.asq_last_status);
2815 dev_info(&pf->pdev->dev,
2816 "set multi promisc failed on %s, err %s aq_err %s\n",
2817 vsi_name,
2818 i40e_stat_str(hw, aq_ret),
2819 i40e_aq_str(hw, hw->aq.asq_last_status));
2820 } else {
2821 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2822 cur_multipromisc ? "entering" : "leaving");
2823 }
2824 }
2825
2826 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2827 bool cur_promisc;
2828
2829 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2830 new_overflow);
2831 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2832 if (aq_ret) {
2833 retval = i40e_aq_rc_to_posix(aq_ret,
2834 hw->aq.asq_last_status);
2835 dev_info(&pf->pdev->dev,
2836 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2837 cur_promisc ? "on" : "off",
2838 vsi_name,
2839 i40e_stat_str(hw, aq_ret),
2840 i40e_aq_str(hw, hw->aq.asq_last_status));
2841 }
2842 }
2843 out:
2844
2845 if (retval)
2846 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2847
2848 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2849 return retval;
2850
2851 err_no_memory:
2852
2853 spin_lock_bh(&vsi->mac_filter_hash_lock);
2854 err_no_memory_locked:
2855 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2856 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2857 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2858
2859 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2860 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2861 return -ENOMEM;
2862 }
2863
2864
2865
2866
2867
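/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/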
2868 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2869 {
2870 int v;
2871
2872 if (!pf)
2873 return;
2874 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2875 return;
2876 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2877 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2878 return;
2879 }
2880
2881 for (v = 0; v < pf->num_alloc_vsi; v++) {
2882 if (pf->vsi[v] &&
2883 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2884 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2885 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2886
2887 if (ret) {
2888
2889 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2890 pf->state);
2891 break;
2892 }
2893 }
2894 }
2895 }
2896
2897
2898
2899
2900
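/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 *
 * With XDP enabled, 3KB buffers are used unless the page size is at least
 * 8KB or legacy-rx is in effect, in which case 2KB buffers suffice.
 **/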
2901 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2902 {
2903 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2904 return I40E_RXBUFFER_2048;
2905 else
2906 return I40E_RXBUFFER_3072;
2907 }
2908
2909
2910
2911
2912
2913
2914
2915
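/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/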
2916 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2917 {
2918 struct i40e_netdev_priv *np = netdev_priv(netdev);
2919 struct i40e_vsi *vsi = np->vsi;
2920 struct i40e_pf *pf = vsi->back;
2921
2922 if (i40e_enabled_xdp_vsi(vsi)) {
2923 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2924
2925 if (frame_size > i40e_max_xdp_frame_size(vsi))
2926 return -EINVAL;
2927 }
2928
2929 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2930 netdev->mtu, new_mtu);
2931 netdev->mtu = new_mtu;
2932 if (netif_running(netdev))
2933 i40e_vsi_reinit_locked(vsi);
2934 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2935 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2936 return 0;
2937 }
2938
2939
2940
2941
2942
2943
2944
2945 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2946 {
2947 struct i40e_netdev_priv *np = netdev_priv(netdev);
2948 struct i40e_pf *pf = np->vsi->back;
2949
2950 switch (cmd) {
2951 case SIOCGHWTSTAMP:
2952 return i40e_ptp_get_ts_config(pf, ifr);
2953 case SIOCSHWTSTAMP:
2954 return i40e_ptp_set_ts_config(pf, ifr);
2955 default:
2956 return -EOPNOTSUPP;
2957 }
2958 }
2959
2960
2961
2962
2963
2964 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2965 {
2966 struct i40e_vsi_context ctxt;
2967 i40e_status ret;
2968
2969
2970 if (vsi->info.pvid)
2971 return;
2972
2973 if ((vsi->info.valid_sections &
2974 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2975 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2976 return;
2977
2978 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2979 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2980 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2981
2982 ctxt.seid = vsi->seid;
2983 ctxt.info = vsi->info;
2984 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2985 if (ret) {
2986 dev_info(&vsi->back->pdev->dev,
2987 "update vlan stripping failed, err %s aq_err %s\n",
2988 i40e_stat_str(&vsi->back->hw, ret),
2989 i40e_aq_str(&vsi->back->hw,
2990 vsi->back->hw.aq.asq_last_status));
2991 }
2992 }
2993
2994
2995
2996
2997
2998 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2999 {
3000 struct i40e_vsi_context ctxt;
3001 i40e_status ret;
3002
3003
3004 if (vsi->info.pvid)
3005 return;
3006
3007 if ((vsi->info.valid_sections &
3008 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3009 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3010 I40E_AQ_VSI_PVLAN_EMOD_MASK))
3011 return;
3012
3013 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3014 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3015 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3016
3017 ctxt.seid = vsi->seid;
3018 ctxt.info = vsi->info;
3019 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3020 if (ret) {
3021 dev_info(&vsi->back->pdev->dev,
3022 "update vlan stripping failed, err %s aq_err %s\n",
3023 i40e_stat_str(&vsi->back->hw, ret),
3024 i40e_aq_str(&vsi->back->hw,
3025 vsi->back->hw.aq.asq_last_status));
3026 }
3027 }
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3043 {
3044 struct i40e_mac_filter *f, *add_f;
3045 struct hlist_node *h;
3046 int bkt;
3047
3048 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
3059 f->state = I40E_FILTER_ACTIVE;
3060 continue;
3061 } else if (f->state == I40E_FILTER_REMOVE) {
3062 continue;
3063 }
3064 add_f = i40e_add_filter(vsi, f->macaddr, vid);
3065 if (!add_f) {
3066 dev_info(&vsi->back->pdev->dev,
3067 "Could not add vlan filter %d for %pM\n",
3068 vid, f->macaddr);
3069 return -ENOMEM;
3070 }
3071 }
3072
3073 return 0;
3074 }
3075
3076
3077
3078
3079
3080
3081 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
3082 {
3083 int err;
3084
3085 if (vsi->info.pvid)
3086 return -EINVAL;
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096 if (!vid)
3097 return 0;
3098
3099
3100 spin_lock_bh(&vsi->mac_filter_hash_lock);
3101 err = i40e_add_vlan_all_mac(vsi, vid);
3102 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3103 if (err)
3104 return err;
3105
3106
3107
3108
3109 i40e_service_event_schedule(vsi->back);
3110 return 0;
3111 }
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3127 {
3128 struct i40e_mac_filter *f;
3129 struct hlist_node *h;
3130 int bkt;
3131
3132 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3133 if (f->vlan == vid)
3134 __i40e_del_filter(vsi, f);
3135 }
3136 }
3137
3138
3139
3140
3141
3142
3143 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
3144 {
3145 if (!vid || vsi->info.pvid)
3146 return;
3147
3148 spin_lock_bh(&vsi->mac_filter_hash_lock);
3149 i40e_rm_vlan_all_mac(vsi, vid);
3150 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3151
3152
3153
3154
3155 i40e_service_event_schedule(vsi->back);
3156 }
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
3167 __always_unused __be16 proto, u16 vid)
3168 {
3169 struct i40e_netdev_priv *np = netdev_priv(netdev);
3170 struct i40e_vsi *vsi = np->vsi;
3171 int ret = 0;
3172
3173 if (vid >= VLAN_N_VID)
3174 return -EINVAL;
3175
3176 ret = i40e_vsi_add_vlan(vsi, vid);
3177 if (!ret)
3178 set_bit(vid, vsi->active_vlans);
3179
3180 return ret;
3181 }
3182
3183
3184
3185
3186
3187
3188
3189 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
3190 __always_unused __be16 proto, u16 vid)
3191 {
3192 struct i40e_netdev_priv *np = netdev_priv(netdev);
3193 struct i40e_vsi *vsi = np->vsi;
3194
3195 if (vid >= VLAN_N_VID)
3196 return;
3197 set_bit(vid, vsi->active_vlans);
3198 }
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3209 __always_unused __be16 proto, u16 vid)
3210 {
3211 struct i40e_netdev_priv *np = netdev_priv(netdev);
3212 struct i40e_vsi *vsi = np->vsi;
3213
3214
3215
3216
3217
3218 i40e_vsi_kill_vlan(vsi, vid);
3219
3220 clear_bit(vid, vsi->active_vlans);
3221
3222 return 0;
3223 }
3224
3225
3226
3227
3228
3229 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3230 {
3231 u16 vid;
3232
3233 if (!vsi->netdev)
3234 return;
3235
3236 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3237 i40e_vlan_stripping_enable(vsi);
3238 else
3239 i40e_vlan_stripping_disable(vsi);
3240
3241 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3242 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3243 vid);
3244 }
3245
3246
3247
3248
3249
3250
3251 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3252 {
3253 struct i40e_vsi_context ctxt;
3254 i40e_status ret;
3255
3256 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3257 vsi->info.pvid = cpu_to_le16(vid);
3258 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3259 I40E_AQ_VSI_PVLAN_INSERT_PVID |
3260 I40E_AQ_VSI_PVLAN_EMOD_STR;
3261
3262 ctxt.seid = vsi->seid;
3263 ctxt.info = vsi->info;
3264 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3265 if (ret) {
3266 dev_info(&vsi->back->pdev->dev,
3267 "add pvid failed, err %s aq_err %s\n",
3268 i40e_stat_str(&vsi->back->hw, ret),
3269 i40e_aq_str(&vsi->back->hw,
3270 vsi->back->hw.aq.asq_last_status));
3271 return -ENOENT;
3272 }
3273
3274 return 0;
3275 }
3276
3277
3278
3279
3280
3281
3282
3283 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3284 {
3285 vsi->info.pvid = 0;
3286
3287 i40e_vlan_stripping_disable(vsi);
3288 }
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3301 {
3302 int i, err = 0;
3303
3304 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3305 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3306
3307 if (!i40e_enabled_xdp_vsi(vsi))
3308 return err;
3309
3310 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3311 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3312
3313 return err;
3314 }
3315
3316
3317
3318
3319
3320
3321
3322 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3323 {
3324 int i;
3325
3326 if (vsi->tx_rings) {
3327 for (i = 0; i < vsi->num_queue_pairs; i++)
3328 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3329 i40e_free_tx_resources(vsi->tx_rings[i]);
3330 }
3331
3332 if (vsi->xdp_rings) {
3333 for (i = 0; i < vsi->num_queue_pairs; i++)
3334 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3335 i40e_free_tx_resources(vsi->xdp_rings[i]);
3336 }
3337 }
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3350 {
3351 int i, err = 0;
3352
3353 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3354 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3355 return err;
3356 }
3357
3358
3359
3360
3361
3362
3363
3364 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3365 {
3366 int i;
3367
3368 if (!vsi->rx_rings)
3369 return;
3370
3371 for (i = 0; i < vsi->num_queue_pairs; i++)
3372 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3373 i40e_free_rx_resources(vsi->rx_rings[i]);
3374 }
3375
3376
3377
3378
3379
3380
3381
3382
3383 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3384 {
3385 int cpu;
3386
3387 if (!ring->q_vector || !ring->netdev || ring->ch)
3388 return;
3389
3390
3391 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3392 return;
3393
3394 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3395 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3396 ring->queue_index);
3397 }
3398
3399
3400
3401
3402
3403
3404
3405 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3406 {
3407 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3408 int qid = ring->queue_index;
3409
3410 if (ring_is_xdp(ring))
3411 qid -= ring->vsi->alloc_queue_pairs;
3412
3413 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3414 return NULL;
3415
3416 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3417 }
3418
3419
3420
3421
3422
3423
3424
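/**
 * i40e_configure_tx_ring - Configure a transmit ring context in the HMC
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/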
3425 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3426 {
3427 struct i40e_vsi *vsi = ring->vsi;
3428 u16 pf_q = vsi->base_queue + ring->queue_index;
3429 struct i40e_hw *hw = &vsi->back->hw;
3430 struct i40e_hmc_obj_txq tx_ctx;
3431 i40e_status err = 0;
3432 u32 qtx_ctl = 0;
3433
3434 if (ring_is_xdp(ring))
3435 ring->xsk_pool = i40e_xsk_pool(ring);
3436
3437
3438 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3439 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3440 ring->atr_count = 0;
3441 } else {
3442 ring->atr_sample_rate = 0;
3443 }
3444
3445
3446 i40e_config_xps_tx_ring(ring);
3447
3448
3449 memset(&tx_ctx, 0, sizeof(tx_ctx));
3450
3451 tx_ctx.new_context = 1;
3452 tx_ctx.base = (ring->dma / 128);
3453 tx_ctx.qlen = ring->count;
3454 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3455 I40E_FLAG_FD_ATR_ENABLED));
3456 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3457
3458 if (vsi->type != I40E_VSI_FDIR)
3459 tx_ctx.head_wb_ena = 1;
3460 tx_ctx.head_wb_addr = ring->dma +
3461 (ring->count * sizeof(struct i40e_tx_desc));
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474 if (ring->ch)
3475 tx_ctx.rdylist =
3476 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3477
3478 else
3479 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3480
3481 tx_ctx.rdylist_act = 0;
3482
3483
3484 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3485 if (err) {
3486 dev_info(&vsi->back->pdev->dev,
3487 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3488 ring->queue_index, pf_q, err);
3489 return -ENOMEM;
3490 }
3491
3492
3493 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3494 if (err) {
3495 dev_info(&vsi->back->pdev->dev,
3496 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3497 ring->queue_index, pf_q, err);
3498 return -ENOMEM;
3499 }
3500
3501
3502 if (ring->ch) {
3503 if (ring->ch->type == I40E_VSI_VMDQ2)
3504 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3505 else
3506 return -EINVAL;
3507
3508 qtx_ctl |= (ring->ch->vsi_number <<
3509 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3510 I40E_QTX_CTL_VFVM_INDX_MASK;
3511 } else {
3512 if (vsi->type == I40E_VSI_VMDQ2) {
3513 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3514 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3515 I40E_QTX_CTL_VFVM_INDX_MASK;
3516 } else {
3517 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3518 }
3519 }
3520
3521 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3522 I40E_QTX_CTL_PF_INDX_MASK);
3523 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3524 i40e_flush(hw);
3525
3526
3527 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3528
3529 return 0;
3530 }
3531
3532
3533
3534
3535
3536
3537
3538 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3539 {
3540 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3541 }
3542
3543
3544
3545
3546
3547
3548
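/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/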
3549 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3550 {
3551 struct i40e_vsi *vsi = ring->vsi;
3552 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3553 u16 pf_q = vsi->base_queue + ring->queue_index;
3554 struct i40e_hw *hw = &vsi->back->hw;
3555 struct i40e_hmc_obj_rxq rx_ctx;
3556 i40e_status err = 0;
3557 bool ok;
3558 int ret;
3559
3560 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3561
3562
3563 memset(&rx_ctx, 0, sizeof(rx_ctx));
3564
3565 if (ring->vsi->type == I40E_VSI_MAIN)
3566 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3567
3568 kfree(ring->rx_bi);
3569 ring->xsk_pool = i40e_xsk_pool(ring);
3570 if (ring->xsk_pool) {
3571 ret = i40e_alloc_rx_bi_zc(ring);
3572 if (ret)
3573 return ret;
3574 ring->rx_buf_len =
3575 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3576
3577
3578
3579
3580 chain_len = 1;
3581 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3582 MEM_TYPE_XSK_BUFF_POOL,
3583 NULL);
3584 if (ret)
3585 return ret;
3586 dev_info(&vsi->back->pdev->dev,
3587 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3588 ring->queue_index);
3589
3590 } else {
3591 ret = i40e_alloc_rx_bi(ring);
3592 if (ret)
3593 return ret;
3594 ring->rx_buf_len = vsi->rx_buf_len;
3595 if (ring->vsi->type == I40E_VSI_MAIN) {
3596 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3597 MEM_TYPE_PAGE_SHARED,
3598 NULL);
3599 if (ret)
3600 return ret;
3601 }
3602 }
3603
3604 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3605 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3606
3607 rx_ctx.base = (ring->dma / 128);
3608 rx_ctx.qlen = ring->count;
3609
3610
3611 rx_ctx.dsize = 0;
3612
3613
3614
3615
3616 rx_ctx.hsplit_0 = 0;
3617
3618 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3619 if (hw->revision_id == 0)
3620 rx_ctx.lrxqthresh = 0;
3621 else
3622 rx_ctx.lrxqthresh = 1;
3623 rx_ctx.crcstrip = 1;
3624 rx_ctx.l2tsel = 1;
3625
3626 rx_ctx.showiv = 0;
3627
3628 rx_ctx.prefena = 1;
3629
3630
3631 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3632 if (err) {
3633 dev_info(&vsi->back->pdev->dev,
3634 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3635 ring->queue_index, pf_q, err);
3636 return -ENOMEM;
3637 }
3638
3639
3640 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3641 if (err) {
3642 dev_info(&vsi->back->pdev->dev,
3643 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3644 ring->queue_index, pf_q, err);
3645 return -ENOMEM;
3646 }
3647
3648
3649 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3650 clear_ring_build_skb_enabled(ring);
3651 else
3652 set_ring_build_skb_enabled(ring);
3653
3654 ring->rx_offset = i40e_rx_offset(ring);
3655
3656
3657 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3658 writel(0, ring->tail);
3659
3660 if (ring->xsk_pool) {
3661 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3662 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3663 } else {
3664 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3665 }
3666 if (!ok) {
3667
3668
3669
3670 dev_info(&vsi->back->pdev->dev,
3671 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3672 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3673 ring->queue_index, pf_q);
3674 }
3675
3676 return 0;
3677 }
3678
3679
3680
3681
3682
3683
3684
3685 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3686 {
3687 int err = 0;
3688 u16 i;
3689
3690 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3691 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3692
3693 if (err || !i40e_enabled_xdp_vsi(vsi))
3694 return err;
3695
3696 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3697 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3698
3699 return err;
3700 }
3701
3702
3703
3704
3705
3706
3707
3708 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3709 {
3710 int err = 0;
3711 u16 i;
3712
3713 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3714 vsi->max_frame = I40E_MAX_RXBUFFER;
3715 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3716 #if (PAGE_SIZE < 8192)
3717 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3718 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3719 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3720 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3721 #endif
3722 } else {
3723 vsi->max_frame = I40E_MAX_RXBUFFER;
3724 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3725 I40E_RXBUFFER_2048;
3726 }
3727
3728
3729 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3730 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3731
3732 return err;
3733 }
3734
3735
3736
3737
3738
3739 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3740 {
3741 struct i40e_ring *tx_ring, *rx_ring;
3742 u16 qoffset, qcount;
3743 int i, n;
3744
3745 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3746
3747 for (i = 0; i < vsi->num_queue_pairs; i++) {
3748 rx_ring = vsi->rx_rings[i];
3749 tx_ring = vsi->tx_rings[i];
3750 rx_ring->dcb_tc = 0;
3751 tx_ring->dcb_tc = 0;
3752 }
3753 return;
3754 }
3755
3756 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3757 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3758 continue;
3759
3760 qoffset = vsi->tc_config.tc_info[n].qoffset;
3761 qcount = vsi->tc_config.tc_info[n].qcount;
3762 for (i = qoffset; i < (qoffset + qcount); i++) {
3763 rx_ring = vsi->rx_rings[i];
3764 tx_ring = vsi->tx_rings[i];
3765 rx_ring->dcb_tc = n;
3766 tx_ring->dcb_tc = n;
3767 }
3768 }
3769 }
3770
3771
3772
3773
3774
3775 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3776 {
3777 if (vsi->netdev)
3778 i40e_set_rx_mode(vsi->netdev);
3779 }
3780
3781
3782
3783
3784
3785
3786
3787 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3788 {
3789 pf->fd_tcp4_filter_cnt = 0;
3790 pf->fd_udp4_filter_cnt = 0;
3791 pf->fd_sctp4_filter_cnt = 0;
3792 pf->fd_ip4_filter_cnt = 0;
3793 pf->fd_tcp6_filter_cnt = 0;
3794 pf->fd_udp6_filter_cnt = 0;
3795 pf->fd_sctp6_filter_cnt = 0;
3796 pf->fd_ip6_filter_cnt = 0;
3797 }
3798
3799
3800
3801
3802
3803
3804
3805
3806 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3807 {
3808 struct i40e_fdir_filter *filter;
3809 struct i40e_pf *pf = vsi->back;
3810 struct hlist_node *node;
3811
3812 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3813 return;
3814
3815
3816 i40e_reset_fdir_filter_cnt(pf);
3817
3818 hlist_for_each_entry_safe(filter, node,
3819 &pf->fdir_filter_list, fdir_node) {
3820 i40e_add_del_fdir(vsi, filter, true);
3821 }
3822 }
3823
3824
3825
3826
3827
3828 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3829 {
3830 int err;
3831
3832 i40e_set_vsi_rx_mode(vsi);
3833 i40e_restore_vlan(vsi);
3834 i40e_vsi_config_dcb_rings(vsi);
3835 err = i40e_vsi_configure_tx(vsi);
3836 if (!err)
3837 err = i40e_vsi_configure_rx(vsi);
3838
3839 return err;
3840 }
3841
3842
3843
3844
3845
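/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/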
3846 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3847 {
3848 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3849 struct i40e_pf *pf = vsi->back;
3850 struct i40e_hw *hw = &pf->hw;
3851 u16 vector;
3852 int i, q;
3853 u32 qp;
3854
3855
3856
3857
3858
3859 qp = vsi->base_queue;
3860 vector = vsi->base_vector;
3861 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3862 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3863
3864 q_vector->rx.next_update = jiffies + 1;
3865 q_vector->rx.target_itr =
3866 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3867 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3868 q_vector->rx.target_itr >> 1);
3869 q_vector->rx.current_itr = q_vector->rx.target_itr;
3870
3871 q_vector->tx.next_update = jiffies + 1;
3872 q_vector->tx.target_itr =
3873 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3874 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3875 q_vector->tx.target_itr >> 1);
3876 q_vector->tx.current_itr = q_vector->tx.target_itr;
3877
3878 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3879 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3880
3881
3882 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3883 for (q = 0; q < q_vector->num_ringpairs; q++) {
3884 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3885 u32 val;
3886
3887 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3888 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3889 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3890 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3891 (I40E_QUEUE_TYPE_TX <<
3892 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3893
3894 wr32(hw, I40E_QINT_RQCTL(qp), val);
3895
3896 if (has_xdp) {
3897 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3898 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3899 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3900 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3901 (I40E_QUEUE_TYPE_TX <<
3902 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3903
3904 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3905 }
3906
3907 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3908 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3909 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3910 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3911 (I40E_QUEUE_TYPE_RX <<
3912 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3913
3914
3915 if (q == (q_vector->num_ringpairs - 1))
3916 val |= (I40E_QUEUE_END_OF_LIST <<
3917 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3918
3919 wr32(hw, I40E_QINT_TQCTL(qp), val);
3920 qp++;
3921 }
3922 }
3923
3924 i40e_flush(hw);
3925 }
3926
3927
3928
3929
3930
3931 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3932 {
3933 struct i40e_hw *hw = &pf->hw;
3934 u32 val;
3935
3936
3937 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3938 rd32(hw, I40E_PFINT_ICR0);
3939
3940 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3941 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3942 I40E_PFINT_ICR0_ENA_GRST_MASK |
3943 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3944 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3945 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3946 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3947 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3948
3949 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3950 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3951
3952 if (pf->flags & I40E_FLAG_PTP)
3953 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3954
3955 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3956
3957
3958 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3959 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3960
3961
3962 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3963 }
3964
3965
3966
3967
3968
3969 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3970 {
3971 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3972 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3973 struct i40e_pf *pf = vsi->back;
3974 struct i40e_hw *hw = &pf->hw;
3975 u32 val;
3976
3977
3978 q_vector->rx.next_update = jiffies + 1;
3979 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3980 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3981 q_vector->rx.current_itr = q_vector->rx.target_itr;
3982 q_vector->tx.next_update = jiffies + 1;
3983 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3984 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3985 q_vector->tx.current_itr = q_vector->tx.target_itr;
3986
3987 i40e_enable_misc_int_causes(pf);
3988
3989
3990 wr32(hw, I40E_PFINT_LNKLST0, 0);
3991
3992
3993 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3994 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3995 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3996 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3997
3998 wr32(hw, I40E_QINT_RQCTL(0), val);
3999
4000 if (i40e_enabled_xdp_vsi(vsi)) {
4001 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4002 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
4003 (I40E_QUEUE_TYPE_TX
4004 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
4005
4006 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
4007 }
4008
4009 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4010 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
4011 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
4012
4013 wr32(hw, I40E_QINT_TQCTL(0), val);
4014 i40e_flush(hw);
4015 }
4016
4017
4018
4019
4020
4021 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
4022 {
4023 struct i40e_hw *hw = &pf->hw;
4024
4025 wr32(hw, I40E_PFINT_DYN_CTL0,
4026 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4027 i40e_flush(hw);
4028 }
4029
4030
4031
4032
4033
4034 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
4035 {
4036 struct i40e_hw *hw = &pf->hw;
4037 u32 val;
4038
4039 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4040 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4041 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4042
4043 wr32(hw, I40E_PFINT_DYN_CTL0, val);
4044 i40e_flush(hw);
4045 }
4046
4047
4048
4049
4050
4051
4052 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
4053 {
4054 struct i40e_q_vector *q_vector = data;
4055
4056 if (!q_vector->tx.ring && !q_vector->rx.ring)
4057 return IRQ_HANDLED;
4058
4059 napi_schedule_irqoff(&q_vector->napi);
4060
4061 return IRQ_HANDLED;
4062 }
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
4073 const cpumask_t *mask)
4074 {
4075 struct i40e_q_vector *q_vector =
4076 container_of(notify, struct i40e_q_vector, affinity_notify);
4077
4078 cpumask_copy(&q_vector->affinity_mask, mask);
4079 }
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089 static void i40e_irq_affinity_release(struct kref *ref) {}
4090
4091
4092
4093
4094
4095
4096
4097
4098 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
4099 {
4100 int q_vectors = vsi->num_q_vectors;
4101 struct i40e_pf *pf = vsi->back;
4102 int base = vsi->base_vector;
4103 int rx_int_idx = 0;
4104 int tx_int_idx = 0;
4105 int vector, err;
4106 int irq_num;
4107 int cpu;
4108
4109 for (vector = 0; vector < q_vectors; vector++) {
4110 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
4111
4112 irq_num = pf->msix_entries[base + vector].vector;
4113
4114 if (q_vector->tx.ring && q_vector->rx.ring) {
4115 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4116 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
4117 tx_int_idx++;
4118 } else if (q_vector->rx.ring) {
4119 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4120 "%s-%s-%d", basename, "rx", rx_int_idx++);
4121 } else if (q_vector->tx.ring) {
4122 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4123 "%s-%s-%d", basename, "tx", tx_int_idx++);
4124 } else {
4125
4126 continue;
4127 }
4128 err = request_irq(irq_num,
4129 vsi->irq_handler,
4130 0,
4131 q_vector->name,
4132 q_vector);
4133 if (err) {
4134 dev_info(&pf->pdev->dev,
4135 "MSIX request_irq failed, error: %d\n", err);
4136 goto free_queue_irqs;
4137 }
4138
4139
4140 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
4141 q_vector->affinity_notify.release = i40e_irq_affinity_release;
4142 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
4143
4144
4145
4146
4147
4148
4149 cpu = cpumask_local_spread(q_vector->v_idx, -1);
4150 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
4151 }
4152
4153 vsi->irqs_ready = true;
4154 return 0;
4155
4156 free_queue_irqs:
4157 while (vector) {
4158 vector--;
4159 irq_num = pf->msix_entries[base + vector].vector;
4160 irq_set_affinity_notifier(irq_num, NULL);
4161 irq_update_affinity_hint(irq_num, NULL);
4162 free_irq(irq_num, &vsi->q_vectors[vector]);
4163 }
4164 return err;
4165 }
4166
4167
4168
4169
4170
4171 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
4172 {
4173 struct i40e_pf *pf = vsi->back;
4174 struct i40e_hw *hw = &pf->hw;
4175 int base = vsi->base_vector;
4176 int i;
4177
4178
4179 for (i = 0; i < vsi->num_queue_pairs; i++) {
4180 u32 val;
4181
4182 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4183 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
4184 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4185
4186 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
4187 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
4188 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
4189
4190 if (!i40e_enabled_xdp_vsi(vsi))
4191 continue;
4192 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
4193 }
4194
4195
4196 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4197 for (i = vsi->base_vector;
4198 i < (vsi->num_q_vectors + vsi->base_vector); i++)
4199 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
4200
4201 i40e_flush(hw);
4202 for (i = 0; i < vsi->num_q_vectors; i++)
4203 synchronize_irq(pf->msix_entries[i + base].vector);
4204 } else {
4205
4206 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4207 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4208 i40e_flush(hw);
4209 synchronize_irq(pf->pdev->irq);
4210 }
4211 }
4212
4213
4214
4215
4216
4217 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4218 {
4219 struct i40e_pf *pf = vsi->back;
4220 int i;
4221
4222 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4223 for (i = 0; i < vsi->num_q_vectors; i++)
4224 i40e_irq_dynamic_enable(vsi, i);
4225 } else {
4226 i40e_irq_dynamic_enable_icr0(pf);
4227 }
4228
4229 i40e_flush(&pf->hw);
4230 return 0;
4231 }
4232
4233
4234
4235
4236
4237 static void i40e_free_misc_vector(struct i40e_pf *pf)
4238 {
4239
4240 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4241 i40e_flush(&pf->hw);
4242
4243 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4244 free_irq(pf->msix_entries[0].vector, pf);
4245 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4246 }
4247 }
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
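/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/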
4258 static irqreturn_t i40e_intr(int irq, void *data)
4259 {
4260 struct i40e_pf *pf = (struct i40e_pf *)data;
4261 struct i40e_hw *hw = &pf->hw;
4262 irqreturn_t ret = IRQ_NONE;
4263 u32 icr0, icr0_remaining;
4264 u32 val, ena_mask;
4265
4266 icr0 = rd32(hw, I40E_PFINT_ICR0);
4267 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4268
4269
4270 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4271 goto enable_intr;
4272
4273
4274 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4275 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4276 pf->sw_int_count++;
4277
4278 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4279 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4280 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4281 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4282 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4283 }
4284
4285
4286 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4287 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4288 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4289
4290
4291
4292
4293
4294
4295
4296 if (!test_bit(__I40E_DOWN, pf->state))
4297 napi_schedule_irqoff(&q_vector->napi);
4298 }
4299
4300 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4301 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4302 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4303 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4304 }
4305
4306 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4307 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4308 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4309 }
4310
4311 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4312
4313 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4314 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4315
4316 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4317 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4318 } else {
4319 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4320 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4321 }
4322 }
4323
4324 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4325 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4326 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4327 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4328 val = rd32(hw, I40E_GLGEN_RSTAT);
4329 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4330 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4331 if (val == I40E_RESET_CORER) {
4332 pf->corer_count++;
4333 } else if (val == I40E_RESET_GLOBR) {
4334 pf->globr_count++;
4335 } else if (val == I40E_RESET_EMPR) {
4336 pf->empr_count++;
4337 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4338 }
4339 }
4340
4341 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4342 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4343 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4344 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4345 rd32(hw, I40E_PFHMC_ERRORINFO),
4346 rd32(hw, I40E_PFHMC_ERRORDATA));
4347 }
4348
4349 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4350 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4351
4352 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4353 schedule_work(&pf->ptp_extts0_work);
4354
4355 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4356 i40e_ptp_tx_hwtstamp(pf);
4357
4358 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4359 }
4360
4361
4362
4363
4364
4365 icr0_remaining = icr0 & ena_mask;
4366 if (icr0_remaining) {
4367 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4368 icr0_remaining);
4369 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4370 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4371 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4372 dev_info(&pf->pdev->dev, "device will be reset\n");
4373 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4374 i40e_service_event_schedule(pf);
4375 }
4376 ena_mask &= ~icr0_remaining;
4377 }
4378 ret = IRQ_HANDLED;
4379
4380 enable_intr:
4381
4382 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4383 if (!test_bit(__I40E_DOWN, pf->state) ||
4384 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4385 i40e_service_event_schedule(pf);
4386 i40e_irq_dynamic_enable_icr0(pf);
4387 }
4388
4389 return ret;
4390 }
4391
4392
4393
4394
4395
4396
4397
4398
4399 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4400 {
4401 struct i40e_vsi *vsi = tx_ring->vsi;
4402 u16 i = tx_ring->next_to_clean;
4403 struct i40e_tx_buffer *tx_buf;
4404 struct i40e_tx_desc *tx_desc;
4405
4406 tx_buf = &tx_ring->tx_bi[i];
4407 tx_desc = I40E_TX_DESC(tx_ring, i);
4408 i -= tx_ring->count;
4409
4410 do {
4411 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4412
4413
4414 if (!eop_desc)
4415 break;
4416
4417
4418 smp_rmb();
4419
4420
4421 if (!(eop_desc->cmd_type_offset_bsz &
4422 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4423 break;
4424
4425
4426 tx_buf->next_to_watch = NULL;
4427
4428 tx_desc->buffer_addr = 0;
4429 tx_desc->cmd_type_offset_bsz = 0;
4430
4431 tx_buf++;
4432 tx_desc++;
4433 i++;
4434 if (unlikely(!i)) {
4435 i -= tx_ring->count;
4436 tx_buf = tx_ring->tx_bi;
4437 tx_desc = I40E_TX_DESC(tx_ring, 0);
4438 }
4439
4440 dma_unmap_single(tx_ring->dev,
4441 dma_unmap_addr(tx_buf, dma),
4442 dma_unmap_len(tx_buf, len),
4443 DMA_TO_DEVICE);
4444 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4445 kfree(tx_buf->raw_buf);
4446
4447 tx_buf->raw_buf = NULL;
4448 tx_buf->tx_flags = 0;
4449 tx_buf->next_to_watch = NULL;
4450 dma_unmap_len_set(tx_buf, len, 0);
4451 tx_desc->buffer_addr = 0;
4452 tx_desc->cmd_type_offset_bsz = 0;
4453
4454
4455 tx_buf++;
4456 tx_desc++;
4457 i++;
4458 if (unlikely(!i)) {
4459 i -= tx_ring->count;
4460 tx_buf = tx_ring->tx_bi;
4461 tx_desc = I40E_TX_DESC(tx_ring, 0);
4462 }
4463
4464
4465 budget--;
4466 } while (likely(budget));
4467
4468 i += tx_ring->count;
4469 tx_ring->next_to_clean = i;
4470
4471 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4472 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4473
4474 return budget > 0;
4475 }
4476
4477
4478
4479
4480
4481
4482 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4483 {
4484 struct i40e_q_vector *q_vector = data;
4485 struct i40e_vsi *vsi;
4486
4487 if (!q_vector->tx.ring)
4488 return IRQ_HANDLED;
4489
4490 vsi = q_vector->tx.ring->vsi;
4491 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4492
4493 return IRQ_HANDLED;
4494 }
4495
4496
4497
4498
4499
4500
4501
4502 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4503 {
4504 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4505 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4506 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4507
4508 tx_ring->q_vector = q_vector;
4509 tx_ring->next = q_vector->tx.ring;
4510 q_vector->tx.ring = tx_ring;
4511 q_vector->tx.count++;
4512
4513
4514 if (i40e_enabled_xdp_vsi(vsi)) {
4515 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4516
4517 xdp_ring->q_vector = q_vector;
4518 xdp_ring->next = q_vector->tx.ring;
4519 q_vector->tx.ring = xdp_ring;
4520 q_vector->tx.count++;
4521 }
4522
4523 rx_ring->q_vector = q_vector;
4524 rx_ring->next = q_vector->rx.ring;
4525 q_vector->rx.ring = rx_ring;
4526 q_vector->rx.count++;
4527 }
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
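/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/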
4538 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4539 {
4540 int qp_remaining = vsi->num_queue_pairs;
4541 int q_vectors = vsi->num_q_vectors;
4542 int num_ringpairs;
4543 int v_start = 0;
4544 int qp_idx = 0;
4545
4546
4547
4548
4549
4550
4551
4552
4553 for (; v_start < q_vectors; v_start++) {
4554 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4555
4556 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4557
4558 q_vector->num_ringpairs = num_ringpairs;
4559 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4560
4561 q_vector->rx.count = 0;
4562 q_vector->tx.count = 0;
4563 q_vector->rx.ring = NULL;
4564 q_vector->tx.ring = NULL;
4565
4566 while (num_ringpairs--) {
4567 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4568 qp_idx++;
4569 qp_remaining--;
4570 }
4571 }
4572 }
4573
4574
4575
4576
4577
4578
4579 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4580 {
4581 struct i40e_pf *pf = vsi->back;
4582 int err;
4583
4584 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4585 err = i40e_vsi_request_irq_msix(vsi, basename);
4586 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4587 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4588 pf->int_name, pf);
4589 else
4590 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4591 pf->int_name, pf);
4592
4593 if (err)
4594 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4595
4596 return err;
4597 }
4598
4599 #ifdef CONFIG_NET_POLL_CONTROLLER
4600
4601
4602
4603
4604
4605
4606
4607 static void i40e_netpoll(struct net_device *netdev)
4608 {
4609 struct i40e_netdev_priv *np = netdev_priv(netdev);
4610 struct i40e_vsi *vsi = np->vsi;
4611 struct i40e_pf *pf = vsi->back;
4612 int i;
4613
4614
4615 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4616 return;
4617
4618 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4619 for (i = 0; i < vsi->num_q_vectors; i++)
4620 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4621 } else {
4622 i40e_intr(pf->pdev->irq, netdev);
4623 }
4624 }
4625 #endif
4626
4627 #define I40E_QTX_ENA_WAIT_COUNT 50
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
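/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/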
4640 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4641 {
4642 int i;
4643 u32 tx_reg;
4644
4645 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4646 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4647 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4648 break;
4649
4650 usleep_range(10, 20);
4651 }
4652 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4653 return -ETIMEDOUT;
4654
4655 return 0;
4656 }
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4669 {
4670 struct i40e_hw *hw = &pf->hw;
4671 u32 tx_reg;
4672 int i;
4673
4674
4675 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4676 if (!enable)
4677 usleep_range(10, 20);
4678
4679 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4680 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4681 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4682 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4683 break;
4684 usleep_range(1000, 2000);
4685 }
4686
4687
4688 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4689 return;
4690
4691
4692 if (enable) {
4693 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4694 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4695 } else {
4696 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4697 }
4698
4699 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4700 }
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4711 bool is_xdp, bool enable)
4712 {
4713 int ret;
4714
4715 i40e_control_tx_q(pf, pf_q, enable);
4716
4717
4718 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4719 if (ret) {
4720 dev_info(&pf->pdev->dev,
4721 "VSI seid %d %sTx ring %d %sable timeout\n",
4722 seid, (is_xdp ? "XDP " : ""), pf_q,
4723 (enable ? "en" : "dis"));
4724 }
4725
4726 return ret;
4727 }
4728
4729
4730
4731
4732
4733 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4734 {
4735 struct i40e_pf *pf = vsi->back;
4736 int i, pf_q, ret = 0;
4737
4738 pf_q = vsi->base_queue;
4739 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4740 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4741 pf_q,
4742 false , true);
4743 if (ret)
4744 break;
4745
4746 if (!i40e_enabled_xdp_vsi(vsi))
4747 continue;
4748
4749 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4750 pf_q + vsi->alloc_queue_pairs,
4751 true , true);
4752 if (ret)
4753 break;
4754 }
4755 return ret;
4756 }
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4770 {
4771 int i;
4772 u32 rx_reg;
4773
4774 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4775 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4776 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4777 break;
4778
4779 usleep_range(10, 20);
4780 }
4781 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4782 return -ETIMEDOUT;
4783
4784 return 0;
4785 }
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4798 {
4799 struct i40e_hw *hw = &pf->hw;
4800 u32 rx_reg;
4801 int i;
4802
4803 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4804 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4805 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4806 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4807 break;
4808 usleep_range(1000, 2000);
4809 }
4810
4811
4812 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4813 return;
4814
4815
4816 if (enable)
4817 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4818 else
4819 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4820
4821 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4822 }
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
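/**
 * i40e_control_wait_rx_q - Start/stop an Rx queue and wait for completion
 * @pf: the PF structure
 * @pf_q: queue being configured
 * @enable: start or stop the rings
 *
 * This function enables or disables a single queue along with waiting
 * for the change to finish. The caller of this function should handle
 * the delays needed in the case of disabling queues.
 **/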
4834 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4835 {
4836 int ret = 0;
4837
4838 i40e_control_rx_q(pf, pf_q, enable);
4839
4840
4841 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4844
4845 return ret;
4846 }
4847
4848
4849
4850
4851
4852 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4853 {
4854 struct i40e_pf *pf = vsi->back;
4855 int i, pf_q, ret = 0;
4856
4857 pf_q = vsi->base_queue;
4858 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4859 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4860 if (ret) {
4861 dev_info(&pf->pdev->dev,
4862 "VSI seid %d Rx ring %d enable timeout\n",
4863 vsi->seid, pf_q);
4864 break;
4865 }
4866 }
4867
4868 return ret;
4869 }
4870
4871
4872
4873
4874
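/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/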
4875 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4876 {
4877 int ret = 0;
4878
4879
4880 ret = i40e_vsi_enable_rx(vsi);
4881 if (ret)
4882 return ret;
4883 ret = i40e_vsi_enable_tx(vsi);
4884
4885 return ret;
4886 }
4887
4888 #define I40E_DISABLE_TX_GAP_MSEC 50
4889
4890
4891
4892
4893
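/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/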
4894 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4895 {
4896 struct i40e_pf *pf = vsi->back;
4897 int pf_q, err, q_end;
4898
4899
4900 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4901 return i40e_vsi_stop_rings_no_wait(vsi);
4902
4903 q_end = vsi->base_queue + vsi->num_queue_pairs;
4904 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4905 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4906
4907 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4908 err = i40e_control_wait_rx_q(pf, pf_q, false);
4909 if (err)
4910 dev_info(&pf->pdev->dev,
4911 "VSI seid %d Rx ring %d disable timeout\n",
4912 vsi->seid, pf_q);
4913 }
4914
4915 msleep(I40E_DISABLE_TX_GAP_MSEC);
4917 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4918 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4919
4920 i40e_vsi_wait_queues_disabled(vsi);
4921 }
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4935 {
4936 struct i40e_pf *pf = vsi->back;
4937 int i, pf_q;
4938
4939 pf_q = vsi->base_queue;
4940 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4941 i40e_control_tx_q(pf, pf_q, false);
4942 i40e_control_rx_q(pf, pf_q, false);
4943 }
4944 }
4945
4946
4947
4948
4949
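/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/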
4950 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4951 {
4952 struct i40e_pf *pf = vsi->back;
4953 struct i40e_hw *hw = &pf->hw;
4954 int base = vsi->base_vector;
4955 u32 val, qp;
4956 int i;
4957
4958 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4959 if (!vsi->q_vectors)
4960 return;
4961
4962 if (!vsi->irqs_ready)
4963 return;
4964
4965 vsi->irqs_ready = false;
4966 for (i = 0; i < vsi->num_q_vectors; i++) {
4967 int irq_num;
4968 u16 vector;
4969
4970 vector = i + base;
4971 irq_num = pf->msix_entries[vector].vector;
4972
4973
4974 if (!vsi->q_vectors[i] ||
4975 !vsi->q_vectors[i]->num_ringpairs)
4976 continue;
4977
4978
4979 irq_set_affinity_notifier(irq_num, NULL);
4980
4981 irq_update_affinity_hint(irq_num, NULL);
4982 free_irq(irq_num, vsi->q_vectors[i]);
4983
4984
4985
4986
4987
4988
4989
4990
4991 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4992 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4993 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4994 val |= I40E_QUEUE_END_OF_LIST
4995 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4996 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4997
4998 while (qp != I40E_QUEUE_END_OF_LIST) {
4999 u32 next;
5000
5001 val = rd32(hw, I40E_QINT_RQCTL(qp));
5002
5003 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5004 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5005 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5006 I40E_QINT_RQCTL_INTEVENT_MASK);
5007
5008 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5009 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5010
5011 wr32(hw, I40E_QINT_RQCTL(qp), val);
5012
5013 val = rd32(hw, I40E_QINT_TQCTL(qp));
5014
5015 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
5016 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
5017
5018 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5019 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5020 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5021 I40E_QINT_TQCTL_INTEVENT_MASK);
5022
5023 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5024 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5025
5026 wr32(hw, I40E_QINT_TQCTL(qp), val);
5027 qp = next;
5028 }
5029 }
5030 } else {
5031 free_irq(pf->pdev->irq, pf);
5032
5033 val = rd32(hw, I40E_PFINT_LNKLST0);
5034 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
5035 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5036 val |= I40E_QUEUE_END_OF_LIST
5037 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5038 wr32(hw, I40E_PFINT_LNKLST0, val);
5039
5040 val = rd32(hw, I40E_QINT_RQCTL(qp));
5041 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5042 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5043 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5044 I40E_QINT_RQCTL_INTEVENT_MASK);
5045
5046 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5047 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5048
5049 wr32(hw, I40E_QINT_RQCTL(qp), val);
5050
5051 val = rd32(hw, I40E_QINT_TQCTL(qp));
5052
5053 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5054 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5055 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5056 I40E_QINT_TQCTL_INTEVENT_MASK);
5057
5058 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5059 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5060
5061 wr32(hw, I40E_QINT_TQCTL(qp), val);
5062 }
5063 }
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
5075 {
5076 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
5077 struct i40e_ring *ring;
5078
5079 if (!q_vector)
5080 return;
5081
5082
5083 i40e_for_each_ring(ring, q_vector->tx)
5084 ring->q_vector = NULL;
5085
5086 i40e_for_each_ring(ring, q_vector->rx)
5087 ring->q_vector = NULL;
5088
5089
5090 if (vsi->netdev)
5091 netif_napi_del(&q_vector->napi);
5092
5093 vsi->q_vectors[v_idx] = NULL;
5094
5095 kfree_rcu(q_vector, rcu);
5096 }
5097
5098
5099
5100
5101
5102
5103
5104
5105 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
5106 {
5107 int v_idx;
5108
5109 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
5110 i40e_free_q_vector(vsi, v_idx);
5111 }
5112
5113
5114
5115
5116
5117 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
5118 {
5119
5120 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5121 pci_disable_msix(pf->pdev);
5122 kfree(pf->msix_entries);
5123 pf->msix_entries = NULL;
5124 kfree(pf->irq_pile);
5125 pf->irq_pile = NULL;
5126 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
5127 pci_disable_msi(pf->pdev);
5128 }
5129 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
5130 }
5131
5132
5133
5134
5135
5136
5137
5138
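/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/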
5139 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
5140 {
5141 int i;
5142
5143 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
5144 i40e_free_misc_vector(pf);
5145
5146 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
5147 I40E_IWARP_IRQ_PILE_ID);
5148
5149 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
5150 for (i = 0; i < pf->num_alloc_vsi; i++)
5151 if (pf->vsi[i])
5152 i40e_vsi_free_q_vectors(pf->vsi[i]);
5153 i40e_reset_interrupt_capability(pf);
5154 }
5155
5156
5157
5158
5159
5160 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
5161 {
5162 int q_idx;
5163
5164 if (!vsi->netdev)
5165 return;
5166
5167 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5168 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5169
5170 if (q_vector->rx.ring || q_vector->tx.ring)
5171 napi_enable(&q_vector->napi);
5172 }
5173 }
5174
5175
5176
5177
5178
5179 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
5180 {
5181 int q_idx;
5182
5183 if (!vsi->netdev)
5184 return;
5185
5186 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5187 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5188
5189 if (q_vector->rx.ring || q_vector->tx.ring)
5190 napi_disable(&q_vector->napi);
5191 }
5192 }
5193
5194
5195
5196
5197
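/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/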
5198 static void i40e_vsi_close(struct i40e_vsi *vsi)
5199 {
5200 struct i40e_pf *pf = vsi->back;

5201 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5202 i40e_down(vsi);
5203 i40e_vsi_free_irq(vsi);
5204 i40e_vsi_free_tx_resources(vsi);
5205 i40e_vsi_free_rx_resources(vsi);
5206 vsi->current_netdev_flags = 0;
5207 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5208 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5209 set_bit(__I40E_CLIENT_RESET, pf->state);
5210 }
5211
5212
5213
5214
5215
5216 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5217 {
5218 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5219 return;
5220
5221 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5222 if (vsi->netdev && netif_running(vsi->netdev))
5223 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5224 else
5225 i40e_vsi_close(vsi);
5226 }
5227
5228
5229
5230
5231
5232 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5233 {
5234 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5235 return;
5236
5237 if (vsi->netdev && netif_running(vsi->netdev))
5238 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5239 else
5240 i40e_vsi_open(vsi);
5241 }
5242
5243
5244
5245
5246
5247 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5248 {
5249 int v;
5250
5251 for (v = 0; v < pf->num_alloc_vsi; v++) {
5252 if (pf->vsi[v])
5253 i40e_quiesce_vsi(pf->vsi[v]);
5254 }
5255 }
5256
5257
5258
5259
5260
5261 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5262 {
5263 int v;
5264
5265 for (v = 0; v < pf->num_alloc_vsi; v++) {
5266 if (pf->vsi[v])
5267 i40e_unquiesce_vsi(pf->vsi[v]);
5268 }
5269 }
5270
5271
5272
5273
5274
5275
5276
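/**
 * i40e_vsi_wait_queues_disabled - Wait on each queue pair in VSI to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/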
5277 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5278 {
5279 struct i40e_pf *pf = vsi->back;
5280 int i, pf_q, ret;
5281
5282 pf_q = vsi->base_queue;
5283 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5284
5285 ret = i40e_pf_txq_wait(pf, pf_q, false);
5286 if (ret) {
5287 dev_info(&pf->pdev->dev,
5288 "VSI seid %d Tx ring %d disable timeout\n",
5289 vsi->seid, pf_q);
5290 return ret;
5291 }
5292
5293 if (!i40e_enabled_xdp_vsi(vsi))
5294 goto wait_rx;
5295
5296
5297 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5298 false);
5299 if (ret) {
5300 dev_info(&pf->pdev->dev,
5301 "VSI seid %d XDP Tx ring %d disable timeout\n",
5302 vsi->seid, pf_q);
5303 return ret;
5304 }
5305 wait_rx:
5306
5307 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5308 if (ret) {
5309 dev_info(&pf->pdev->dev,
5310 "VSI seid %d Rx ring %d disable timeout\n",
5311 vsi->seid, pf_q);
5312 return ret;
5313 }
5314 }
5315
5316 return 0;
5317 }
5318
5319 #ifdef CONFIG_I40E_DCB
5320
5321
5322
5323
5324
5325
5326
5327 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5328 {
5329 int v, ret = 0;
5330
5331 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5332 if (pf->vsi[v]) {
5333 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5334 if (ret)
5335 break;
5336 }
5337 }
5338
5339 return ret;
5340 }
5341
5342 #endif
5343
5344
5345
5346
5347
5348
5349
5350
5351 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5352 {
5353 struct i40e_dcb_app_priority_table app;
5354 struct i40e_hw *hw = &pf->hw;
5355 u8 enabled_tc = 1;
5356 u8 tc, i;
5357
5358 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5359
5360 for (i = 0; i < dcbcfg->numapps; i++) {
5361 app = dcbcfg->app[i];
5362 if (app.selector == I40E_APP_SEL_TCPIP &&
5363 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5364 tc = dcbcfg->etscfg.prioritytable[app.priority];
5365 enabled_tc |= BIT(tc);
5366 break;
5367 }
5368 }
5369
5370 return enabled_tc;
5371 }
5372
5373
5374
5375
5376
5377
5378
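/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/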
5379 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5380 {
5381 int i, tc_unused = 0;
5382 u8 num_tc = 0;
5383 u8 ret = 0;
5384
5385
5386
5387
5388
5389 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5390 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5391
5392
5393
5394
5395 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5396 if (num_tc & BIT(i)) {
5397 if (!tc_unused) {
5398 ret++;
5399 } else {
5400 pr_err("Non-contiguous TC - Disabling DCB\n");
5401 return 1;
5402 }
5403 } else {
5404 tc_unused = 1;
5405 }
5406 }
5407
5408
5409 if (!ret)
5410 ret = 1;
5411
5412 return ret;
5413 }
5414
5415
5416
5417
5418
5419
5420
5421
5422 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5423 {
5424 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5425 u8 enabled_tc = 1;
5426 u8 i;
5427
5428 for (i = 0; i < num_tc; i++)
5429 enabled_tc |= BIT(i);
5430
5431 return enabled_tc;
5432 }
5433
5434
5435
5436
5437
5438
5439
5440
5441 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5442 {
5443 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5444 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5445 u8 enabled_tc = 1, i;
5446
5447 for (i = 1; i < num_tc; i++)
5448 enabled_tc |= BIT(i);
5449 return enabled_tc;
5450 }
5451
5452
5453
5454
5455
5456
5457
5458 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5459 {
5460 struct i40e_hw *hw = &pf->hw;
5461 u8 i, enabled_tc = 1;
5462 u8 num_tc = 0;
5463 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5464
5465 if (i40e_is_tc_mqprio_enabled(pf))
5466 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5467
5468
5469 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5470 return 1;
5471
5472
5473 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5474 return i40e_dcb_get_num_tc(dcbcfg);
5475
5476
5477 if (pf->hw.func_caps.iscsi)
5478 enabled_tc = i40e_get_iscsi_tc_map(pf);
5479 else
5480 return 1;
5481
5482 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5483 if (enabled_tc & BIT(i))
5484 num_tc++;
5485 }
5486 return num_tc;
5487 }
5488
5489
5490
5491
5492
5493
5494
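/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/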
5495 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5496 {
5497 if (i40e_is_tc_mqprio_enabled(pf))
5498 return i40e_mqprio_get_enabled_tc(pf);
5499
5500
5501
5502
5503 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5504 return I40E_DEFAULT_TRAFFIC_CLASS;
5505
5506
5507 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5508 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5509
5510
5511 if (pf->hw.func_caps.iscsi)
5512 return i40e_get_iscsi_tc_map(pf);
5513 else
5514 return I40E_DEFAULT_TRAFFIC_CLASS;
5515 }
5516
5517
5518
5519
5520
5521
5522
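/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/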
5523 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5524 {
5525 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5526 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5527 struct i40e_pf *pf = vsi->back;
5528 struct i40e_hw *hw = &pf->hw;
5529 i40e_status ret;
5530 u32 tc_bw_max;
5531 int i;
5532
5533
5534 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5535 if (ret) {
5536 dev_info(&pf->pdev->dev,
5537 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5538 i40e_stat_str(&pf->hw, ret),
5539 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5540 return -EINVAL;
5541 }
5542
5543
5544 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5545 NULL);
5546 if (ret) {
5547 dev_info(&pf->pdev->dev,
5548 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5549 i40e_stat_str(&pf->hw, ret),
5550 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5551 return -EINVAL;
5552 }
5553
5554 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5555 dev_info(&pf->pdev->dev,
5556 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5557 bw_config.tc_valid_bits,
5558 bw_ets_config.tc_valid_bits);
5559
5560 }
5561
5562 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5563 vsi->bw_max_quanta = bw_config.max_bw;
5564 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5565 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5566 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5567 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5568 vsi->bw_ets_limit_credits[i] =
5569 le16_to_cpu(bw_ets_config.credits[i]);
5570
5571 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5572 }
5573
5574 return 0;
5575 }
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5586 u8 *bw_share)
5587 {
5588 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5589 struct i40e_pf *pf = vsi->back;
5590 i40e_status ret;
5591 int i;
5592
5593
5594 if (i40e_is_tc_mqprio_enabled(pf))
5595 return 0;
5596 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5597 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5598 if (ret)
5599 dev_info(&pf->pdev->dev,
5600 "Failed to reset tx rate for vsi->seid %u\n",
5601 vsi->seid);
5602 return ret;
5603 }
5604 memset(&bw_data, 0, sizeof(bw_data));
5605 bw_data.tc_valid_bits = enabled_tc;
5606 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5607 bw_data.tc_bw_credits[i] = bw_share[i];
5608
5609 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5610 if (ret) {
5611 dev_info(&pf->pdev->dev,
5612 "AQ command Config VSI BW allocation per TC failed = %d\n",
5613 pf->hw.aq.asq_last_status);
5614 return -EINVAL;
5615 }
5616
5617 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5618 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5619
5620 return 0;
5621 }
5622
5623
5624
5625
5626
5627
5628
5629 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5630 {
5631 struct net_device *netdev = vsi->netdev;
5632 struct i40e_pf *pf = vsi->back;
5633 struct i40e_hw *hw = &pf->hw;
5634 u8 netdev_tc = 0;
5635 int i;
5636 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5637
5638 if (!netdev)
5639 return;
5640
5641 if (!enabled_tc) {
5642 netdev_reset_tc(netdev);
5643 return;
5644 }
5645
5646
5647 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5648 return;
5649
5650
5651 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5652
5653
5654
5655
5656
5657
5658
5659 if (vsi->tc_config.enabled_tc & BIT(i))
5660 netdev_set_tc_queue(netdev,
5661 vsi->tc_config.tc_info[i].netdev_tc,
5662 vsi->tc_config.tc_info[i].qcount,
5663 vsi->tc_config.tc_info[i].qoffset);
5664 }
5665
5666 if (i40e_is_tc_mqprio_enabled(pf))
5667 return;
5668
5669
5670 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5671
5672 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5673
5674 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5675 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5676 }
5677 }
5678
5679
5680
5681
5682
5683
5684 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5685 struct i40e_vsi_context *ctxt)
5686 {
5687
5688
5689
5690
5691 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5692 memcpy(&vsi->info.queue_mapping,
5693 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5694 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5695 sizeof(vsi->info.tc_mapping));
5696 }
5697
5698
5699
5700
5701
5702
5703 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5704 {
5705 struct i40e_vsi_context ctxt = {};
5706 struct i40e_pf *pf;
5707 struct i40e_hw *hw;
5708 int ret;
5709
5710 if (!vsi)
5711 return I40E_ERR_PARAM;
5712 pf = vsi->back;
5713 hw = &pf->hw;
5714
5715 ctxt.seid = vsi->seid;
5716 ctxt.pf_num = hw->pf_id;
5717 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5718 ctxt.uplink_seid = vsi->uplink_seid;
5719 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5720 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5721 ctxt.info = vsi->info;
5722
5723 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5724 false);
5725 if (vsi->reconfig_rss) {
5726 vsi->rss_size = min_t(int, pf->alloc_rss_size,
5727 vsi->num_queue_pairs);
5728 ret = i40e_vsi_config_rss(vsi);
5729 if (ret) {
5730 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5731 return ret;
5732 }
5733 vsi->reconfig_rss = false;
5734 }
5735
5736 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5737 if (ret) {
5738 dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5739 i40e_stat_str(hw, ret),
5740 i40e_aq_str(hw, hw->aq.asq_last_status));
5741 return ret;
5742 }
5743
5744 i40e_vsi_update_queue_map(vsi, &ctxt);
5745 vsi->info.valid_sections = 0;
5746
5747 return ret;
5748 }
5749
5750
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760
5761
5762
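/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It gets configured with appropriate bandwidth
 * share and queue mapping. This also configures the netdev TC setup,
 * if required.
 *
 * Returns 0 on success, negative value on failure
 **/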
5763 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5764 {
5765 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5766 struct i40e_pf *pf = vsi->back;
5767 struct i40e_hw *hw = &pf->hw;
5768 struct i40e_vsi_context ctxt;
5769 int ret = 0;
5770 int i;
5771
5772
5773 if (vsi->tc_config.enabled_tc == enabled_tc &&
5774 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5775 return ret;
5776
5777
5778 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5779 if (enabled_tc & BIT(i))
5780 bw_share[i] = 1;
5781 }
5782
5783 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5784 if (ret) {
5785 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5786
5787 dev_info(&pf->pdev->dev,
5788 "Failed configuring TC map %d for VSI %d\n",
5789 enabled_tc, vsi->seid);
5790 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5791 &bw_config, NULL);
5792 if (ret) {
5793 dev_info(&pf->pdev->dev,
5794 "Failed querying vsi bw info, err %s aq_err %s\n",
5795 i40e_stat_str(hw, ret),
5796 i40e_aq_str(hw, hw->aq.asq_last_status));
5797 goto out;
5798 }
5799 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5800 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5801
5802 if (!valid_tc)
5803 valid_tc = bw_config.tc_valid_bits;
5804
5805 valid_tc |= 1;
5806 dev_info(&pf->pdev->dev,
5807 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5808 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5809 enabled_tc = valid_tc;
5810 }
5811
5812 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5813 if (ret) {
5814 dev_err(&pf->pdev->dev,
5815 "Unable to configure TC map %d for VSI %d\n",
5816 enabled_tc, vsi->seid);
5817 goto out;
5818 }
5819 }
5820
5821
5822 ctxt.seid = vsi->seid;
5823 ctxt.pf_num = vsi->back->hw.pf_id;
5824 ctxt.vf_num = 0;
5825 ctxt.uplink_seid = vsi->uplink_seid;
5826 ctxt.info = vsi->info;
5827 if (i40e_is_tc_mqprio_enabled(pf)) {
5828 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5829 if (ret)
5830 goto out;
5831 } else {
5832 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5833 }
5834
5835
5836
5837
5838 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5839 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5840 vsi->num_queue_pairs);
5841 ret = i40e_vsi_config_rss(vsi);
5842 if (ret) {
5843 dev_info(&vsi->back->pdev->dev,
5844 "Failed to reconfig rss for num_queues\n");
5845 return ret;
5846 }
5847 vsi->reconfig_rss = false;
5848 }
5849 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5850 ctxt.info.valid_sections |=
5851 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5852 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5853 }
5854
5855
5856
5857
5858 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5859 if (ret) {
5860 dev_info(&pf->pdev->dev,
5861 "Update vsi tc config failed, err %s aq_err %s\n",
5862 i40e_stat_str(hw, ret),
5863 i40e_aq_str(hw, hw->aq.asq_last_status));
5864 goto out;
5865 }
5866
5867 i40e_vsi_update_queue_map(vsi, &ctxt);
5868 vsi->info.valid_sections = 0;
5869
5870
5871 ret = i40e_vsi_get_bw_info(vsi);
5872 if (ret) {
5873 dev_info(&pf->pdev->dev,
5874 "Failed updating vsi bw info, err %s aq_err %s\n",
5875 i40e_stat_str(hw, ret),
5876 i40e_aq_str(hw, hw->aq.asq_last_status));
5877 goto out;
5878 }
5879
5880
5881 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5882 out:
5883 return ret;
5884 }
5885
5886
5887
5888
5889
5890
5891 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5892 {
5893 struct i40e_pf *pf = vsi->back;
5894
5895 switch (pf->hw.phy.link_info.link_speed) {
5896 case I40E_LINK_SPEED_40GB:
5897 return 40000;
5898 case I40E_LINK_SPEED_25GB:
5899 return 25000;
5900 case I40E_LINK_SPEED_20GB:
5901 return 20000;
5902 case I40E_LINK_SPEED_10GB:
5903 return 10000;
5904 case I40E_LINK_SPEED_1GB:
5905 return 1000;
5906 default:
5907 return -EINVAL;
5908 }
5909 }
5910
5911
5912
5913
5914
5915
5916
5917
5918 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
5919 {
5920 if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
5921 dev_warn(&vsi->back->pdev->dev,
5922 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5923 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5924 } else {
5925 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5926 }
5927
5928 return max_tx_rate;
5929 }
5930
5931
5932
5933
5934
5935
5936
5937
5938
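/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max Tx rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/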
5939 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5940 {
5941 struct i40e_pf *pf = vsi->back;
5942 u64 credits = 0;
5943 int speed = 0;
5944 int ret = 0;
5945
5946 speed = i40e_get_link_speed(vsi);
5947 if (max_tx_rate > speed) {
5948 dev_err(&pf->pdev->dev,
5949 "Invalid max tx rate %llu specified for VSI seid %d.",
5950 max_tx_rate, seid);
5951 return -EINVAL;
5952 }
5953 if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
5954 dev_warn(&pf->pdev->dev,
5955 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5956 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5957 }
5958
5959
5960 credits = max_tx_rate;
5961 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5962 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5963 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5964 if (ret)
5965 dev_err(&pf->pdev->dev,
5966 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5967 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5968 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5969 return ret;
5970 }
5971
5972
5973
5974
5975
5976
5977
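/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/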
5978 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5979 {
5980 enum i40e_admin_queue_err last_aq_status;
5981 struct i40e_cloud_filter *cfilter;
5982 struct i40e_channel *ch, *ch_tmp;
5983 struct i40e_pf *pf = vsi->back;
5984 struct hlist_node *node;
5985 int ret, i;
5986
5987
5988
5989
5990 vsi->current_rss_size = 0;
5991
5992
5993 if (list_empty(&vsi->ch_list))
5994 return;
5995
5996 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5997 struct i40e_vsi *p_vsi;
5998
5999 list_del(&ch->list);
6000 p_vsi = ch->parent_vsi;
6001 if (!p_vsi || !ch->initialized) {
6002 kfree(ch);
6003 continue;
6004 }
6005
6006 for (i = 0; i < ch->num_queue_pairs; i++) {
6007 struct i40e_ring *tx_ring, *rx_ring;
6008 u16 pf_q;
6009
6010 pf_q = ch->base_queue + i;
6011 tx_ring = vsi->tx_rings[pf_q];
6012 tx_ring->ch = NULL;
6013
6014 rx_ring = vsi->rx_rings[pf_q];
6015 rx_ring->ch = NULL;
6016 }
6017
6018
6019 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
6020 if (ret)
6021 dev_info(&vsi->back->pdev->dev,
6022 "Failed to reset tx rate for ch->seid %u\n",
6023 ch->seid);
6024
6025
6026 hlist_for_each_entry_safe(cfilter, node,
6027 &pf->cloud_filter_list, cloud_node) {
6028 if (cfilter->seid != ch->seid)
6029 continue;
6030
6031 hash_del(&cfilter->cloud_node);
6032 if (cfilter->dst_port)
6033 ret = i40e_add_del_cloud_filter_big_buf(vsi,
6034 cfilter,
6035 false);
6036 else
6037 ret = i40e_add_del_cloud_filter(vsi, cfilter,
6038 false);
6039 last_aq_status = pf->hw.aq.asq_last_status;
6040 if (ret)
6041 dev_info(&pf->pdev->dev,
6042 "Failed to delete cloud filter, err %s aq_err %s\n",
6043 i40e_stat_str(&pf->hw, ret),
6044 i40e_aq_str(&pf->hw, last_aq_status));
6045 kfree(cfilter);
6046 }
6047
6048
6049 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
6050 NULL);
6051 if (ret)
6052 dev_err(&vsi->back->pdev->dev,
6053 "unable to remove channel (%d) for parent VSI(%d)\n",
6054 ch->seid, p_vsi->seid);
6055 kfree(ch);
6056 }
6057 INIT_LIST_HEAD(&vsi->ch_list);
6058 }
6059
6060
6061
6062
6063
6064
6065
6066
6067 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
6068 {
6069 struct i40e_channel *ch, *ch_tmp;
6070 int max = 0;
6071
6072 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6073 if (!ch->initialized)
6074 continue;
6075 if (ch->num_queue_pairs > max)
6076 max = ch->num_queue_pairs;
6077 }
6078
6079 return max;
6080 }
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
6094 struct i40e_vsi *vsi, bool *reconfig_rss)
6095 {
6096 int max_ch_queues;
6097
6098 if (!reconfig_rss)
6099 return -EINVAL;
6100
6101 *reconfig_rss = false;
6102 if (vsi->current_rss_size) {
6103 if (num_queues > vsi->current_rss_size) {
6104 dev_dbg(&pf->pdev->dev,
6105 "Error: num_queues (%d) > vsi's current_size(%d)\n",
6106 num_queues, vsi->current_rss_size);
6107 return -EINVAL;
6108 } else if ((num_queues < vsi->current_rss_size) &&
6109 (!is_power_of_2(num_queues))) {
6110 dev_dbg(&pf->pdev->dev,
6111 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
6112 num_queues, vsi->current_rss_size);
6113 return -EINVAL;
6114 }
6115 }
6116
6117 if (!is_power_of_2(num_queues)) {
6118
6119
6120
6121
6122
6123 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
6124 if (num_queues < max_ch_queues) {
6125 dev_dbg(&pf->pdev->dev,
6126 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
6127 num_queues, max_ch_queues);
6128 return -EINVAL;
6129 }
6130 *reconfig_rss = true;
6131 }
6132
6133 return 0;
6134 }
6135
6136
6137
6138
6139
6140
6141
6142
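/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 **/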
6143 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
6144 {
6145 struct i40e_pf *pf = vsi->back;
6146 u8 seed[I40E_HKEY_ARRAY_SIZE];
6147 struct i40e_hw *hw = &pf->hw;
6148 int local_rss_size;
6149 u8 *lut;
6150 int ret;
6151
6152 if (!vsi->rss_size)
6153 return -EINVAL;
6154
6155 if (rss_size > vsi->rss_size)
6156 return -EINVAL;
6157
6158 local_rss_size = min_t(int, vsi->rss_size, rss_size);
6159 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
6160 if (!lut)
6161 return -ENOMEM;
6162
6163
6164 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
6165
6166
6167
6168
6169 if (vsi->rss_hkey_user)
6170 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
6171 else
6172 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
6173
6174 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
6175 if (ret) {
6176 dev_info(&pf->pdev->dev,
6177 "Cannot set RSS lut, err %s aq_err %s\n",
6178 i40e_stat_str(hw, ret),
6179 i40e_aq_str(hw, hw->aq.asq_last_status));
6180 kfree(lut);
6181 return ret;
6182 }
6183 kfree(lut);
6184
6185
6186 if (!vsi->orig_rss_size)
6187 vsi->orig_rss_size = vsi->rss_size;
6188 vsi->current_rss_size = local_rss_size;
6189
6190 return ret;
6191 }
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
6202 struct i40e_vsi_context *ctxt,
6203 struct i40e_channel *ch)
6204 {
6205 u16 qcount, qmap, sections = 0;
6206 u8 offset = 0;
6207 int pow;
6208
6209 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
6210 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
6211
6212 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
6213 ch->num_queue_pairs = qcount;
6214
6215
6216 pow = ilog2(qcount);
6217 if (!is_power_of_2(qcount))
6218 pow++;
6219
6220 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
6221 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
6222
6223
6224 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6225
6226 ctxt->info.up_enable_bits = 0x1;
6227 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6228 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6229 ctxt->info.valid_sections |= cpu_to_le16(sections);
6230 }
6231
6232
6233
6234
6235
6236
6237
6238
6239
6240 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6241 struct i40e_channel *ch)
6242 {
6243 struct i40e_hw *hw = &pf->hw;
6244 struct i40e_vsi_context ctxt;
6245 u8 enabled_tc = 0x1;
6246 int ret;
6247
6248 if (ch->type != I40E_VSI_VMDQ2) {
6249 dev_info(&pf->pdev->dev,
6250 "add new vsi failed, ch->type %d\n", ch->type);
6251 return -EINVAL;
6252 }
6253
6254 memset(&ctxt, 0, sizeof(ctxt));
6255 ctxt.pf_num = hw->pf_id;
6256 ctxt.vf_num = 0;
6257 ctxt.uplink_seid = uplink_seid;
6258 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6259 if (ch->type == I40E_VSI_VMDQ2)
6260 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6261
6262 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6263 ctxt.info.valid_sections |=
6264 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6265 ctxt.info.switch_id =
6266 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6267 }
6268
6269
6270 i40e_channel_setup_queue_map(pf, &ctxt, ch);
6271
6272
6273 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6274 if (ret) {
6275 dev_info(&pf->pdev->dev,
6276 "add new vsi failed, err %s aq_err %s\n",
6277 i40e_stat_str(&pf->hw, ret),
6278 i40e_aq_str(&pf->hw,
6279 pf->hw.aq.asq_last_status));
6280 return -ENOENT;
6281 }
6282
6283
6284
6285
6286 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6287 ch->seid = ctxt.seid;
6288 ch->vsi_number = ctxt.vsi_number;
6289 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6290
6291
6292
6293
6294
6295 ch->info.mapping_flags = ctxt.info.mapping_flags;
6296 memcpy(&ch->info.queue_mapping,
6297 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6298 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6299 sizeof(ctxt.info.tc_mapping));
6300
6301 return 0;
6302 }
6303
6304 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6305 u8 *bw_share)
6306 {
6307 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6308 i40e_status ret;
6309 int i;
6310
6311 memset(&bw_data, 0, sizeof(bw_data));
6312 bw_data.tc_valid_bits = ch->enabled_tc;
6313 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6314 bw_data.tc_bw_credits[i] = bw_share[i];
6315
6316 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6317 &bw_data, NULL);
6318 if (ret) {
6319 dev_info(&vsi->back->pdev->dev,
6320 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6321 vsi->back->hw.aq.asq_last_status, ch->seid);
6322 return -EINVAL;
6323 }
6324
6325 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6326 ch->info.qs_handle[i] = bw_data.qs_handles[i];
6327
6328 return 0;
6329 }
6330
6331
6332
6333
6334
6335
6336
6337
6338
6339
6340 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6341 struct i40e_vsi *vsi,
6342 struct i40e_channel *ch)
6343 {
6344 i40e_status ret;
6345 int i;
6346 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6347
6348
6349 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6350 if (ch->enabled_tc & BIT(i))
6351 bw_share[i] = 1;
6352 }
6353
6354
6355 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6356 if (ret) {
6357 dev_info(&vsi->back->pdev->dev,
6358 "Failed configuring TC map %d for channel (seid %u)\n",
6359 ch->enabled_tc, ch->seid);
6360 return ret;
6361 }
6362
6363 for (i = 0; i < ch->num_queue_pairs; i++) {
6364 struct i40e_ring *tx_ring, *rx_ring;
6365 u16 pf_q;
6366
6367 pf_q = ch->base_queue + i;
6368
6369
6370
6371
6372 tx_ring = vsi->tx_rings[pf_q];
6373 tx_ring->ch = ch;
6374
6375
6376 rx_ring = vsi->rx_rings[pf_q];
6377 rx_ring->ch = ch;
6378 }
6379
6380 return 0;
6381 }
6382
6383
6384
6385
6386
6387
6388
6389
6390
6391
6392
6393
6394 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6395 struct i40e_vsi *vsi,
6396 struct i40e_channel *ch,
6397 u16 uplink_seid, u8 type)
6398 {
6399 int ret;
6400
6401 ch->initialized = false;
6402 ch->base_queue = vsi->next_base_queue;
6403 ch->type = type;
6404
6405
6406 ret = i40e_add_channel(pf, uplink_seid, ch);
6407 if (ret) {
6408 dev_info(&pf->pdev->dev,
6409 "failed to add_channel using uplink_seid %u\n",
6410 uplink_seid);
6411 return ret;
6412 }
6413
6414
6415 ch->initialized = true;
6416
6417
6418 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6419 if (ret) {
6420 dev_info(&pf->pdev->dev,
6421 "failed to configure TX rings for channel %u\n",
6422 ch->seid);
6423 return ret;
6424 }
6425
6426
6427 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6428 dev_dbg(&pf->pdev->dev,
6429 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6430 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6431 ch->num_queue_pairs,
6432 vsi->next_base_queue);
6433 return ret;
6434 }
6435
6436
6437
6438
6439
6440
6441
6442
6443
6444
6445 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6446 struct i40e_channel *ch)
6447 {
6448 u8 vsi_type;
6449 u16 seid;
6450 int ret;
6451
6452 if (vsi->type == I40E_VSI_MAIN) {
6453 vsi_type = I40E_VSI_VMDQ2;
6454 } else {
6455 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6456 vsi->type);
6457 return false;
6458 }
6459
6460
6461 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6462
6463
6464 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6465 if (ret) {
6466 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6467 return false;
6468 }
6469
6470 return ch->initialized;
6471 }
6472
6473
6474
6475
6476
6477
6478
6479
6480 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6481 {
6482 u8 mode;
6483 struct i40e_pf *pf = vsi->back;
6484 struct i40e_hw *hw = &pf->hw;
6485 int ret;
6486
6487 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6488 if (ret)
6489 return -EINVAL;
6490
6491 if (hw->dev_caps.switch_mode) {
6492
6493
6494
6495 u32 switch_mode = hw->dev_caps.switch_mode &
6496 I40E_SWITCH_MODE_MASK;
6497 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6498 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6499 return 0;
6500 dev_err(&pf->pdev->dev,
6501 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6502 hw->dev_caps.switch_mode);
6503 return -EINVAL;
6504 }
6505 }
6506
6507
6508 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6509
6510
6511 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6512
6513
6514 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6515
6516
6517 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6518 pf->last_sw_conf_valid_flags,
6519 mode, NULL);
6520 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6521 dev_err(&pf->pdev->dev,
6522 "couldn't set switch config bits, err %s aq_err %s\n",
6523 i40e_stat_str(hw, ret),
6524 i40e_aq_str(hw,
6525 hw->aq.asq_last_status));
6526
6527 return ret;
6528 }
6529
6530
6531
6532
6533
6534
6535
6536
6537
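/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 **/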
6538 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6539 struct i40e_channel *ch)
6540 {
6541 struct i40e_pf *pf = vsi->back;
6542 bool reconfig_rss;
6543 int err;
6544
6545 if (!ch)
6546 return -EINVAL;
6547
6548 if (!ch->num_queue_pairs) {
6549 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6550 ch->num_queue_pairs);
6551 return -EINVAL;
6552 }
6553
6554
6555 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6556 &reconfig_rss);
6557 if (err) {
6558 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6559 ch->num_queue_pairs);
6560 return -EINVAL;
6561 }
6562
6563
6564
6565
6566
6567 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6568 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6569
6570 if (vsi->type == I40E_VSI_MAIN) {
6571 if (i40e_is_tc_mqprio_enabled(pf))
6572 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6573 else
6574 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6575 }
6576
6577
6578
6579 }
6580
6581
6582
6583
6584 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6585 dev_dbg(&pf->pdev->dev,
6586 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6587 vsi->cnt_q_avail, ch->num_queue_pairs);
6588 return -EINVAL;
6589 }
6590
6591
6592 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6593 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6594 if (err) {
6595 dev_info(&pf->pdev->dev,
6596 "Error: unable to reconfig rss for num_queues (%u)\n",
6597 ch->num_queue_pairs);
6598 return -EINVAL;
6599 }
6600 }
6601
6602 if (!i40e_setup_channel(pf, vsi, ch)) {
6603 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6604 return -EINVAL;
6605 }
6606
6607 dev_info(&pf->pdev->dev,
6608 "Setup channel (id:%u) utilizing num_queues %d\n",
6609 ch->seid, ch->num_queue_pairs);
6610
6611
6612 if (ch->max_tx_rate) {
6613 u64 credits = ch->max_tx_rate;
6614
6615 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6616 return -EINVAL;
6617
6618 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6619 dev_dbg(&pf->pdev->dev,
6620 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6621 ch->max_tx_rate,
6622 credits,
6623 ch->seid);
6624 }
6625
6626
6627 ch->parent_vsi = vsi;
6628
6629
6630 vsi->cnt_q_avail -= ch->num_queue_pairs;
6631
6632 return 0;
6633 }
6634
6635
6636
6637
6638
6639
6640
6641 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6642 {
6643 struct i40e_channel *ch;
6644 u64 max_rate = 0;
6645 int ret = 0, i;
6646
6647
6648 vsi->tc_seid_map[0] = vsi->seid;
6649 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6650 if (vsi->tc_config.enabled_tc & BIT(i)) {
6651 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6652 if (!ch) {
6653 ret = -ENOMEM;
6654 goto err_free;
6655 }
6656
6657 INIT_LIST_HEAD(&ch->list);
6658 ch->num_queue_pairs =
6659 vsi->tc_config.tc_info[i].qcount;
6660 ch->base_queue =
6661 vsi->tc_config.tc_info[i].qoffset;
6662
6663
6664
6665
6666 max_rate = vsi->mqprio_qopt.max_rate[i];
6667 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6668 ch->max_tx_rate = max_rate;
6669
6670 list_add_tail(&ch->list, &vsi->ch_list);
6671
6672 ret = i40e_create_queue_channel(vsi, ch);
6673 if (ret) {
6674 dev_err(&vsi->back->pdev->dev,
6675 "Failed creating queue channel with TC%d: queues %d\n",
6676 i, ch->num_queue_pairs);
6677 goto err_free;
6678 }
6679 vsi->tc_seid_map[i] = ch->seid;
6680 }
6681 }
6682
6683
6684 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
6685 return ret;
6686
6687 err_free:
6688 i40e_remove_queue_channels(vsi);
6689 return ret;
6690 }
6691
6692
6693
6694
6695
6696
6697
6698
6699 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6700 {
6701 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6702 struct i40e_pf *pf = veb->pf;
6703 int ret = 0;
6704 int i;
6705
6706
6707 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6708 return ret;
6709
6710 bw_data.tc_valid_bits = enabled_tc;
6711
6712
6713
6714 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6715 if (enabled_tc & BIT(i))
6716 bw_data.tc_bw_share_credits[i] = 1;
6717 }
6718
6719 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6720 &bw_data, NULL);
6721 if (ret) {
6722 dev_info(&pf->pdev->dev,
6723 "VEB bw config failed, err %s aq_err %s\n",
6724 i40e_stat_str(&pf->hw, ret),
6725 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6726 goto out;
6727 }
6728
6729
6730 ret = i40e_veb_get_bw_info(veb);
6731 if (ret) {
6732 dev_info(&pf->pdev->dev,
6733 "Failed getting veb bw config, err %s aq_err %s\n",
6734 i40e_stat_str(&pf->hw, ret),
6735 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6736 }
6737
6738 out:
6739 return ret;
6740 }
6741
6742 #ifdef CONFIG_I40E_DCB
6743
6744
6745
6746
6747
6748
6749
6750
6751 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6752 {
6753 u8 tc_map = 0;
6754 int ret;
6755 u8 v;
6756
6757
6758 tc_map = i40e_pf_get_tc_map(pf);
6759 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6760 return;
6761
6762 for (v = 0; v < I40E_MAX_VEB; v++) {
6763 if (!pf->veb[v])
6764 continue;
6765 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6766 if (ret) {
6767 dev_info(&pf->pdev->dev,
6768 "Failed configuring TC for VEB seid=%d\n",
6769 pf->veb[v]->seid);
6770
6771 }
6772 }
6773
6774
6775 for (v = 0; v < pf->num_alloc_vsi; v++) {
6776 if (!pf->vsi[v])
6777 continue;
6778
6779
6780
6781
6782 if (v == pf->lan_vsi)
6783 tc_map = i40e_pf_get_tc_map(pf);
6784 else
6785 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6786
6787 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6788 if (ret) {
6789 dev_info(&pf->pdev->dev,
6790 "Failed configuring TC for VSI seid=%d\n",
6791 pf->vsi[v]->seid);
6792
6793 } else {
6794
6795 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6796 if (pf->vsi[v]->netdev)
6797 i40e_dcbnl_set_all(pf->vsi[v]);
6798 }
6799 }
6800 }
6801
6802
6803
6804
6805
6806
6807
6808
6809 static int i40e_resume_port_tx(struct i40e_pf *pf)
6810 {
6811 struct i40e_hw *hw = &pf->hw;
6812 int ret;
6813
6814 ret = i40e_aq_resume_port_tx(hw, NULL);
6815 if (ret) {
6816 dev_info(&pf->pdev->dev,
6817 "Resume Port Tx failed, err %s aq_err %s\n",
6818 i40e_stat_str(&pf->hw, ret),
6819 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6820
6821 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6822 i40e_service_event_schedule(pf);
6823 }
6824
6825 return ret;
6826 }
6827
6828
6829
6830
6831
6832
6833
6834 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6835 {
6836 struct i40e_hw *hw = &pf->hw;
6837 int ret;
6838
6839 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6840 if (ret) {
6841 dev_info(&pf->pdev->dev,
6842 "Suspend Port Tx failed, err %s aq_err %s\n",
6843 i40e_stat_str(&pf->hw, ret),
6844 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6845
6846 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6847 i40e_service_event_schedule(pf);
6848 }
6849
6850 return ret;
6851 }
6852
6853
6854
6855
6856
6857
6858
6859
6860
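/**
 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
 * @pf: PF being configured
 * @new_cfg: New DCBX configuration
 *
 * Program DCB settings into HW and reconfigure VEB/VSIs on
 * given PF
 **/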
6861 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6862 struct i40e_dcbx_config *new_cfg)
6863 {
6864 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6865 int ret;
6866
6867
6868 if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6869 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6870 return 0;
6871 }
6872
6873
6874 i40e_pf_quiesce_all_vsi(pf);
6875
6876
6877 *old_cfg = *new_cfg;
6878 old_cfg->etsrec = old_cfg->etscfg;
6879 ret = i40e_set_dcb_config(&pf->hw);
6880 if (ret) {
6881 dev_info(&pf->pdev->dev,
6882 "Set DCB Config failed, err %s aq_err %s\n",
6883 i40e_stat_str(&pf->hw, ret),
6884 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6885 goto out;
6886 }
6887
6888
6889 i40e_dcb_reconfigure(pf);
6890 out:
6891
6892 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6893
6894 ret = i40e_resume_port_tx(pf);
6895
6896 if (ret)
6897 goto err;
6898 i40e_pf_unquiesce_all_vsi(pf);
6899 }
6900 err:
6901 return ret;
6902 }
6903
6904
6905
6906
6907
6908
6909
6910
6911
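/**
 * i40e_hw_dcb_config - Program new DCBX settings directly into HW
 * @pf: PF being configured
 * @new_cfg: New DCBX configuration
 *
 * Program the supplied DCBX configuration into the port ETS scheduler,
 * PFC and Rx packet buffer registers, then reconfigure VEBs/VSIs on the PF.
 **/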
6912 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6913 {
6914 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6915 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6916 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6917 struct i40e_dcbx_config *old_cfg;
6918 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6919 struct i40e_rx_pb_config pb_cfg;
6920 struct i40e_hw *hw = &pf->hw;
6921 u8 num_ports = hw->num_ports;
6922 bool need_reconfig;
6923 int ret = -EINVAL;
6924 u8 lltc_map = 0;
6925 u8 tc_map = 0;
6926 u8 new_numtc;
6927 u8 i;
6928
6929 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6930
6931
6932
6933
6934
6935
6936
6937
6938
6939
6940
6941
6942
6943
6944
6945 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6946
6947 memset(&ets_data, 0, sizeof(ets_data));
6948 for (i = 0; i < new_numtc; i++) {
6949 tc_map |= BIT(i);
6950 switch (new_cfg->etscfg.tsatable[i]) {
6951 case I40E_IEEE_TSA_ETS:
6952 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6953 ets_data.tc_bw_share_credits[i] =
6954 new_cfg->etscfg.tcbwtable[i];
6955 break;
6956 case I40E_IEEE_TSA_STRICT:
6957 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6958 lltc_map |= BIT(i);
6959 ets_data.tc_bw_share_credits[i] =
6960 I40E_DCB_STRICT_PRIO_CREDITS;
6961 break;
6962 default:
6963
6964 need_reconfig = false;
6965 goto out;
6966 }
6967 }
6968
6969 old_cfg = &hw->local_dcbx_config;
6970
6971 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6972
6973
6974
6975
6976 if (need_reconfig) {
6977
6978 if (new_numtc > 1)
6979 pf->flags |= I40E_FLAG_DCB_ENABLED;
6980 else
6981 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6982
6983 set_bit(__I40E_PORT_SUSPENDED, pf->state);
6984
6985 i40e_pf_quiesce_all_vsi(pf);
6986 ret = i40e_suspend_port_tx(pf);
6987 if (ret)
6988 goto err;
6989 }
6990
6991
6992 ets_data.tc_valid_bits = tc_map;
6993 ets_data.tc_strict_priority_flags = lltc_map;
6994 ret = i40e_aq_config_switch_comp_ets
6995 (hw, pf->mac_seid, &ets_data,
6996 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6997 if (ret) {
6998 dev_info(&pf->pdev->dev,
6999 "Modify Port ETS failed, err %s aq_err %s\n",
7000 i40e_stat_str(&pf->hw, ret),
7001 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7002 goto out;
7003 }
7004
7005
7006 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
7007 i40e_dcb_hw_set_num_tc(hw, new_numtc);
7008 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
7009 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
7010 I40E_DCB_DEFAULT_MAX_EXPONENT,
7011 lltc_map);
7012 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
7013 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
7014 prio_type);
7015 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
7016 new_cfg->etscfg.prioritytable);
7017 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
7018
7019
7020 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7021 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
7022 mfs_tc[i] += I40E_PACKET_HDR_PAD;
7023 }
7024
7025 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
7026 false, new_cfg->pfc.pfcenable,
7027 mfs_tc, &pb_cfg);
7028 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
7029
7030
7031 pf->pb_cfg = pb_cfg;
7032
7033
7034 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
7035 if (ret) {
7036 dev_info(&pf->pdev->dev,
7037 "DCB Updated failed, err %s aq_err %s\n",
7038 i40e_stat_str(&pf->hw, ret),
7039 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7040 goto out;
7041 }
7042
7043
7044 *old_cfg = *new_cfg;
7045
7046
7047 i40e_dcb_reconfigure(pf);
7048 out:
7049
7050 if (need_reconfig) {
7051 ret = i40e_resume_port_tx(pf);
7052
7053 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
7054
7055 if (ret)
7056 goto err;
7057
7058
7059 ret = i40e_pf_wait_queues_disabled(pf);
7060 if (ret) {
7061
7062 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7063 i40e_service_event_schedule(pf);
7064 goto err;
7065 } else {
7066 i40e_pf_unquiesce_all_vsi(pf);
7067 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7068 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
7069 }
7070
7071 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
7072 ret = i40e_hw_set_dcb_config(pf, new_cfg);
7073 }
7074
7075 err:
7076 return ret;
7077 }
7078
7079
7080
7081
7082
7083
7084
7085 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
7086 {
7087 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
7088 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
7089 struct i40e_hw *hw = &pf->hw;
7090 int err;
7091
7092 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
7093
7094 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
7095 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7096 pf->tmp_cfg.etscfg.maxtcs = 0;
7097 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7098 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
7099 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
7100 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
7101
7102 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
7103 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
7104 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
7105 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
7106
7107 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
7108 }
7109
7110 memset(&ets_data, 0, sizeof(ets_data));
7111 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS;
7112 ets_data.tc_strict_priority_flags = 0;
7113 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7114
7115
7116 err = i40e_aq_config_switch_comp_ets
7117 (hw, pf->mac_seid, &ets_data,
7118 i40e_aqc_opc_enable_switching_comp_ets, NULL);
7119 if (err) {
7120 dev_info(&pf->pdev->dev,
7121 "Enable Port ETS failed, err %s aq_err %s\n",
7122 i40e_stat_str(&pf->hw, err),
7123 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7124 err = -ENOENT;
7125 goto out;
7126 }
7127
7128
7129 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7130 dcb_cfg->etscfg.cbs = 0;
7131 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
7132 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7133
7134 out:
7135 return err;
7136 }
7137
7138
7139
7140
7141
7142
7143
7144
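/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/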
7145 static int i40e_init_pf_dcb(struct i40e_pf *pf)
7146 {
7147 struct i40e_hw *hw = &pf->hw;
7148 int err;
7149
7150
7151
7152
7153 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
7154 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
7155 err = I40E_NOT_SUPPORTED;
7156 goto out;
7157 }
7158 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
7159 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
7160 err = i40e_dcb_sw_default_config(pf);
7161 if (err) {
7162 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
7163 goto out;
7164 }
7165 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
7166 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
7167 DCB_CAP_DCBX_VER_IEEE;
7168
7169 pf->flags |= I40E_FLAG_DCB_CAPABLE;
7170 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7171 goto out;
7172 }
7173 err = i40e_init_dcb(hw, true);
7174 if (!err) {
7175
7176 if ((!hw->func_caps.dcb) ||
7177 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
7178 dev_info(&pf->pdev->dev,
7179 "DCBX offload is not supported or is disabled for this PF.\n");
7180 } else {
7181
7182 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
7183 DCB_CAP_DCBX_VER_IEEE;
7184
7185 pf->flags |= I40E_FLAG_DCB_CAPABLE;
7186
7187
7188
7189 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
7190 pf->flags |= I40E_FLAG_DCB_ENABLED;
7191 else
7192 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7193 dev_dbg(&pf->pdev->dev,
7194 "DCBX offload is supported for this PF.\n");
7195 }
7196 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
7197 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
7198 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
7199 } else {
7200 dev_info(&pf->pdev->dev,
7201 "Query for DCB configuration failed, err %s aq_err %s\n",
7202 i40e_stat_str(&pf->hw, err),
7203 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7204 }
7205
7206 out:
7207 return err;
7208 }
7209 #endif
7210
7211
7212
7213
7214
7215
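/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 **/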
7216 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
7217 {
7218 enum i40e_aq_link_speed new_speed;
7219 struct i40e_pf *pf = vsi->back;
7220 char *speed = "Unknown";
7221 char *fc = "Unknown";
7222 char *fec = "";
7223 char *req_fec = "";
7224 char *an = "";
7225
7226 if (isup)
7227 new_speed = pf->hw.phy.link_info.link_speed;
7228 else
7229 new_speed = I40E_LINK_SPEED_UNKNOWN;
7230
7231 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7232 return;
7233 vsi->current_isup = isup;
7234 vsi->current_speed = new_speed;
7235 if (!isup) {
7236 netdev_info(vsi->netdev, "NIC Link is Down\n");
7237 return;
7238 }
7239
7240
7241
7242
7243 if (pf->hw.func_caps.npar_enable &&
7244 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7245 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7246 netdev_warn(vsi->netdev,
7247 "The partition detected link speed that is less than 10Gbps\n");
7248
7249 switch (pf->hw.phy.link_info.link_speed) {
7250 case I40E_LINK_SPEED_40GB:
7251 speed = "40 G";
7252 break;
7253 case I40E_LINK_SPEED_20GB:
7254 speed = "20 G";
7255 break;
7256 case I40E_LINK_SPEED_25GB:
7257 speed = "25 G";
7258 break;
7259 case I40E_LINK_SPEED_10GB:
7260 speed = "10 G";
7261 break;
7262 case I40E_LINK_SPEED_5GB:
7263 speed = "5 G";
7264 break;
7265 case I40E_LINK_SPEED_2_5GB:
7266 speed = "2.5 G";
7267 break;
7268 case I40E_LINK_SPEED_1GB:
7269 speed = "1000 M";
7270 break;
7271 case I40E_LINK_SPEED_100MB:
7272 speed = "100 M";
7273 break;
7274 default:
7275 break;
7276 }
7277
7278 switch (pf->hw.fc.current_mode) {
7279 case I40E_FC_FULL:
7280 fc = "RX/TX";
7281 break;
7282 case I40E_FC_TX_PAUSE:
7283 fc = "TX";
7284 break;
7285 case I40E_FC_RX_PAUSE:
7286 fc = "RX";
7287 break;
7288 default:
7289 fc = "None";
7290 break;
7291 }
7292
7293 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7294 req_fec = "None";
7295 fec = "None";
7296 an = "False";
7297
7298 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7299 an = "True";
7300
7301 if (pf->hw.phy.link_info.fec_info &
7302 I40E_AQ_CONFIG_FEC_KR_ENA)
7303 fec = "CL74 FC-FEC/BASE-R";
7304 else if (pf->hw.phy.link_info.fec_info &
7305 I40E_AQ_CONFIG_FEC_RS_ENA)
7306 fec = "CL108 RS-FEC";
7307
7308
7309
7310
7311 if (vsi->back->hw.phy.link_info.req_fec_info &
7312 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7313 if (vsi->back->hw.phy.link_info.req_fec_info &
7314 I40E_AQ_REQUEST_FEC_RS)
7315 req_fec = "CL108 RS-FEC";
7316 else
7317 req_fec = "CL74 FC-FEC/BASE-R";
7318 }
7319 netdev_info(vsi->netdev,
7320 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7321 speed, req_fec, fec, an, fc);
7322 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7323 req_fec = "None";
7324 fec = "None";
7325 an = "False";
7326
7327 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7328 an = "True";
7329
7330 if (pf->hw.phy.link_info.fec_info &
7331 I40E_AQ_CONFIG_FEC_KR_ENA)
7332 fec = "CL74 FC-FEC/BASE-R";
7333
7334 if (pf->hw.phy.link_info.req_fec_info &
7335 I40E_AQ_REQUEST_FEC_KR)
7336 req_fec = "CL74 FC-FEC/BASE-R";
7337
7338 netdev_info(vsi->netdev,
7339 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7340 speed, req_fec, fec, an, fc);
7341 } else {
7342 netdev_info(vsi->netdev,
7343 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7344 speed, fc);
7345 }
7346
7347 }
7348
7349
7350
7351
7352
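/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 */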
7353 static int i40e_up_complete(struct i40e_vsi *vsi)
7354 {
7355 struct i40e_pf *pf = vsi->back;
7356 int err;
7357
7358 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7359 i40e_vsi_configure_msix(vsi);
7360 else
7361 i40e_configure_msi_and_legacy(vsi);
7362
7363
7364 err = i40e_vsi_start_rings(vsi);
7365 if (err)
7366 return err;
7367
7368 clear_bit(__I40E_VSI_DOWN, vsi->state);
7369 i40e_napi_enable_all(vsi);
7370 i40e_vsi_enable_irq(vsi);
7371
7372 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7373 (vsi->netdev)) {
7374 i40e_print_link_message(vsi, true);
7375 netif_tx_start_all_queues(vsi->netdev);
7376 netif_carrier_on(vsi->netdev);
7377 }
7378
7379
7380 if (vsi->type == I40E_VSI_FDIR) {
7381
7382 pf->fd_add_err = 0;
7383 pf->fd_atr_cnt = 0;
7384 i40e_fdir_filter_restore(vsi);
7385 }
7386
7387
7388
7389
7390 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7391 i40e_service_event_schedule(pf);
7392
7393 return 0;
7394 }
7395
7396
7397
7398
7399
7400
7401
7402
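/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 */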
7403 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7404 {
7405 struct i40e_pf *pf = vsi->back;
7406
7407 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7408 usleep_range(1000, 2000);
7409 i40e_down(vsi);
7410
7411 i40e_up(vsi);
7412 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7413 }
7414
7415
7416
7417
7418
7419
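/**
 * i40e_force_link_state - Force the link status
 * @pf: board private structure
 * @is_up: whether the link state should be forced up or down
 */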
7420 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7421 {
7422 struct i40e_aq_get_phy_abilities_resp abilities;
7423 struct i40e_aq_set_phy_config config = {0};
7424 bool non_zero_phy_type = is_up;
7425 struct i40e_hw *hw = &pf->hw;
7426 i40e_status err;
7427 u64 mask;
7428 u8 speed;
7429
7430
7431
7432
7433
7434
7435
7436 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7437 NULL);
7438 if (err) {
7439 dev_err(&pf->pdev->dev,
7440 "failed to get phy cap., ret = %s last_status = %s\n",
7441 i40e_stat_str(hw, err),
7442 i40e_aq_str(hw, hw->aq.asq_last_status));
7443 return err;
7444 }
7445 speed = abilities.link_speed;
7446
7447
7448 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7449 NULL);
7450 if (err) {
7451 dev_err(&pf->pdev->dev,
7452 "failed to get phy cap., ret = %s last_status = %s\n",
7453 i40e_stat_str(hw, err),
7454 i40e_aq_str(hw, hw->aq.asq_last_status));
7455 return err;
7456 }
7457
7458
7459
7460
7461
7462 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7463 non_zero_phy_type = true;
7464 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7465 return I40E_SUCCESS;
7466
7467
7468
7469
7470
7471 mask = I40E_PHY_TYPES_BITMASK;
7472 config.phy_type =
7473 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7474 config.phy_type_ext =
7475 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7476
7477 config.abilities = abilities.abilities;
7478 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7479 if (is_up)
7480 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7481 else
7482 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7483 }
7484 if (abilities.link_speed != 0)
7485 config.link_speed = abilities.link_speed;
7486 else
7487 config.link_speed = speed;
7488 config.eee_capability = abilities.eee_capability;
7489 config.eeer = abilities.eeer_val;
7490 config.low_power_ctrl = abilities.d3_lpan;
7491 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7492 I40E_AQ_PHY_FEC_CONFIG_MASK;
7493 err = i40e_aq_set_phy_config(hw, &config, NULL);
7494
7495 if (err) {
7496 dev_err(&pf->pdev->dev,
7497 "set phy config ret = %s last_status = %s\n",
7498 i40e_stat_str(&pf->hw, err),
7499 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7500 return err;
7501 }
7502
7503
7504 err = i40e_update_link_info(hw);
7505 if (err) {
7506
7507
7508
7509
7510 msleep(1000);
7511 i40e_update_link_info(hw);
7512 }
7513
7514 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7515
7516 return I40E_SUCCESS;
7517 }
7518
7519
7520
7521
7522
7523 int i40e_up(struct i40e_vsi *vsi)
7524 {
7525 int err;
7526
7527 if (vsi->type == I40E_VSI_MAIN &&
7528 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7529 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7530 i40e_force_link_state(vsi->back, true);
7531
7532 err = i40e_vsi_configure(vsi);
7533 if (!err)
7534 err = i40e_up_complete(vsi);
7535
7536 return err;
7537 }
7538
7539
7540
7541
7542
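/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 */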
7543 void i40e_down(struct i40e_vsi *vsi)
7544 {
7545 int i;
7546
7547
7548
7549
7550 if (vsi->netdev) {
7551 netif_carrier_off(vsi->netdev);
7552 netif_tx_disable(vsi->netdev);
7553 }
7554 i40e_vsi_disable_irq(vsi);
7555 i40e_vsi_stop_rings(vsi);
7556 if (vsi->type == I40E_VSI_MAIN &&
7557 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7558 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7559 i40e_force_link_state(vsi->back, false);
7560 i40e_napi_disable_all(vsi);
7561
7562 for (i = 0; i < vsi->num_queue_pairs; i++) {
7563 i40e_clean_tx_ring(vsi->tx_rings[i]);
7564 if (i40e_enabled_xdp_vsi(vsi)) {
7565
7566
7567
7568 synchronize_rcu();
7569 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7570 }
7571 i40e_clean_rx_ring(vsi->rx_rings[i]);
7572 }
7573
7574 }
7575
7576
7577
7578
7579
7580
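/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters requested by the mqprio offload
 */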
7581 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7582 struct tc_mqprio_qopt_offload *mqprio_qopt)
7583 {
7584 u64 sum_max_rate = 0;
7585 u64 max_rate = 0;
7586 int i;
7587
7588 if (mqprio_qopt->qopt.offset[0] != 0 ||
7589 mqprio_qopt->qopt.num_tc < 1 ||
7590 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7591 return -EINVAL;
7592 for (i = 0; ; i++) {
7593 if (!mqprio_qopt->qopt.count[i])
7594 return -EINVAL;
7595 if (mqprio_qopt->min_rate[i]) {
7596 dev_err(&vsi->back->pdev->dev,
7597 "Invalid min tx rate (greater than 0) specified\n");
7598 return -EINVAL;
7599 }
7600 max_rate = mqprio_qopt->max_rate[i];
7601 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7602 sum_max_rate += max_rate;
7603
7604 if (i >= mqprio_qopt->qopt.num_tc - 1)
7605 break;
7606 if (mqprio_qopt->qopt.offset[i + 1] !=
7607 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7608 return -EINVAL;
7609 }
7610 if (vsi->num_queue_pairs <
7611 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7612 dev_err(&vsi->back->pdev->dev,
7613 "Failed to create traffic channel, insufficient number of queues.\n");
7614 return -EINVAL;
7615 }
7616 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7617 dev_err(&vsi->back->pdev->dev,
7618 "Invalid max tx rate specified\n");
7619 return -EINVAL;
7620 }
7621 return 0;
7622 }
7623
7624
7625
7626
7627
7628 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7629 {
7630 u16 qcount;
7631 int i;
7632
7633
7634 vsi->tc_config.numtc = 1;
7635 vsi->tc_config.enabled_tc = 1;
7636 qcount = min_t(int, vsi->alloc_queue_pairs,
7637 i40e_pf_get_max_q_per_tc(vsi->back));
7638 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7639
7640
7641
7642 vsi->tc_config.tc_info[i].qoffset = 0;
7643 if (i == 0)
7644 vsi->tc_config.tc_info[i].qcount = qcount;
7645 else
7646 vsi->tc_config.tc_info[i].qcount = 1;
7647 vsi->tc_config.tc_info[i].netdev_tc = 0;
7648 }
7649 }
7650
7651
7652
7653
7654
7655
7656
7657
7658
7659
7660
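/**
 * i40e_del_macvlan_filter - remove a MAC/VLAN filter from a channel
 * @hw: pointer to the HW structure
 * @seid: SEID of the channel VSI
 * @macaddr: the MAC address to remove
 * @aq_err: set to the admin queue status on return
 */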
7661 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7662 const u8 *macaddr, int *aq_err)
7663 {
7664 struct i40e_aqc_remove_macvlan_element_data element;
7665 i40e_status status;
7666
7667 memset(&element, 0, sizeof(element));
7668 ether_addr_copy(element.mac_addr, macaddr);
7669 element.vlan_tag = 0;
7670 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7671 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7672 *aq_err = hw->aq.asq_last_status;
7673
7674 return status;
7675 }
7676
7677
7678
7679
7680
7681
7682
7683
7684
7685
7686
7687 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7688 const u8 *macaddr, int *aq_err)
7689 {
7690 struct i40e_aqc_add_macvlan_element_data element;
7691 i40e_status status;
7692 u16 cmd_flags = 0;
7693
7694 ether_addr_copy(element.mac_addr, macaddr);
7695 element.vlan_tag = 0;
7696 element.queue_number = 0;
7697 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7698 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7699 element.flags = cpu_to_le16(cmd_flags);
7700 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7701 *aq_err = hw->aq.asq_last_status;
7702
7703 return status;
7704 }
7705
7706
7707
7708
7709
7710
7711 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7712 {
7713 struct i40e_ring *tx_ring, *rx_ring;
7714 u16 pf_q;
7715 int i;
7716
7717 for (i = 0; i < ch->num_queue_pairs; i++) {
7718 pf_q = ch->base_queue + i;
7719 tx_ring = vsi->tx_rings[pf_q];
7720 tx_ring->ch = NULL;
7721 rx_ring = vsi->rx_rings[pf_q];
7722 rx_ring->ch = NULL;
7723 }
7724 }
7725
7726
7727
7728
7729
7730
7731
7732
7733
7734 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7735 {
7736 struct i40e_channel *ch, *ch_tmp;
7737 int ret;
7738
7739 if (list_empty(&vsi->macvlan_list))
7740 return;
7741
7742 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7743 struct i40e_vsi *parent_vsi;
7744
7745 if (i40e_is_channel_macvlan(ch)) {
7746 i40e_reset_ch_rings(vsi, ch);
7747 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7748 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7749 netdev_set_sb_channel(ch->fwd->netdev, 0);
7750 kfree(ch->fwd);
7751 ch->fwd = NULL;
7752 }
7753
7754 list_del(&ch->list);
7755 parent_vsi = ch->parent_vsi;
7756 if (!parent_vsi || !ch->initialized) {
7757 kfree(ch);
7758 continue;
7759 }
7760
7761
7762 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7763 NULL);
7764 if (ret)
7765 dev_err(&vsi->back->pdev->dev,
7766 "unable to remove channel (%d) for parent VSI(%d)\n",
7767 ch->seid, parent_vsi->seid);
7768 kfree(ch);
7769 }
7770 vsi->macvlan_cnt = 0;
7771 }
7772
7773
7774
7775
7776
7777
7778
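/**
 * i40e_fwd_ring_up - bring the macvlan device up
 * @vsi: the VSI we want to access
 * @vdev: macvlan netdevice
 * @fwd: the private fwd structure
 */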
7779 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7780 struct i40e_fwd_adapter *fwd)
7781 {
7782 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7783 int ret = 0, num_tc = 1, i, aq_err;
7784 struct i40e_pf *pf = vsi->back;
7785 struct i40e_hw *hw = &pf->hw;
7786
7787
7788 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7789 if (!i40e_is_channel_macvlan(iter)) {
7790 iter->fwd = fwd;
7791
7792 for (i = 0; i < num_tc; i++)
7793 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7794 i,
7795 iter->num_queue_pairs,
7796 iter->base_queue);
7797 for (i = 0; i < iter->num_queue_pairs; i++) {
7798 struct i40e_ring *tx_ring, *rx_ring;
7799 u16 pf_q;
7800
7801 pf_q = iter->base_queue + i;
7802
7803
7804 tx_ring = vsi->tx_rings[pf_q];
7805 tx_ring->ch = iter;
7806
7807
7808 rx_ring = vsi->rx_rings[pf_q];
7809 rx_ring->ch = iter;
7810 }
7811 ch = iter;
7812 break;
7813 }
7814 }
7815
7816 if (!ch)
7817 return -EINVAL;
7818
7819
7820
7821
7822 wmb();
7823
7824
7825 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7826 if (ret) {
7827
7828 macvlan_release_l2fw_offload(vdev);
7829 for (i = 0; i < ch->num_queue_pairs; i++) {
7830 struct i40e_ring *rx_ring;
7831 u16 pf_q;
7832
7833 pf_q = ch->base_queue + i;
7834 rx_ring = vsi->rx_rings[pf_q];
7835 rx_ring->netdev = NULL;
7836 }
7837 dev_info(&pf->pdev->dev,
7838 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7839 i40e_stat_str(hw, ret),
7840 i40e_aq_str(hw, aq_err));
7841 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");

7842 }
7843
7844 return ret;
7845 }
7846
7847
7848
7849
7850
7851
7852
7853
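/**
 * i40e_setup_macvlans - create the channels which will be macvlans
 * @vsi: the VSI we want to access
 * @macvlan_cnt: no. of macvlans to be setup
 * @qcnt: no. of Qs per macvlan
 * @vdev: macvlan netdevice
 */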
7854 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7855 struct net_device *vdev)
7856 {
7857 struct i40e_pf *pf = vsi->back;
7858 struct i40e_hw *hw = &pf->hw;
7859 struct i40e_vsi_context ctxt;
7860 u16 sections, qmap, num_qps;
7861 struct i40e_channel *ch;
7862 int i, pow, ret = 0;
7863 u8 offset = 0;
7864
7865 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7866 return -EINVAL;
7867
7868 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7869
7870
7871 pow = fls(roundup_pow_of_two(num_qps) - 1);
7872
7873 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7874 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7875
7876
7877 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7878 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7879 memset(&ctxt, 0, sizeof(ctxt));
7880 ctxt.seid = vsi->seid;
7881 ctxt.pf_num = vsi->back->hw.pf_id;
7882 ctxt.vf_num = 0;
7883 ctxt.uplink_seid = vsi->uplink_seid;
7884 ctxt.info = vsi->info;
7885 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7886 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7887 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7888 ctxt.info.valid_sections |= cpu_to_le16(sections);
7889
7890
7891 vsi->rss_size = max_t(u16, num_qps, qcnt);
7892 ret = i40e_vsi_config_rss(vsi);
7893 if (ret) {
7894 dev_info(&pf->pdev->dev,
7895 "Failed to reconfig RSS for num_queues (%u)\n",
7896 vsi->rss_size);
7897 return ret;
7898 }
7899 vsi->reconfig_rss = true;
7900 dev_dbg(&vsi->back->pdev->dev,
7901 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7902 vsi->next_base_queue = num_qps;
7903 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7904
7905
7906
7907
7908 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7909 if (ret) {
7910 dev_info(&pf->pdev->dev,
7911 "Update vsi tc config failed, err %s aq_err %s\n",
7912 i40e_stat_str(hw, ret),
7913 i40e_aq_str(hw, hw->aq.asq_last_status));
7914 return ret;
7915 }
7916
7917 i40e_vsi_update_queue_map(vsi, &ctxt);
7918 vsi->info.valid_sections = 0;
7919
7920
7921 INIT_LIST_HEAD(&vsi->macvlan_list);
7922 for (i = 0; i < macvlan_cnt; i++) {
7923 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7924 if (!ch) {
7925 ret = -ENOMEM;
7926 goto err_free;
7927 }
7928 INIT_LIST_HEAD(&ch->list);
7929 ch->num_queue_pairs = qcnt;
7930 if (!i40e_setup_channel(pf, vsi, ch)) {
7931 ret = -EINVAL;
7932 kfree(ch);
7933 goto err_free;
7934 }
7935 ch->parent_vsi = vsi;
7936 vsi->cnt_q_avail -= ch->num_queue_pairs;
7937 vsi->macvlan_cnt++;
7938 list_add_tail(&ch->list, &vsi->macvlan_list);
7939 }
7940
7941 return ret;
7942
7943 err_free:
7944 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7945 i40e_free_macvlan_channels(vsi);
7946
7947 return ret;
7948 }
7949
7950
7951
7952
7953
7954
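/**
 * i40e_fwd_add - configure macvlans
 * @netdev: net device to configure
 * @vdev: macvlan netdevice
 */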
7955 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7956 {
7957 struct i40e_netdev_priv *np = netdev_priv(netdev);
7958 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7959 struct i40e_vsi *vsi = np->vsi;
7960 struct i40e_pf *pf = vsi->back;
7961 struct i40e_fwd_adapter *fwd;
7962 int avail_macvlan, ret;
7963
7964 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7965 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7966 return ERR_PTR(-EINVAL);
7967 }
7968 if (i40e_is_tc_mqprio_enabled(pf)) {
7969 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7970 return ERR_PTR(-EINVAL);
7971 }
7972 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7973 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7974 return ERR_PTR(-EINVAL);
7975 }
7976
7977
7978
7979
7980 if (netif_is_multiqueue(vdev))
7981 return ERR_PTR(-ERANGE);
7982
7983 if (!vsi->macvlan_cnt) {
7984
7985 set_bit(0, vsi->fwd_bitmask);
7986
7987
7988
7989
7990
7991 vectors = pf->num_lan_msix;
7992 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7993
7994 q_per_macvlan = 4;
7995 macvlan_cnt = (vectors - 32) / 4;
7996 } else if (vectors <= 64 && vectors > 32) {
7997
7998 q_per_macvlan = 2;
7999 macvlan_cnt = (vectors - 16) / 2;
8000 } else if (vectors <= 32 && vectors > 16) {
8001
8002 q_per_macvlan = 1;
8003 macvlan_cnt = vectors - 16;
8004 } else if (vectors <= 16 && vectors > 8) {
8005
8006 q_per_macvlan = 1;
8007 macvlan_cnt = vectors - 8;
8008 } else {
8009
8010 q_per_macvlan = 1;
8011 macvlan_cnt = vectors - 1;
8012 }
8013
8014 if (macvlan_cnt == 0)
8015 return ERR_PTR(-EBUSY);
8016
8017
8018 i40e_quiesce_vsi(vsi);
8019
8020
8021 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
8022 vdev);
8023 if (ret)
8024 return ERR_PTR(ret);
8025
8026
8027 i40e_unquiesce_vsi(vsi);
8028 }
8029 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
8030 vsi->macvlan_cnt);
8031 if (avail_macvlan >= I40E_MAX_MACVLANS)
8032 return ERR_PTR(-EBUSY);
8033
8034
8035 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
8036 if (!fwd)
8037 return ERR_PTR(-ENOMEM);
8038
8039 set_bit(avail_macvlan, vsi->fwd_bitmask);
8040 fwd->bit_no = avail_macvlan;
8041 netdev_set_sb_channel(vdev, avail_macvlan);
8042 fwd->netdev = vdev;
8043
8044 if (!netif_running(netdev))
8045 return fwd;
8046
8047
8048 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
8049 if (ret) {
8050
8051 netdev_unbind_sb_channel(netdev, vdev);
8052 netdev_set_sb_channel(vdev, 0);
8053
8054 kfree(fwd);
8055 return ERR_PTR(-EINVAL);
8056 }
8057
8058 return fwd;
8059 }
8060
8061
8062
8063
8064
8065 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
8066 {
8067 struct i40e_channel *ch, *ch_tmp;
8068 struct i40e_pf *pf = vsi->back;
8069 struct i40e_hw *hw = &pf->hw;
8070 int aq_err, ret = 0;
8071
8072 if (list_empty(&vsi->macvlan_list))
8073 return;
8074
8075 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8076 if (i40e_is_channel_macvlan(ch)) {
8077 ret = i40e_del_macvlan_filter(hw, ch->seid,
8078 i40e_channel_mac(ch),
8079 &aq_err);
8080 if (!ret) {
8081
8082 i40e_reset_ch_rings(vsi, ch);
8083 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8084 netdev_unbind_sb_channel(vsi->netdev,
8085 ch->fwd->netdev);
8086 netdev_set_sb_channel(ch->fwd->netdev, 0);
8087 kfree(ch->fwd);
8088 ch->fwd = NULL;
8089 }
8090 }
8091 }
8092 }
8093
8094
8095
8096
8097
8098
8099 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
8100 {
8101 struct i40e_netdev_priv *np = netdev_priv(netdev);
8102 struct i40e_fwd_adapter *fwd = vdev;
8103 struct i40e_channel *ch, *ch_tmp;
8104 struct i40e_vsi *vsi = np->vsi;
8105 struct i40e_pf *pf = vsi->back;
8106 struct i40e_hw *hw = &pf->hw;
8107 int aq_err, ret = 0;
8108
8109
8110 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8111 if (i40e_is_channel_macvlan(ch) &&
8112 ether_addr_equal(i40e_channel_mac(ch),
8113 fwd->netdev->dev_addr)) {
8114 ret = i40e_del_macvlan_filter(hw, ch->seid,
8115 i40e_channel_mac(ch),
8116 &aq_err);
8117 if (!ret) {
8118
8119 i40e_reset_ch_rings(vsi, ch);
8120 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8121 netdev_unbind_sb_channel(netdev, fwd->netdev);
8122 netdev_set_sb_channel(fwd->netdev, 0);
8123 kfree(ch->fwd);
8124 ch->fwd = NULL;
8125 } else {
8126 dev_info(&pf->pdev->dev,
8127 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
8128 i40e_stat_str(hw, ret),
8129 i40e_aq_str(hw, aq_err));
8130 }
8131 break;
8132 }
8133 }
8134 }
8135
8136
8137
8138
8139
8140
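/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: tc offload data
 */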
8141 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
8142 {
8143 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8144 struct i40e_netdev_priv *np = netdev_priv(netdev);
8145 struct i40e_vsi *vsi = np->vsi;
8146 struct i40e_pf *pf = vsi->back;
8147 u8 enabled_tc = 0, num_tc, hw;
8148 bool need_reset = false;
8149 int old_queue_pairs;
8150 int ret = -EINVAL;
8151 u16 mode;
8152 int i;
8153
8154 old_queue_pairs = vsi->num_queue_pairs;
8155 num_tc = mqprio_qopt->qopt.num_tc;
8156 hw = mqprio_qopt->qopt.hw;
8157 mode = mqprio_qopt->mode;
8158 if (!hw) {
8159 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
8160 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8161 goto config_tc;
8162 }
8163
8164
8165 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
8166 netdev_info(netdev,
8167 "Configuring TC not supported in MFP mode\n");
8168 return ret;
8169 }
8170 switch (mode) {
8171 case TC_MQPRIO_MODE_DCB:
8172 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
8173
8174
8175 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
8176 netdev_info(netdev,
8177 "DCB is not enabled for adapter\n");
8178 return ret;
8179 }
8180
8181
8182 if (num_tc > i40e_pf_get_num_tc(pf)) {
8183 netdev_info(netdev,
8184 "TC count greater than enabled on link for adapter\n");
8185 return ret;
8186 }
8187 break;
8188 case TC_MQPRIO_MODE_CHANNEL:
8189 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
8190 netdev_info(netdev,
8191 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
8192 return ret;
8193 }
8194 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8195 return ret;
8196 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
8197 if (ret)
8198 return ret;
8199 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
8200 sizeof(*mqprio_qopt));
8201 pf->flags |= I40E_FLAG_TC_MQPRIO;
8202 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8203 break;
8204 default:
8205 return -EINVAL;
8206 }
8207
8208 config_tc:
8209
8210 for (i = 0; i < num_tc; i++)
8211 enabled_tc |= BIT(i);
8212
8213
8214 if (enabled_tc == vsi->tc_config.enabled_tc &&
8215 mode != TC_MQPRIO_MODE_CHANNEL)
8216 return 0;
8217
8218
8219 i40e_quiesce_vsi(vsi);
8220
8221 if (!hw && !i40e_is_tc_mqprio_enabled(pf))
8222 i40e_remove_queue_channels(vsi);
8223
8224
8225 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8226 if (ret) {
8227 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8228 vsi->seid);
8229 need_reset = true;
8230 goto exit;
8231 } else if (enabled_tc &&
8232 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8233 netdev_info(netdev,
8234 "Failed to create channel. Override queues (%u) not power of 2\n",
8235 vsi->tc_config.tc_info[0].qcount);
8236 ret = -EINVAL;
8237 need_reset = true;
8238 goto exit;
8239 }
8240
8241 dev_info(&vsi->back->pdev->dev,
8242 "Setup channel (id:%u) utilizing num_queues %d\n",
8243 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8244
8245 if (i40e_is_tc_mqprio_enabled(pf)) {
8246 if (vsi->mqprio_qopt.max_rate[0]) {
8247 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
8248 vsi->mqprio_qopt.max_rate[0]);
8249
8250 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8251 if (!ret) {
8252 u64 credits = max_tx_rate;
8253
8254 do_div(credits, I40E_BW_CREDIT_DIVISOR);
8255 dev_dbg(&vsi->back->pdev->dev,
8256 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8257 max_tx_rate,
8258 credits,
8259 vsi->seid);
8260 } else {
8261 need_reset = true;
8262 goto exit;
8263 }
8264 }
8265 ret = i40e_configure_queue_channels(vsi);
8266 if (ret) {
8267 vsi->num_queue_pairs = old_queue_pairs;
8268 netdev_info(netdev,
8269 "Failed configuring queue channels\n");
8270 need_reset = true;
8271 goto exit;
8272 }
8273 }
8274
8275 exit:
8276
8277 if (need_reset) {
8278 i40e_vsi_set_default_tc_config(vsi);
8279 need_reset = false;
8280 }
8281
8282
8283 i40e_unquiesce_vsi(vsi);
8284 return ret;
8285 }
8286
8287
8288
8289
8290
8291
8292
8293
8294 static inline void
8295 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8296 struct i40e_aqc_cloud_filters_element_data *cld)
8297 {
8298 u32 ipa;
8299 int i;
8300
8301 memset(cld, 0, sizeof(*cld));
8302 ether_addr_copy(cld->outer_mac, filter->dst_mac);
8303 ether_addr_copy(cld->inner_mac, filter->src_mac);
8304
8305 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8306 return;
8307
8308 if (filter->n_proto == ETH_P_IPV6) {
8309 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
8310 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8311 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8312
8313 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8314 }
8315 } else {
8316 ipa = be32_to_cpu(filter->dst_ipv4);
8317
8318 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8319 }
8320
8321 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8322
8323
8324
8325
8326 if (filter->tenant_id)
8327 return;
8328 }
8329
8330
8331
8332
8333
8334
8335
8336
8337
8338
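/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added or deleted.
 */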
8339 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8340 struct i40e_cloud_filter *filter, bool add)
8341 {
8342 struct i40e_aqc_cloud_filters_element_data cld_filter;
8343 struct i40e_pf *pf = vsi->back;
8344 int ret;
8345 static const u16 flag_table[128] = {
8346 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8347 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8348 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8349 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8350 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8351 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8352 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8353 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8354 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8355 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8356 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8357 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8358 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8359 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8360 };
8361
8362 if (filter->flags >= ARRAY_SIZE(flag_table))
8363 return I40E_ERR_CONFIG;
8364
8365 memset(&cld_filter, 0, sizeof(cld_filter));
8366
8367
8368 i40e_set_cld_element(filter, &cld_filter);
8369
8370 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8371 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8372 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8373
8374 if (filter->n_proto == ETH_P_IPV6)
8375 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8376 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8377 else
8378 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8379 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8380
8381 if (add)
8382 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8383 &cld_filter, 1);
8384 else
8385 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8386 &cld_filter, 1);
8387 if (ret)
8388 dev_dbg(&pf->pdev->dev,
8389 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8390 add ? "add" : "delete", filter->dst_port, ret,
8391 pf->hw.aq.asq_last_status);
8392 else
8393 dev_info(&pf->pdev->dev,
8394 "%s cloud filter for VSI: %d\n",
8395 add ? "Added" : "Deleted", filter->seid);
8396 return ret;
8397 }
8398
8399
8400
8401
8402
8403
8404
8405
8406
8407
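/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter was successfully added or deleted.
 */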
8408 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8409 struct i40e_cloud_filter *filter,
8410 bool add)
8411 {
8412 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8413 struct i40e_pf *pf = vsi->back;
8414 int ret;
8415
8416
8417 if ((is_valid_ether_addr(filter->dst_mac) &&
8418 is_valid_ether_addr(filter->src_mac)) ||
8419 (is_multicast_ether_addr(filter->dst_mac) &&
8420 is_multicast_ether_addr(filter->src_mac)))
8421 return -EOPNOTSUPP;
8422
8423
8424
8425
8426 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8427 return -EOPNOTSUPP;
8428
8429
8430 if (filter->src_port ||
8431 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8432 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8433 return -EOPNOTSUPP;
8434
8435 memset(&cld_filter, 0, sizeof(cld_filter));
8436
8437
8438 i40e_set_cld_element(filter, &cld_filter.element);
8439
8440 if (is_valid_ether_addr(filter->dst_mac) ||
8441 is_valid_ether_addr(filter->src_mac) ||
8442 is_multicast_ether_addr(filter->dst_mac) ||
8443 is_multicast_ether_addr(filter->src_mac)) {
8444
8445 if (filter->dst_ipv4)
8446 return -EOPNOTSUPP;
8447
8448
8449
8450
8451
8452 cld_filter.element.flags =
8453 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8454
8455 if (filter->vlan_id) {
8456 cld_filter.element.flags =
8457 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8458 }
8459
8460 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8461 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8462 cld_filter.element.flags =
8463 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8464 if (filter->n_proto == ETH_P_IPV6)
8465 cld_filter.element.flags |=
8466 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8467 else
8468 cld_filter.element.flags |=
8469 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8470 } else {
8471 dev_err(&pf->pdev->dev,
8472 "either mac or ip has to be valid for cloud filter\n");
8473 return -EINVAL;
8474 }
8475
8476
8477 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8478 be16_to_cpu(filter->dst_port);
8479
8480 if (add) {
8481
8482 ret = i40e_validate_and_set_switch_mode(vsi);
8483 if (ret) {
8484 dev_err(&pf->pdev->dev,
8485 "failed to set switch mode, ret %d\n",
8486 ret);
8487 return ret;
8488 }
8489
8490 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8491 &cld_filter, 1);
8492 } else {
8493 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8494 &cld_filter, 1);
8495 }
8496
8497 if (ret)
8498 dev_dbg(&pf->pdev->dev,
8499 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8500 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8501 else
8502 dev_info(&pf->pdev->dev,
8503 "%s cloud filter for VSI: %d, L4 port: %d\n",
8504 add ? "add" : "delete", filter->seid,
8505 ntohs(filter->dst_port));
8506 return ret;
8507 }
8508
8509
8510
8511
8512
8513
8514
8515
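/**
 * i40e_parse_cls_flower - parse a tc flower filter into a cloud filter
 * @vsi: pointer to VSI
 * @f: pointer to struct flow_cls_offload
 * @filter: pointer to cloud filter structure to fill in
 */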
8516 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8517 struct flow_cls_offload *f,
8518 struct i40e_cloud_filter *filter)
8519 {
8520 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8521 struct flow_dissector *dissector = rule->match.dissector;
8522 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8523 struct i40e_pf *pf = vsi->back;
8524 u8 field_flags = 0;
8525
8526 if (dissector->used_keys &
8527 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8528 BIT(FLOW_DISSECTOR_KEY_BASIC) |
8529 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8530 BIT(FLOW_DISSECTOR_KEY_VLAN) |
8531 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8532 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8533 BIT(FLOW_DISSECTOR_KEY_PORTS) |
8534 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8535 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8536 dissector->used_keys);
8537 return -EOPNOTSUPP;
8538 }
8539
8540 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8541 struct flow_match_enc_keyid match;
8542
8543 flow_rule_match_enc_keyid(rule, &match);
8544 if (match.mask->keyid != 0)
8545 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8546
8547 filter->tenant_id = be32_to_cpu(match.key->keyid);
8548 }
8549
8550 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8551 struct flow_match_basic match;
8552
8553 flow_rule_match_basic(rule, &match);
8554 n_proto_key = ntohs(match.key->n_proto);
8555 n_proto_mask = ntohs(match.mask->n_proto);
8556
8557 if (n_proto_key == ETH_P_ALL) {
8558 n_proto_key = 0;
8559 n_proto_mask = 0;
8560 }
8561 filter->n_proto = n_proto_key & n_proto_mask;
8562 filter->ip_proto = match.key->ip_proto;
8563 }
8564
8565 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8566 struct flow_match_eth_addrs match;
8567
8568 flow_rule_match_eth_addrs(rule, &match);
8569
8570
8571 if (!is_zero_ether_addr(match.mask->dst)) {
8572 if (is_broadcast_ether_addr(match.mask->dst)) {
8573 field_flags |= I40E_CLOUD_FIELD_OMAC;
8574 } else {
8575 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8576 match.mask->dst);
8577 return I40E_ERR_CONFIG;
8578 }
8579 }
8580
8581 if (!is_zero_ether_addr(match.mask->src)) {
8582 if (is_broadcast_ether_addr(match.mask->src)) {
8583 field_flags |= I40E_CLOUD_FIELD_IMAC;
8584 } else {
8585 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8586 match.mask->src);
8587 return I40E_ERR_CONFIG;
8588 }
8589 }
8590 ether_addr_copy(filter->dst_mac, match.key->dst);
8591 ether_addr_copy(filter->src_mac, match.key->src);
8592 }
8593
8594 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8595 struct flow_match_vlan match;
8596
8597 flow_rule_match_vlan(rule, &match);
8598 if (match.mask->vlan_id) {
8599 if (match.mask->vlan_id == VLAN_VID_MASK) {
8600 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8601
8602 } else {
8603 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8604 match.mask->vlan_id);
8605 return I40E_ERR_CONFIG;
8606 }
8607 }
8608
8609 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8610 }
8611
8612 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8613 struct flow_match_control match;
8614
8615 flow_rule_match_control(rule, &match);
8616 addr_type = match.key->addr_type;
8617 }
8618
8619 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8620 struct flow_match_ipv4_addrs match;
8621
8622 flow_rule_match_ipv4_addrs(rule, &match);
8623 if (match.mask->dst) {
8624 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8625 field_flags |= I40E_CLOUD_FIELD_IIP;
8626 } else {
8627 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8628 &match.mask->dst);
8629 return I40E_ERR_CONFIG;
8630 }
8631 }
8632
8633 if (match.mask->src) {
8634 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8635 field_flags |= I40E_CLOUD_FIELD_IIP;
8636 } else {
8637 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8638 &match.mask->src);
8639 return I40E_ERR_CONFIG;
8640 }
8641 }
8642
8643 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8644 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8645 return I40E_ERR_CONFIG;
8646 }
8647 filter->dst_ipv4 = match.key->dst;
8648 filter->src_ipv4 = match.key->src;
8649 }
8650
8651 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8652 struct flow_match_ipv6_addrs match;
8653
8654 flow_rule_match_ipv6_addrs(rule, &match);
8655
8656
8657
8658
8659 if (ipv6_addr_loopback(&match.key->dst) ||
8660 ipv6_addr_loopback(&match.key->src)) {
8661 dev_err(&pf->pdev->dev,
8662 "Bad ipv6, addr is LOOPBACK\n");
8663 return I40E_ERR_CONFIG;
8664 }
8665 if (!ipv6_addr_any(&match.mask->dst) ||
8666 !ipv6_addr_any(&match.mask->src))
8667 field_flags |= I40E_CLOUD_FIELD_IIP;
8668
8669 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8670 sizeof(filter->src_ipv6));
8671 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8672 sizeof(filter->dst_ipv6));
8673 }
8674
8675 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8676 struct flow_match_ports match;
8677
8678 flow_rule_match_ports(rule, &match);
8679 if (match.mask->src) {
8680 if (match.mask->src == cpu_to_be16(0xffff)) {
8681 field_flags |= I40E_CLOUD_FIELD_IIP;
8682 } else {
8683 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8684 be16_to_cpu(match.mask->src));
8685 return I40E_ERR_CONFIG;
8686 }
8687 }
8688
8689 if (match.mask->dst) {
8690 if (match.mask->dst == cpu_to_be16(0xffff)) {
8691 field_flags |= I40E_CLOUD_FIELD_IIP;
8692 } else {
8693 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8694 be16_to_cpu(match.mask->dst));
8695 return I40E_ERR_CONFIG;
8696 }
8697 }
8698
8699 filter->dst_port = match.key->dst;
8700 filter->src_port = match.key->src;
8701
8702 switch (filter->ip_proto) {
8703 case IPPROTO_TCP:
8704 case IPPROTO_UDP:
8705 break;
8706 default:
8707 dev_err(&pf->pdev->dev,
8708 "Only UDP and TCP transport are supported\n");
8709 return -EINVAL;
8710 }
8711 }
8712 filter->flags = field_flags;
8713 return 0;
8714 }
8715
8716
8717
8718
8719
8720
8721
8722
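/**
 * i40e_handle_tclass - forward a cloud filter to a traffic class
 * @vsi: pointer to VSI
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 */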
8723 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8724 struct i40e_cloud_filter *filter)
8725 {
8726 struct i40e_channel *ch, *ch_tmp;
8727
8728
8729 if (tc == 0) {
8730 filter->seid = vsi->seid;
8731 return 0;
8732 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8733 if (!filter->dst_port) {
8734 dev_err(&vsi->back->pdev->dev,
8735 "Specify destination port to direct to traffic class that is not default\n");
8736 return -EINVAL;
8737 }
8738 if (list_empty(&vsi->ch_list))
8739 return -EINVAL;
8740 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8741 list) {
8742 if (ch->seid == vsi->tc_seid_map[tc])
8743 filter->seid = ch->seid;
8744 }
8745 return 0;
8746 }
8747 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8748 return -EINVAL;
8749 }
8750
8751
8752
8753
8754
8755
8756
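/**
 * i40e_configure_clsflower - configure tc flower filters
 * @vsi: pointer to VSI
 * @cls_flower: pointer to struct flow_cls_offload
 */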
8757 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8758 struct flow_cls_offload *cls_flower)
8759 {
8760 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8761 struct i40e_cloud_filter *filter = NULL;
8762 struct i40e_pf *pf = vsi->back;
8763 int err = 0;
8764
8765 if (tc < 0) {
8766 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8767 return -EOPNOTSUPP;
8768 }
8769
8770 if (!tc) {
8771 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination\n");
8772 return -EINVAL;
8773 }
8774
8775 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8776 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8777 return -EBUSY;
8778
8779 if (pf->fdir_pf_active_filters ||
8780 (!hlist_empty(&pf->fdir_filter_list))) {
8781 dev_err(&vsi->back->pdev->dev,
8782 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8783 return -EINVAL;
8784 }
8785
8786 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8787 dev_err(&vsi->back->pdev->dev,
8788 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8789 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8790 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8791 }
8792
8793 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8794 if (!filter)
8795 return -ENOMEM;
8796
8797 filter->cookie = cls_flower->cookie;
8798
8799 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8800 if (err < 0)
8801 goto err;
8802
8803 err = i40e_handle_tclass(vsi, tc, filter);
8804 if (err < 0)
8805 goto err;
8806
8807
8808 if (filter->dst_port)
8809 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8810 else
8811 err = i40e_add_del_cloud_filter(vsi, filter, true);
8812
8813 if (err) {
8814 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8815 err);
8816 goto err;
8817 }
8818
8819
8820 INIT_HLIST_NODE(&filter->cloud_node);
8821
8822 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8823
8824 pf->num_cloud_filters++;
8825
8826 return err;
8827 err:
8828 kfree(filter);
8829 return err;
8830 }
8831
8832
8833
8834
8835
8836
8837
8838 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8839 unsigned long *cookie)
8840 {
8841 struct i40e_cloud_filter *filter = NULL;
8842 struct hlist_node *node2;
8843
8844 hlist_for_each_entry_safe(filter, node2,
8845 &vsi->back->cloud_filter_list, cloud_node)
8846 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8847 return filter;
8848 return NULL;
8849 }
8850
8851
8852
8853
8854
8855
8856
8857 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8858 struct flow_cls_offload *cls_flower)
8859 {
8860 struct i40e_cloud_filter *filter = NULL;
8861 struct i40e_pf *pf = vsi->back;
8862 int err = 0;
8863
8864 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8865
8866 if (!filter)
8867 return -EINVAL;
8868
8869 hash_del(&filter->cloud_node);
8870
8871 if (filter->dst_port)
8872 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8873 else
8874 err = i40e_add_del_cloud_filter(vsi, filter, false);
8875
8876 kfree(filter);
8877 if (err) {
8878 dev_err(&pf->pdev->dev,
8879 "Failed to delete cloud filter, err %s\n",
8880 i40e_stat_str(&pf->hw, err));
8881 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8882 }
8883
8884 pf->num_cloud_filters--;
8885 if (!pf->num_cloud_filters)
8886 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8887 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8888 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8889 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8890 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8891 }
8892 return 0;
8893 }
8894
8895
8896
8897
8898
8899
8900 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8901 struct flow_cls_offload *cls_flower)
8902 {
8903 struct i40e_vsi *vsi = np->vsi;
8904
8905 switch (cls_flower->command) {
8906 case FLOW_CLS_REPLACE:
8907 return i40e_configure_clsflower(vsi, cls_flower);
8908 case FLOW_CLS_DESTROY:
8909 return i40e_delete_clsflower(vsi, cls_flower);
8910 case FLOW_CLS_STATS:
8911 return -EOPNOTSUPP;
8912 default:
8913 return -EOPNOTSUPP;
8914 }
8915 }
8916
8917 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8918 void *cb_priv)
8919 {
8920 struct i40e_netdev_priv *np = cb_priv;
8921
8922 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8923 return -EOPNOTSUPP;
8924
8925 switch (type) {
8926 case TC_SETUP_CLSFLOWER:
8927 return i40e_setup_tc_cls_flower(np, type_data);
8928
8929 default:
8930 return -EOPNOTSUPP;
8931 }
8932 }
8933
8934 static LIST_HEAD(i40e_block_cb_list);
8935
8936 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8937 void *type_data)
8938 {
8939 struct i40e_netdev_priv *np = netdev_priv(netdev);
8940
8941 switch (type) {
8942 case TC_SETUP_QDISC_MQPRIO:
8943 return i40e_setup_tc(netdev, type_data);
8944 case TC_SETUP_BLOCK:
8945 return flow_block_cb_setup_simple(type_data,
8946 &i40e_block_cb_list,
8947 i40e_setup_tc_block_cb,
8948 np, np, true);
8949 default:
8950 return -EOPNOTSUPP;
8951 }
8952 }
8953
8954
8955
8956
8957
8958
8959
8960
8961
8962
8963
8964
8965
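/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IP address, etc).  At this point all resources
 * needed for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */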
8966 int i40e_open(struct net_device *netdev)
8967 {
8968 struct i40e_netdev_priv *np = netdev_priv(netdev);
8969 struct i40e_vsi *vsi = np->vsi;
8970 struct i40e_pf *pf = vsi->back;
8971 int err;
8972
8973
8974 if (test_bit(__I40E_TESTING, pf->state) ||
8975 test_bit(__I40E_BAD_EEPROM, pf->state))
8976 return -EBUSY;
8977
8978 netif_carrier_off(netdev);
8979
8980 if (i40e_force_link_state(pf, true))
8981 return -EAGAIN;
8982
8983 err = i40e_vsi_open(vsi);
8984 if (err)
8985 return err;
8986
8987
8988 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8989 TCP_FLAG_FIN) >> 16);
8990 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8991 TCP_FLAG_FIN |
8992 TCP_FLAG_CWR) >> 16);
8993 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8994 udp_tunnel_get_rx_info(netdev);
8995
8996 return 0;
8997 }
8998
8999
9000
9001
9002
9003
9004
9005
9006
9007 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
9008 {
9009 int ret;
9010
9011 ret = netif_set_real_num_rx_queues(vsi->netdev,
9012 vsi->num_queue_pairs);
9013 if (ret)
9014 return ret;
9015
9016 return netif_set_real_num_tx_queues(vsi->netdev,
9017 vsi->num_queue_pairs);
9018 }
9019
9020
9021
9022
9023
9024
9025
9026
9027
9028
9029
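/**
 * i40e_vsi_open - bring up a VSI and request its IRQs
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 */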
9030 int i40e_vsi_open(struct i40e_vsi *vsi)
9031 {
9032 struct i40e_pf *pf = vsi->back;
9033 char int_name[I40E_INT_NAME_STR_LEN];
9034 int err;
9035
9036
9037 err = i40e_vsi_setup_tx_resources(vsi);
9038 if (err)
9039 goto err_setup_tx;
9040 err = i40e_vsi_setup_rx_resources(vsi);
9041 if (err)
9042 goto err_setup_rx;
9043
9044 err = i40e_vsi_configure(vsi);
9045 if (err)
9046 goto err_setup_rx;
9047
9048 if (vsi->netdev) {
9049 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
9050 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
9051 err = i40e_vsi_request_irq(vsi, int_name);
9052 if (err)
9053 goto err_setup_rx;
9054
9055
9056 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
9057 if (err)
9058 goto err_set_queues;
9059
9060 } else if (vsi->type == I40E_VSI_FDIR) {
9061 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
9062 dev_driver_string(&pf->pdev->dev),
9063 dev_name(&pf->pdev->dev));
9064 err = i40e_vsi_request_irq(vsi, int_name);
9065 if (err)
9066 goto err_setup_rx;
9067
9068 } else {
9069 err = -EINVAL;
9070 goto err_setup_rx;
9071 }
9072
9073 err = i40e_up_complete(vsi);
9074 if (err)
9075 goto err_up_complete;
9076
9077 return 0;
9078
9079 err_up_complete:
9080 i40e_down(vsi);
9081 err_set_queues:
9082 i40e_vsi_free_irq(vsi);
9083 err_setup_rx:
9084 i40e_vsi_free_rx_resources(vsi);
9085 err_setup_tx:
9086 i40e_vsi_free_tx_resources(vsi);
9087 if (vsi == pf->vsi[pf->lan_vsi])
9088 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
9089
9090 return err;
9091 }
9092
9093
9094
9095
9096
9097
9098
9099
9100 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
9101 {
9102 struct i40e_fdir_filter *filter;
9103 struct i40e_flex_pit *pit_entry, *tmp;
9104 struct hlist_node *node2;
9105
9106 hlist_for_each_entry_safe(filter, node2,
9107 &pf->fdir_filter_list, fdir_node) {
9108 hlist_del(&filter->fdir_node);
9109 kfree(filter);
9110 }
9111
9112 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
9113 list_del(&pit_entry->list);
9114 kfree(pit_entry);
9115 }
9116 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
9117
9118 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
9119 list_del(&pit_entry->list);
9120 kfree(pit_entry);
9121 }
9122 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
9123
9124 pf->fdir_pf_active_filters = 0;
9125 i40e_reset_fdir_filter_cnt(pf);
9126
9127
9128 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9129 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9130 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9131
9132
9133 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9134 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9135 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9136
9137
9138 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9139 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9140 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9141
9142
9143 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9144 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9145 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9146
9147
9148 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9149 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9150 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9151
9152
9153 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9154 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9155 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9156
9157
9158 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9159 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9160
9161 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
9162 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9163
9164
9165 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9166 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9167
9168 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
9169 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9170 }
9171
9172
9173
9174
9175
9176
9177
9178
9179 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
9180 {
9181 struct i40e_cloud_filter *cfilter;
9182 struct hlist_node *node;
9183
9184 hlist_for_each_entry_safe(cfilter, node,
9185 &pf->cloud_filter_list, cloud_node) {
9186 hlist_del(&cfilter->cloud_node);
9187 kfree(cfilter);
9188 }
9189 pf->num_cloud_filters = 0;
9190
9191 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
9192 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
9193 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9194 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
9195 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
9196 }
9197 }
9198
9199
9200
9201
9202
9203
9204
9205
9206
9207
9208
9209 int i40e_close(struct net_device *netdev)
9210 {
9211 struct i40e_netdev_priv *np = netdev_priv(netdev);
9212 struct i40e_vsi *vsi = np->vsi;
9213
9214 i40e_vsi_close(vsi);
9215
9216 return 0;
9217 }
9218
9219
9220
9221
9222
9223
9224
9225
9226
9227
9228
9229
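/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 */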
9230 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9231 {
9232 u32 val;
9233
9234
9235 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9236
9237
9238
9239
9240
9241
9242
9243
9244
9245 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9246 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9247 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9248 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9249
9250 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9251
9252
9253
9254
9255
9256 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9257 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9258 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9259 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9260 i40e_flush(&pf->hw);
9261
9262 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9263
9264
9265
9266
9267
9268
9269
9270
9271
9272 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9273 i40e_handle_reset_warning(pf, lock_acquired);
9274
9275 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9276
9277
9278
9279
9280 i40e_prep_for_reset(pf);
9281 i40e_reset_and_rebuild(pf, true, lock_acquired);
9282 dev_info(&pf->pdev->dev,
9283 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9284 "FW LLDP is disabled\n" :
9285 "FW LLDP is enabled\n");
9286
9287 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9288 int v;
9289
9290
9291 dev_info(&pf->pdev->dev,
9292 "VSI reinit requested\n");
9293 for (v = 0; v < pf->num_alloc_vsi; v++) {
9294 struct i40e_vsi *vsi = pf->vsi[v];
9295
9296 if (vsi != NULL &&
9297 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9298 vsi->state))
9299 i40e_vsi_reinit_locked(pf->vsi[v]);
9300 }
9301 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9302 int v;
9303
9304
9305 dev_info(&pf->pdev->dev, "VSI down requested\n");
9306 for (v = 0; v < pf->num_alloc_vsi; v++) {
9307 struct i40e_vsi *vsi = pf->vsi[v];
9308
9309 if (vsi != NULL &&
9310 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9311 vsi->state)) {
9312 set_bit(__I40E_VSI_DOWN, vsi->state);
9313 i40e_down(vsi);
9314 }
9315 }
9316 } else {
9317 dev_info(&pf->pdev->dev,
9318 "bad reset request 0x%08x\n", reset_flags);
9319 }
9320 }
9321
9322 #ifdef CONFIG_I40E_DCB
9323
9324
9325
9326
9327
9328
9329 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9330 struct i40e_dcbx_config *old_cfg,
9331 struct i40e_dcbx_config *new_cfg)
9332 {
9333 bool need_reconfig = false;
9334
9335
9336 if (memcmp(&new_cfg->etscfg,
9337 &old_cfg->etscfg,
9338 sizeof(new_cfg->etscfg))) {
9339
9340 if (memcmp(&new_cfg->etscfg.prioritytable,
9341 &old_cfg->etscfg.prioritytable,
9342 sizeof(new_cfg->etscfg.prioritytable))) {
9343 need_reconfig = true;
9344 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9345 }
9346
9347 if (memcmp(&new_cfg->etscfg.tcbwtable,
9348 &old_cfg->etscfg.tcbwtable,
9349 sizeof(new_cfg->etscfg.tcbwtable)))
9350 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9351
9352 if (memcmp(&new_cfg->etscfg.tsatable,
9353 &old_cfg->etscfg.tsatable,
9354 sizeof(new_cfg->etscfg.tsatable)))
9355 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9356 }
9357
9358
9359 if (memcmp(&new_cfg->pfc,
9360 &old_cfg->pfc,
9361 sizeof(new_cfg->pfc))) {
9362 need_reconfig = true;
9363 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9364 }
9365
9366
9367 if (memcmp(&new_cfg->app,
9368 &old_cfg->app,
9369 sizeof(new_cfg->app))) {
9370 need_reconfig = true;
9371 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9372 }
9373
9374 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9375 return need_reconfig;
9376 }
9377
9378
9379
9380
9381
9382
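/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 */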
9383 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9384 struct i40e_arq_event_info *e)
9385 {
9386 struct i40e_aqc_lldp_get_mib *mib =
9387 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9388 struct i40e_hw *hw = &pf->hw;
9389 struct i40e_dcbx_config tmp_dcbx_cfg;
9390 bool need_reconfig = false;
9391 int ret = 0;
9392 u8 type;
9393
9394
9395 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9396 (hw->phy.link_info.link_speed &
9397 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9398 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9399
9400 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9401
9402
9403 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9404 return ret;
9405
9406
9407 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9408 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9409 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9410 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9411 return ret;
9412
9413
9414 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9415 dev_dbg(&pf->pdev->dev,
9416 "LLDP event mib type %s\n", type ? "remote" : "local");
9417 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9418
9419 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9420 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9421 &hw->remote_dcbx_config);
9422 goto exit;
9423 }
9424
9425
9426 tmp_dcbx_cfg = hw->local_dcbx_config;
9427
9428
9429 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9430
9431 ret = i40e_get_dcb_config(&pf->hw);
9432 if (ret) {
9433
9434 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9435 (hw->phy.link_info.link_speed &
9436 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9437 dev_warn(&pf->pdev->dev,
9438 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9439 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9440 } else {
9441 dev_info(&pf->pdev->dev,
9442 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9443 i40e_stat_str(&pf->hw, ret),
9444 i40e_aq_str(&pf->hw,
9445 pf->hw.aq.asq_last_status));
9446 }
9447 goto exit;
9448 }
9449
9450
9451 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9452 sizeof(tmp_dcbx_cfg))) {
9453 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9454 goto exit;
9455 }
9456
9457 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9458 &hw->local_dcbx_config);
9459
9460 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9461
9462 if (!need_reconfig)
9463 goto exit;
9464
9465
9466 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9467 pf->flags |= I40E_FLAG_DCB_ENABLED;
9468 else
9469 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9470
9471 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9472
9473 i40e_pf_quiesce_all_vsi(pf);
9474
9475
9476 i40e_dcb_reconfigure(pf);
9477
9478 ret = i40e_resume_port_tx(pf);
9479
9480 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9481
9482 if (ret)
9483 goto exit;
9484
9485
9486 ret = i40e_pf_wait_queues_disabled(pf);
9487 if (ret) {
9488
9489 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9490 i40e_service_event_schedule(pf);
9491 } else {
9492 i40e_pf_unquiesce_all_vsi(pf);
9493 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9494 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9495 }
9496
9497 exit:
9498 return ret;
9499 }
9500 #endif
9501
9502
9503
9504
9505
9506
9507
9508 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9509 {
9510 rtnl_lock();
9511 i40e_do_reset(pf, reset_flags, true);
9512 rtnl_unlock();
9513 }
9514
9515
9516
9517
9518
9519
9520
9521
9522
9523 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9524 struct i40e_arq_event_info *e)
9525 {
9526 struct i40e_aqc_lan_overflow *data =
9527 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9528 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9529 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9530 struct i40e_hw *hw = &pf->hw;
9531 struct i40e_vf *vf;
9532 u16 vf_id;
9533
9534 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9535 queue, qtx_ctl);
9536
9537
9538 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9539 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9540 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9541 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9542 vf_id -= hw->func_caps.vf_base_id;
9543 vf = &pf->vf[vf_id];
9544 i40e_vc_notify_vf_reset(vf);
9545
9546 msleep(20);
9547 i40e_reset_vf(vf, false);
9548 }
9549 }
9550
9551
9552
9553
9554
9555 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9556 {
9557 u32 val, fcnt_prog;
9558
9559 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9560 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9561 return fcnt_prog;
9562 }
9563
9564
9565
9566
9567
9568 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9569 {
9570 u32 val, fcnt_prog;
9571
9572 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9573 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9574 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9575 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9576 return fcnt_prog;
9577 }
9578
9579
9580
9581
9582
9583 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9584 {
9585 u32 val, fcnt_prog;
9586
9587 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9588 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9589 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9590 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9591 return fcnt_prog;
9592 }
9593
9594
9595
9596
9597
9598 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9599 {
9600 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9601 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9602 (I40E_DEBUG_FD & pf->hw.debug_mask))
9603 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9604 }
9605
9606
9607
9608
9609
9610 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9611 {
9612 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9613
9614
9615
9616
9617
9618 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9619 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9620 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9621
9622 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9623 (I40E_DEBUG_FD & pf->hw.debug_mask))
9624 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9625 }
9626 }
9627
9628
9629
9630
9631
9632
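/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to remove
 */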
9633 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9634 struct i40e_fdir_filter *filter)
9635 {
9636
9637 pf->fdir_pf_active_filters--;
9638 pf->fd_inv = 0;
9639
9640 switch (filter->flow_type) {
9641 case TCP_V4_FLOW:
9642 pf->fd_tcp4_filter_cnt--;
9643 break;
9644 case UDP_V4_FLOW:
9645 pf->fd_udp4_filter_cnt--;
9646 break;
9647 case SCTP_V4_FLOW:
9648 pf->fd_sctp4_filter_cnt--;
9649 break;
9650 case TCP_V6_FLOW:
9651 pf->fd_tcp6_filter_cnt--;
9652 break;
9653 case UDP_V6_FLOW:
9654 pf->fd_udp6_filter_cnt--;
9655 break;
9656 case SCTP_V6_FLOW:
9657 pf->fd_sctp6_filter_cnt--;
9658 break;
9659 case IP_USER_FLOW:
9660 switch (filter->ipl4_proto) {
9661 case IPPROTO_TCP:
9662 pf->fd_tcp4_filter_cnt--;
9663 break;
9664 case IPPROTO_UDP:
9665 pf->fd_udp4_filter_cnt--;
9666 break;
9667 case IPPROTO_SCTP:
9668 pf->fd_sctp4_filter_cnt--;
9669 break;
9670 case IPPROTO_IP:
9671 pf->fd_ip4_filter_cnt--;
9672 break;
9673 }
9674 break;
9675 case IPV6_USER_FLOW:
9676 switch (filter->ipl4_proto) {
9677 case IPPROTO_TCP:
9678 pf->fd_tcp6_filter_cnt--;
9679 break;
9680 case IPPROTO_UDP:
9681 pf->fd_udp6_filter_cnt--;
9682 break;
9683 case IPPROTO_SCTP:
9684 pf->fd_sctp6_filter_cnt--;
9685 break;
9686 case IPPROTO_IP:
9687 pf->fd_ip6_filter_cnt--;
9688 break;
9689 }
9690 break;
9691 }
9692
9693
9694 hlist_del(&filter->fdir_node);
9695 kfree(filter);
9696 }
9697
9698
9699
9700
9701
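/**
 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if auto-disabled
 * @pf: board private structure
 **/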
9702 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9703 {
9704 struct i40e_fdir_filter *filter;
9705 u32 fcnt_prog, fcnt_avail;
9706 struct hlist_node *node;
9707
9708 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9709 return;
9710
9711
9712 fcnt_prog = i40e_get_global_fd_count(pf);
9713 fcnt_avail = pf->fdir_pf_filter_count;
9714 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9715 (pf->fd_add_err == 0) ||
9716 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9717 i40e_reenable_fdir_sb(pf);
9718
9719
9720
9721
9722
9723 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9724 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9725 i40e_reenable_fdir_atr(pf);
9726
9727
9728 if (pf->fd_inv > 0) {
9729 hlist_for_each_entry_safe(filter, node,
9730 &pf->fdir_filter_list, fdir_node)
9731 if (filter->fd_id == pf->fd_inv)
9732 i40e_delete_invalid_filter(pf, filter);
9733 }
9734 }
9735
9736 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9737 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9738
9739
9740
9741
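/**
 * i40e_fdir_flush_and_replay - Flush all FD filters and replay SB filters
 * @pf: board private structure
 **/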
9742 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9743 {
9744 unsigned long min_flush_time;
9745 int flush_wait_retry = 50;
9746 bool disable_atr = false;
9747 int fd_room;
9748 int reg;
9749
9750 if (!time_after(jiffies, pf->fd_flush_timestamp +
9751 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9752 return;
9753
9754
9755
9756
9757 min_flush_time = pf->fd_flush_timestamp +
9758 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9759 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9760
9761 if (!(time_after(jiffies, min_flush_time)) &&
9762 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9763 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9764 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9765 disable_atr = true;
9766 }
9767
9768 pf->fd_flush_timestamp = jiffies;
9769 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9770
9771 wr32(&pf->hw, I40E_PFQF_CTL_1,
9772 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9773 i40e_flush(&pf->hw);
9774 pf->fd_flush_cnt++;
9775 pf->fd_add_err = 0;
9776 do {
9777
9778 usleep_range(5000, 6000);
9779 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9780 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9781 break;
9782 } while (flush_wait_retry--);
9783 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9784 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9785 } else {
9786
9787 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9788 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9789 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9790 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9791 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9792 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9793 }
9794 }
9795
9796
9797
9798
9799
9800 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9801 {
9802 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9803 }
9804
9805
9806
9807
9808
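/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/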
9809 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9810 {
9811
9812
9813 if (test_bit(__I40E_DOWN, pf->state))
9814 return;
9815
9816 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9817 i40e_fdir_flush_and_replay(pf);
9818
9819 i40e_fdir_check_and_reenable(pf);
9820
9821 }
9822
9823
9824
9825
9826
9827
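/**
 * i40e_vsi_link_event - notify a VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/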
9828 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9829 {
9830 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9831 return;
9832
9833 switch (vsi->type) {
9834 case I40E_VSI_MAIN:
9835 if (!vsi->netdev || !vsi->netdev_registered)
9836 break;
9837
9838 if (link_up) {
9839 netif_carrier_on(vsi->netdev);
9840 netif_tx_wake_all_queues(vsi->netdev);
9841 } else {
9842 netif_carrier_off(vsi->netdev);
9843 netif_tx_stop_all_queues(vsi->netdev);
9844 }
9845 break;
9846
9847 case I40E_VSI_SRIOV:
9848 case I40E_VSI_VMDQ2:
9849 case I40E_VSI_CTRL:
9850 case I40E_VSI_IWARP:
9851 case I40E_VSI_MIRROR:
9852 default:
9853
9854 break;
9855 }
9856 }
9857
9858
9859
9860
9861
9862
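/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/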
9863 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9864 {
9865 struct i40e_pf *pf;
9866 int i;
9867
9868 if (!veb || !veb->pf)
9869 return;
9870 pf = veb->pf;
9871
9872
9873 for (i = 0; i < I40E_MAX_VEB; i++)
9874 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9875 i40e_veb_link_event(pf->veb[i], link_up);
9876
9877
9878 for (i = 0; i < pf->num_alloc_vsi; i++)
9879 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9880 i40e_vsi_link_event(pf->vsi[i], link_up);
9881 }
9882
9883
9884
9885
9886
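/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/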
9887 static void i40e_link_event(struct i40e_pf *pf)
9888 {
9889 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9890 u8 new_link_speed, old_link_speed;
9891 i40e_status status;
9892 bool new_link, old_link;
9893 #ifdef CONFIG_I40E_DCB
9894 int err;
9895 #endif
9896
9897
9898 pf->hw.phy.get_link_info = true;
9899 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9900 status = i40e_get_link_status(&pf->hw, &new_link);
9901
9902
9903 if (status == I40E_SUCCESS) {
9904 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9905 } else {
9906
9907
9908
9909 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9910 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9911 status);
9912 return;
9913 }
9914
9915 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9916 new_link_speed = pf->hw.phy.link_info.link_speed;
9917
9918 if (new_link == old_link &&
9919 new_link_speed == old_link_speed &&
9920 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9921 new_link == netif_carrier_ok(vsi->netdev)))
9922 return;
9923
9924 i40e_print_link_message(vsi, new_link);
9925
9926
9927
9928
9929 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9930 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9931 else
9932 i40e_vsi_link_event(vsi, new_link);
9933
9934 if (pf->vf)
9935 i40e_vc_notify_link_state(pf);
9936
9937 if (pf->flags & I40E_FLAG_PTP)
9938 i40e_ptp_set_increment(pf);
9939 #ifdef CONFIG_I40E_DCB
9940 if (new_link == old_link)
9941 return;
9942
9943 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9944 return;
9945
9946
9947
9948
9949 if (!new_link) {
9950 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9951 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9952 err = i40e_dcb_sw_default_config(pf);
9953 if (err) {
9954 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9955 I40E_FLAG_DCB_ENABLED);
9956 } else {
9957 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9958 DCB_CAP_DCBX_VER_IEEE;
9959 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9960 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9961 }
9962 }
9963 #endif
9964 }
9965
9966
9967
9968
9969
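/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/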
9970 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9971 {
9972 int i;
9973
9974
9975 if (test_bit(__I40E_DOWN, pf->state) ||
9976 test_bit(__I40E_CONFIG_BUSY, pf->state))
9977 return;
9978
9979
9980 if (time_before(jiffies, (pf->service_timer_previous +
9981 pf->service_timer_period)))
9982 return;
9983 pf->service_timer_previous = jiffies;
9984
9985 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9986 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9987 i40e_link_event(pf);
9988
9989
9990
9991
9992 for (i = 0; i < pf->num_alloc_vsi; i++)
9993 if (pf->vsi[i] && pf->vsi[i]->netdev)
9994 i40e_update_stats(pf->vsi[i]);
9995
9996 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9997
9998 for (i = 0; i < I40E_MAX_VEB; i++)
9999 if (pf->veb[i])
10000 i40e_update_veb_stats(pf->veb[i]);
10001 }
10002
10003 i40e_ptp_rx_hang(pf);
10004 i40e_ptp_tx_hang(pf);
10005 }
10006
10007
10008
10009
10010
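/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/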
10011 static void i40e_reset_subtask(struct i40e_pf *pf)
10012 {
10013 u32 reset_flags = 0;
10014
10015 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
10016 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
10017 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
10018 }
10019 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
10020 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
10021 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
10022 }
10023 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
10024 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
10025 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
10026 }
10027 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
10028 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
10029 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
10030 }
10031 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
10032 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
10033 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
10034 }
10035
10036
10037
10038
10039 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
10040 i40e_prep_for_reset(pf);
10041 i40e_reset(pf);
10042 i40e_rebuild(pf, false, false);
10043 }
10044
10045
10046 if (reset_flags &&
10047 !test_bit(__I40E_DOWN, pf->state) &&
10048 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
10049 i40e_do_reset(pf, reset_flags, false);
10050 }
10051 }
10052
10053
10054
10055
10056
10057
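/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/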
10058 static void i40e_handle_link_event(struct i40e_pf *pf,
10059 struct i40e_arq_event_info *e)
10060 {
10061 struct i40e_aqc_get_link_status *status =
10062 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
10063
10064
10065
10066
10067
10068
10069
10070 i40e_link_event(pf);
10071
10072
10073 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
10074 dev_err(&pf->pdev->dev,
10075 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
10076 dev_err(&pf->pdev->dev,
10077 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10078 } else {
10079
10080
10081
10082 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
10083 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
10084 (!(status->link_info & I40E_AQ_LINK_UP)) &&
10085 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
10086 dev_err(&pf->pdev->dev,
10087 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
10088 dev_err(&pf->pdev->dev,
10089 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10090 }
10091 }
10092 }
10093
10094
10095
10096
10097
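/**
 * i40e_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 **/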
10098 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
10099 {
10100 struct i40e_arq_event_info event;
10101 struct i40e_hw *hw = &pf->hw;
10102 u16 pending, i = 0;
10103 i40e_status ret;
10104 u16 opcode;
10105 u32 oldval;
10106 u32 val;
10107
10108
10109 if (test_bit(__I40E_RESET_FAILED, pf->state))
10110 return;
10111
10112
10113 val = rd32(&pf->hw, pf->hw.aq.arq.len);
10114 oldval = val;
10115 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
10116 if (hw->debug_mask & I40E_DEBUG_AQ)
10117 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
10118 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
10119 }
10120 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
10121 if (hw->debug_mask & I40E_DEBUG_AQ)
10122 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
10123 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
10124 pf->arq_overflows++;
10125 }
10126 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
10127 if (hw->debug_mask & I40E_DEBUG_AQ)
10128 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
10129 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
10130 }
10131 if (oldval != val)
10132 wr32(&pf->hw, pf->hw.aq.arq.len, val);
10133
10134 val = rd32(&pf->hw, pf->hw.aq.asq.len);
10135 oldval = val;
10136 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
10137 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10138 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
10139 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
10140 }
10141 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
10142 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10143 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
10144 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
10145 }
10146 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
10147 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10148 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
10149 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
10150 }
10151 if (oldval != val)
10152 wr32(&pf->hw, pf->hw.aq.asq.len, val);
10153
10154 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
10155 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
10156 if (!event.msg_buf)
10157 return;
10158
10159 do {
10160 ret = i40e_clean_arq_element(hw, &event, &pending);
10161 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
10162 break;
10163 else if (ret) {
10164 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
10165 break;
10166 }
10167
10168 opcode = le16_to_cpu(event.desc.opcode);
10169 switch (opcode) {
10170
10171 case i40e_aqc_opc_get_link_status:
10172 rtnl_lock();
10173 i40e_handle_link_event(pf, &event);
10174 rtnl_unlock();
10175 break;
10176 case i40e_aqc_opc_send_msg_to_pf:
10177 ret = i40e_vc_process_vf_msg(pf,
10178 le16_to_cpu(event.desc.retval),
10179 le32_to_cpu(event.desc.cookie_high),
10180 le32_to_cpu(event.desc.cookie_low),
10181 event.msg_buf,
10182 event.msg_len);
10183 break;
10184 case i40e_aqc_opc_lldp_update_mib:
10185 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
10186 #ifdef CONFIG_I40E_DCB
10187 rtnl_lock();
10188 i40e_handle_lldp_event(pf, &event);
10189 rtnl_unlock();
10190 #endif
10191 break;
10192 case i40e_aqc_opc_event_lan_overflow:
10193 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
10194 i40e_handle_lan_overflow_event(pf, &event);
10195 break;
10196 case i40e_aqc_opc_send_msg_to_peer:
10197 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
10198 break;
10199 case i40e_aqc_opc_nvm_erase:
10200 case i40e_aqc_opc_nvm_update:
10201 case i40e_aqc_opc_oem_post_update:
10202 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
10203 "ARQ NVM operation 0x%04x completed\n",
10204 opcode);
10205 break;
10206 default:
10207 dev_info(&pf->pdev->dev,
10208 "ARQ: Unknown event 0x%04x ignored\n",
10209 opcode);
10210 break;
10211 }
10212 } while (i++ < pf->adminq_work_limit);
10213
10214 if (i < pf->adminq_work_limit)
10215 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
10216
10217
10218 val = rd32(hw, I40E_PFINT_ICR0_ENA);
10219 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
10220 wr32(hw, I40E_PFINT_ICR0_ENA, val);
10221 i40e_flush(hw);
10222
10223 kfree(event.msg_buf);
10224 }
10225
10226
10227
10228
10229
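/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/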
10230 static void i40e_verify_eeprom(struct i40e_pf *pf)
10231 {
10232 int err;
10233
10234 err = i40e_diag_eeprom_test(&pf->hw);
10235 if (err) {
10236
10237 err = i40e_diag_eeprom_test(&pf->hw);
10238 if (err) {
10239 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10240 err);
10241 set_bit(__I40E_BAD_EEPROM, pf->state);
10242 }
10243 }
10244
10245 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10246 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10247 clear_bit(__I40E_BAD_EEPROM, pf->state);
10248 }
10249 }
10250
10251
10252
10253
10254
10255
10256
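/**
 * i40e_enable_pf_switch_lb - enable switch loopback on the main VSI
 * @pf: pointer to the PF structure
 *
 * There is no point in a return value here.
 **/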
10257 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10258 {
10259 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10260 struct i40e_vsi_context ctxt;
10261 int ret;
10262
10263 ctxt.seid = pf->main_vsi_seid;
10264 ctxt.pf_num = pf->hw.pf_id;
10265 ctxt.vf_num = 0;
10266 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10267 if (ret) {
10268 dev_info(&pf->pdev->dev,
10269 "couldn't get PF vsi config, err %s aq_err %s\n",
10270 i40e_stat_str(&pf->hw, ret),
10271 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10272 return;
10273 }
10274 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10275 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10276 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10277
10278 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10279 if (ret) {
10280 dev_info(&pf->pdev->dev,
10281 "update vsi switch failed, err %s aq_err %s\n",
10282 i40e_stat_str(&pf->hw, ret),
10283 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10284 }
10285 }
10286
10287
10288
10289
10290
10291
10292
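/**
 * i40e_disable_pf_switch_lb - disable switch loopback on the main VSI
 * @pf: pointer to the PF structure
 *
 * There is no point in a return value here.
 **/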
10293 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10294 {
10295 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10296 struct i40e_vsi_context ctxt;
10297 int ret;
10298
10299 ctxt.seid = pf->main_vsi_seid;
10300 ctxt.pf_num = pf->hw.pf_id;
10301 ctxt.vf_num = 0;
10302 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10303 if (ret) {
10304 dev_info(&pf->pdev->dev,
10305 "couldn't get PF vsi config, err %s aq_err %s\n",
10306 i40e_stat_str(&pf->hw, ret),
10307 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10308 return;
10309 }
10310 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10311 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10312 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10313
10314 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10315 if (ret) {
10316 dev_info(&pf->pdev->dev,
10317 "update vsi switch failed, err %s aq_err %s\n",
10318 i40e_stat_str(&pf->hw, ret),
10319 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10320 }
10321 }
10322
10323
10324
10325
10326
10327
10328
10329
10330
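/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loopback mode for the LAN VSI that is downlink to the
 * specified HW bridge instance.
 **/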
10331 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10332 {
10333 struct i40e_pf *pf = veb->pf;
10334
10335 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10336 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10337 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10338 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10339 i40e_disable_pf_switch_lb(pf);
10340 else
10341 i40e_enable_pf_switch_lb(pf);
10342 }
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
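/**
 * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
 * @veb: pointer to the VEB instance
 *
 * Rebuilds the owner VSI, the VEB itself and the attached VSIs, then
 * recurses into any VEBs that hang off this one.
 **/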
10353 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10354 {
10355 struct i40e_vsi *ctl_vsi = NULL;
10356 struct i40e_pf *pf = veb->pf;
10357 int v, veb_idx;
10358 int ret;
10359
10360
10361 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10362 if (pf->vsi[v] &&
10363 pf->vsi[v]->veb_idx == veb->idx &&
10364 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10365 ctl_vsi = pf->vsi[v];
10366 break;
10367 }
10368 }
10369 if (!ctl_vsi) {
10370 dev_info(&pf->pdev->dev,
10371 "missing owner VSI for veb_idx %d\n", veb->idx);
10372 ret = -ENOENT;
10373 goto end_reconstitute;
10374 }
10375 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10376 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10377 ret = i40e_add_vsi(ctl_vsi);
10378 if (ret) {
10379 dev_info(&pf->pdev->dev,
10380 "rebuild of veb_idx %d owner VSI failed: %d\n",
10381 veb->idx, ret);
10382 goto end_reconstitute;
10383 }
10384 i40e_vsi_reset_stats(ctl_vsi);
10385
10386
10387 ret = i40e_add_veb(veb, ctl_vsi);
10388 if (ret)
10389 goto end_reconstitute;
10390
10391 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10392 veb->bridge_mode = BRIDGE_MODE_VEB;
10393 else
10394 veb->bridge_mode = BRIDGE_MODE_VEPA;
10395 i40e_config_bridge_mode(veb);
10396
10397
10398 for (v = 0; v < pf->num_alloc_vsi; v++) {
10399 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10400 continue;
10401
10402 if (pf->vsi[v]->veb_idx == veb->idx) {
10403 struct i40e_vsi *vsi = pf->vsi[v];
10404
10405 vsi->uplink_seid = veb->seid;
10406 ret = i40e_add_vsi(vsi);
10407 if (ret) {
10408 dev_info(&pf->pdev->dev,
10409 "rebuild of vsi_idx %d failed: %d\n",
10410 v, ret);
10411 goto end_reconstitute;
10412 }
10413 i40e_vsi_reset_stats(vsi);
10414 }
10415 }
10416
10417
10418 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10419 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10420 pf->veb[veb_idx]->uplink_seid = veb->seid;
10421 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10422 if (ret)
10423 break;
10424 }
10425 }
10426
10427 end_reconstitute:
10428 return ret;
10429 }
10430
10431
10432
10433
10434
10435
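/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/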
10436 static int i40e_get_capabilities(struct i40e_pf *pf,
10437 enum i40e_admin_queue_opc list_type)
10438 {
10439 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10440 u16 data_size;
10441 int buf_len;
10442 int err;
10443
10444 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10445 do {
10446 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10447 if (!cap_buf)
10448 return -ENOMEM;
10449
10450
10451 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10452 &data_size, list_type,
10453 NULL);
10454
10455 kfree(cap_buf);
10456
10457 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10458
10459 buf_len = data_size;
10460 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10461 dev_info(&pf->pdev->dev,
10462 "capability discovery failed, err %s aq_err %s\n",
10463 i40e_stat_str(&pf->hw, err),
10464 i40e_aq_str(&pf->hw,
10465 pf->hw.aq.asq_last_status));
10466 return -ENODEV;
10467 }
10468 } while (err);
10469
10470 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10471 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10472 dev_info(&pf->pdev->dev,
10473 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10474 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10475 pf->hw.func_caps.num_msix_vectors,
10476 pf->hw.func_caps.num_msix_vectors_vf,
10477 pf->hw.func_caps.fd_filters_guaranteed,
10478 pf->hw.func_caps.fd_filters_best_effort,
10479 pf->hw.func_caps.num_tx_qp,
10480 pf->hw.func_caps.num_vsis);
10481 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10482 dev_info(&pf->pdev->dev,
10483 "switch_mode=0x%04x, function_valid=0x%08x\n",
10484 pf->hw.dev_caps.switch_mode,
10485 pf->hw.dev_caps.valid_functions);
10486 dev_info(&pf->pdev->dev,
10487 "SR-IOV=%d, num_vfs for all function=%u\n",
10488 pf->hw.dev_caps.sr_iov_1_1,
10489 pf->hw.dev_caps.num_vfs);
10490 dev_info(&pf->pdev->dev,
10491 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10492 pf->hw.dev_caps.num_vsis,
10493 pf->hw.dev_caps.num_rx_qp,
10494 pf->hw.dev_caps.num_tx_qp);
10495 }
10496 }
10497 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10498 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10499 + pf->hw.func_caps.num_vfs)
10500 if (pf->hw.revision_id == 0 &&
10501 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10502 dev_info(&pf->pdev->dev,
10503 "got num_vsis %d, setting num_vsis to %d\n",
10504 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10505 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10506 }
10507 }
10508 return 0;
10509 }
10510
10511 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10512
10513
10514
10515
10516
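/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/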
10517 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10518 {
10519 struct i40e_vsi *vsi;
10520
10521
10522
10523
10524 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10525 static const u32 hkey[] = {
10526 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10527 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10528 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10529 0x95b3a76d};
10530 int i;
10531
10532 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10533 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10534 }
10535
10536 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10537 return;
10538
10539
10540 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10541
10542
10543 if (!vsi) {
10544 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10545 pf->vsi[pf->lan_vsi]->seid, 0);
10546 if (!vsi) {
10547 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10548 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10549 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10550 return;
10551 }
10552 }
10553
10554 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10555 }
10556
10557
10558
10559
10560
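/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/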
10561 static void i40e_fdir_teardown(struct i40e_pf *pf)
10562 {
10563 struct i40e_vsi *vsi;
10564
10565 i40e_fdir_filter_exit(pf);
10566 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10567 if (vsi)
10568 i40e_vsi_release(vsi);
10569 }
10570
10571
10572
10573
10574
10575
10576
10577
10578
10579 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10580 {
10581 struct i40e_cloud_filter *cfilter;
10582 struct i40e_pf *pf = vsi->back;
10583 struct hlist_node *node;
10584 i40e_status ret;
10585
10586
10587 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10588 cloud_node) {
10589 if (cfilter->seid != seid)
10590 continue;
10591
10592 if (cfilter->dst_port)
10593 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10594 true);
10595 else
10596 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10597
10598 if (ret) {
10599 dev_dbg(&pf->pdev->dev,
10600 "Failed to rebuild cloud filter, err %s aq_err %s\n",
10601 i40e_stat_str(&pf->hw, ret),
10602 i40e_aq_str(&pf->hw,
10603 pf->hw.aq.asq_last_status));
10604 return ret;
10605 }
10606 }
10607 return 0;
10608 }
10609
10610
10611
10612
10613
10614
10615
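/**
 * i40e_rebuild_channels - rebuild channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Returns 0 on success, negative value on failure
 **/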
10616 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10617 {
10618 struct i40e_channel *ch, *ch_tmp;
10619 i40e_status ret;
10620
10621 if (list_empty(&vsi->ch_list))
10622 return 0;
10623
10624 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10625 if (!ch->initialized)
10626 break;
10627
10628 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10629 if (ret) {
10630 dev_info(&vsi->back->pdev->dev,
10631 "failed to rebuild channels using uplink_seid %u\n",
10632 vsi->uplink_seid);
10633 return ret;
10634 }
10635
10636 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10637 if (ret) {
10638 dev_info(&vsi->back->pdev->dev,
10639 "failed to configure TX rings for channel %u\n",
10640 ch->seid);
10641 return ret;
10642 }
10643
10644 vsi->next_base_queue = vsi->next_base_queue +
10645 ch->num_queue_pairs;
10646 if (ch->max_tx_rate) {
10647 u64 credits = ch->max_tx_rate;
10648
10649 if (i40e_set_bw_limit(vsi, ch->seid,
10650 ch->max_tx_rate))
10651 return -EINVAL;
10652
10653 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10654 dev_dbg(&vsi->back->pdev->dev,
10655 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10656 ch->max_tx_rate,
10657 credits,
10658 ch->seid);
10659 }
10660 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10661 if (ret) {
10662 dev_dbg(&vsi->back->pdev->dev,
10663 "Failed to rebuild cloud filters for channel VSI %u\n",
10664 ch->seid);
10665 return ret;
10666 }
10667 }
10668 return 0;
10669 }
10670
10671
10672
10673
10674
10675
10676
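/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/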
10677 static void i40e_prep_for_reset(struct i40e_pf *pf)
10678 {
10679 struct i40e_hw *hw = &pf->hw;
10680 i40e_status ret = 0;
10681 u32 v;
10682
10683 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10684 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10685 return;
10686 if (i40e_check_asq_alive(&pf->hw))
10687 i40e_vc_notify_reset(pf);
10688
10689 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10690
10691
10692 i40e_pf_quiesce_all_vsi(pf);
10693
10694 for (v = 0; v < pf->num_alloc_vsi; v++) {
10695 if (pf->vsi[v])
10696 pf->vsi[v]->seid = 0;
10697 }
10698
10699 i40e_shutdown_adminq(&pf->hw);
10700
10701
10702 if (hw->hmc.hmc_obj) {
10703 ret = i40e_shutdown_lan_hmc(hw);
10704 if (ret)
10705 dev_warn(&pf->pdev->dev,
10706 "shutdown_lan_hmc failed: %d\n", ret);
10707 }
10708
10709
10710
10711
10712 i40e_ptp_save_hw_time(pf);
10713 }
10714
10715
10716
10717
10718
10719 static void i40e_send_version(struct i40e_pf *pf)
10720 {
10721 struct i40e_driver_version dv;
10722
10723 dv.major_version = 0xff;
10724 dv.minor_version = 0xff;
10725 dv.build_version = 0xff;
10726 dv.subbuild_version = 0;
10727 strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
10728 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
10729 }
10730
10731
10732
10733
10734
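/**
 * i40e_get_oem_version - get OEM version info from NVM, if present
 * @hw: pointer to the hardware structure
 **/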
10735 static void i40e_get_oem_version(struct i40e_hw *hw)
10736 {
10737 u16 block_offset = 0xffff;
10738 u16 block_length = 0;
10739 u16 capabilities = 0;
10740 u16 gen_snap = 0;
10741 u16 release = 0;
10742
10743 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
10744 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
10745 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
10746 #define I40E_NVM_OEM_GEN_OFFSET 0x02
10747 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
10748 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
10749 #define I40E_NVM_OEM_LENGTH 3
10750
10751
10752 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
10753 if (block_offset == 0xffff)
10754 return;
10755
10756
10757 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
10758 &block_length);
10759 if (block_length < I40E_NVM_OEM_LENGTH)
10760 return;
10761
10762
10763 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
10764 &capabilities);
10765 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
10766 return;
10767
10768 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
10769 &gen_snap);
10770 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
10771 &release);
10772 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
10773 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
10774 }
10775
10776
10777
10778
10779
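/**
 * i40e_reset - issue a PF reset
 * @pf: board private structure
 *
 * Returns 0 on success; on failure the __I40E_RESET_FAILED state bit is set.
 **/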
10780 static int i40e_reset(struct i40e_pf *pf)
10781 {
10782 struct i40e_hw *hw = &pf->hw;
10783 i40e_status ret;
10784
10785 ret = i40e_pf_reset(hw);
10786 if (ret) {
10787 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
10788 set_bit(__I40E_RESET_FAILED, pf->state);
10789 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10790 } else {
10791 pf->pfr_count++;
10792 }
10793 return ret;
10794 }
10795
10796
10797
10798
10799
10800
10801
10802
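/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock used to prevent
 * accesses to hardware has already been taken (e.g. the rtnl lock)
 **/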
10803 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10804 {
10805 const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
10806 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10807 struct i40e_hw *hw = &pf->hw;
10808 i40e_status ret;
10809 u32 val;
10810 int v;
10811
10812 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10813 is_recovery_mode_reported)
10814 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10815
10816 if (test_bit(__I40E_DOWN, pf->state) &&
10817 !test_bit(__I40E_RECOVERY_MODE, pf->state))
10818 goto clear_recovery;
10819 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10820
10821
10822 ret = i40e_init_adminq(&pf->hw);
10823 if (ret) {
10824 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10825 i40e_stat_str(&pf->hw, ret),
10826 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10827 goto clear_recovery;
10828 }
10829 i40e_get_oem_version(&pf->hw);
10830
10831 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
10832
10833 mdelay(1000);
10834 }
10835
10836
10837 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10838 i40e_verify_eeprom(pf);
10839
10840
10841
10842
10843
10844 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10845 if (i40e_get_capabilities(pf,
10846 i40e_aqc_opc_list_func_capabilities))
10847 goto end_unlock;
10848
10849 if (is_recovery_mode_reported) {
10850
10851
10852
10853 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10854 goto end_unlock;
10855 } else {
10856 if (!lock_acquired)
10857 rtnl_lock();
10858
10859
10860
10861
10862 free_irq(pf->pdev->irq, pf);
10863 i40e_clear_interrupt_scheme(pf);
10864 if (i40e_restore_interrupt_scheme(pf))
10865 goto end_unlock;
10866 }
10867
10868
10869 i40e_send_version(pf);
10870
10871
10872
10873
10874 goto end_unlock;
10875 }
10876
10877 i40e_clear_pxe_mode(hw);
10878 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10879 if (ret)
10880 goto end_core_reset;
10881
10882 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10883 hw->func_caps.num_rx_qp, 0, 0);
10884 if (ret) {
10885 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10886 goto end_core_reset;
10887 }
10888 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10889 if (ret) {
10890 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10891 goto end_core_reset;
10892 }
10893
10894 #ifdef CONFIG_I40E_DCB
10895
10896
10897
10898
10899 if (i40e_is_tc_mqprio_enabled(pf)) {
10900 i40e_aq_set_dcb_parameters(hw, false, NULL);
10901 } else {
10902 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10903 (hw->phy.link_info.link_speed &
10904 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10905 i40e_aq_set_dcb_parameters(hw, false, NULL);
10906 dev_warn(&pf->pdev->dev,
10907 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10908 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10909 } else {
10910 i40e_aq_set_dcb_parameters(hw, true, NULL);
10911 ret = i40e_init_pf_dcb(pf);
10912 if (ret) {
10913 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10914 ret);
10915 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10916
10917 }
10918 }
10919 }
10920
10921 #endif
10922 if (!lock_acquired)
10923 rtnl_lock();
10924 ret = i40e_setup_pf_switch(pf, reinit, true);
10925 if (ret)
10926 goto end_unlock;
10927
10928
10929
10930
10931 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10932 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10933 I40E_AQ_EVENT_MEDIA_NA |
10934 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10935 if (ret)
10936 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10937 i40e_stat_str(&pf->hw, ret),
10938 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10939
10940
10941
10942
10943
10944
10945
10946
10947 if (vsi->uplink_seid != pf->mac_seid) {
10948 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10949
10950 for (v = 0; v < I40E_MAX_VEB; v++) {
10951 if (!pf->veb[v])
10952 continue;
10953
10954 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10955 pf->veb[v]->uplink_seid == 0) {
10956 ret = i40e_reconstitute_veb(pf->veb[v]);
10957
10958 if (!ret)
10959 continue;
10960
10961
10962
10963
10964
10965
10966
10967 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10968 dev_info(&pf->pdev->dev,
10969 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10970 ret);
10971 vsi->uplink_seid = pf->mac_seid;
10972 break;
10973 } else if (pf->veb[v]->uplink_seid == 0) {
10974 dev_info(&pf->pdev->dev,
10975 "rebuild of orphan VEB failed: %d\n",
10976 ret);
10977 }
10978 }
10979 }
10980 }
10981
10982 if (vsi->uplink_seid == pf->mac_seid) {
10983 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10984
10985 ret = i40e_add_vsi(vsi);
10986 if (ret) {
10987 dev_info(&pf->pdev->dev,
10988 "rebuild of Main VSI failed: %d\n", ret);
10989 goto end_unlock;
10990 }
10991 }
10992
10993 if (vsi->mqprio_qopt.max_rate[0]) {
10994 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
10995 vsi->mqprio_qopt.max_rate[0]);
10996 u64 credits = 0;
10997
10998 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10999 if (ret)
11000 goto end_unlock;
11001
11002 credits = max_tx_rate;
11003 do_div(credits, I40E_BW_CREDIT_DIVISOR);
11004 dev_dbg(&vsi->back->pdev->dev,
11005 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
11006 max_tx_rate,
11007 credits,
11008 vsi->seid);
11009 }
11010
11011 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
11012 if (ret)
11013 goto end_unlock;
11014
11015
11016
11017
11018 ret = i40e_rebuild_channels(vsi);
11019 if (ret)
11020 goto end_unlock;
11021
11022
11023
11024
11025
11026 #define I40E_REG_MSS 0x000E64DC
11027 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
11028 #define I40E_64BYTE_MSS 0x400000
11029 val = rd32(hw, I40E_REG_MSS);
11030 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11031 val &= ~I40E_REG_MSS_MIN_MASK;
11032 val |= I40E_64BYTE_MSS;
11033 wr32(hw, I40E_REG_MSS, val);
11034 }
11035
11036 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
11037 msleep(75);
11038 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11039 if (ret)
11040 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11041 i40e_stat_str(&pf->hw, ret),
11042 i40e_aq_str(&pf->hw,
11043 pf->hw.aq.asq_last_status));
11044 }
11045
11046 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11047 ret = i40e_setup_misc_vector(pf);
11048
11049
11050
11051
11052
11053
11054
11055 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11056 pf->main_vsi_seid);
11057
11058
11059 i40e_pf_unquiesce_all_vsi(pf);
11060
11061
11062 if (!lock_acquired)
11063 rtnl_unlock();
11064
11065
11066 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
11067 if (ret)
11068 dev_warn(&pf->pdev->dev,
11069 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
11070 pf->cur_promisc ? "on" : "off",
11071 i40e_stat_str(&pf->hw, ret),
11072 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11073
11074 i40e_reset_all_vfs(pf, true);
11075
11076
11077 i40e_send_version(pf);
11078
11079
11080 goto end_core_reset;
11081
11082 end_unlock:
11083 if (!lock_acquired)
11084 rtnl_unlock();
11085 end_core_reset:
11086 clear_bit(__I40E_RESET_FAILED, pf->state);
11087 clear_recovery:
11088 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
11089 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
11090 }
11091
11092
11093
11094
11095
11096
11097
11098
11099 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
11100 bool lock_acquired)
11101 {
11102 int ret;
11103
11104 if (test_bit(__I40E_IN_REMOVE, pf->state))
11105 return;
11106
11107
11108
11109
11110 ret = i40e_reset(pf);
11111 if (!ret)
11112 i40e_rebuild(pf, reinit, lock_acquired);
11113 }
11114
11115
11116
11117
11118
11119
11120
11121
11122
11123
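/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock used to prevent
 * accesses to hardware has already been taken (e.g. the rtnl lock)
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/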
11124 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
11125 {
11126 i40e_prep_for_reset(pf);
11127 i40e_reset_and_rebuild(pf, false, lock_acquired);
11128 }
11129
11130
11131
11132
11133
11134
11135
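/**
 * i40e_handle_mdd_event - handle a Malicious Driver Detection event
 * @pf: pointer to the PF structure
 *
 * Called from the service task when __I40E_MDD_EVENT_PENDING is set;
 * identifies the offending PF/VF queues and disables the VF if needed.
 **/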
11136 static void i40e_handle_mdd_event(struct i40e_pf *pf)
11137 {
11138 struct i40e_hw *hw = &pf->hw;
11139 bool mdd_detected = false;
11140 struct i40e_vf *vf;
11141 u32 reg;
11142 int i;
11143
11144 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
11145 return;
11146
11147
11148 reg = rd32(hw, I40E_GL_MDET_TX);
11149 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
11150 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
11151 I40E_GL_MDET_TX_PF_NUM_SHIFT;
11152 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
11153 I40E_GL_MDET_TX_VF_NUM_SHIFT;
11154 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
11155 I40E_GL_MDET_TX_EVENT_SHIFT;
11156 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
11157 I40E_GL_MDET_TX_QUEUE_SHIFT) -
11158 pf->hw.func_caps.base_queue;
11159 if (netif_msg_tx_err(pf))
11160 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
11161 event, queue, pf_num, vf_num);
11162 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
11163 mdd_detected = true;
11164 }
11165 reg = rd32(hw, I40E_GL_MDET_RX);
11166 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
11167 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
11168 I40E_GL_MDET_RX_FUNCTION_SHIFT;
11169 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
11170 I40E_GL_MDET_RX_EVENT_SHIFT;
11171 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
11172 I40E_GL_MDET_RX_QUEUE_SHIFT) -
11173 pf->hw.func_caps.base_queue;
11174 if (netif_msg_rx_err(pf))
11175 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
11176 event, queue, func);
11177 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
11178 mdd_detected = true;
11179 }
11180
11181 if (mdd_detected) {
11182 reg = rd32(hw, I40E_PF_MDET_TX);
11183 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
11184 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
11185 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
11186 }
11187 reg = rd32(hw, I40E_PF_MDET_RX);
11188 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
11189 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
11190 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
11191 }
11192 }
11193
11194
11195 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
11196 vf = &(pf->vf[i]);
11197 reg = rd32(hw, I40E_VP_MDET_TX(i));
11198 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
11199 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
11200 vf->num_mdd_events++;
11201 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
11202 i);
11203 dev_info(&pf->pdev->dev,
11204 "Use PF Control I/F to re-enable the VF\n");
11205 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
11206 }
11207
11208 reg = rd32(hw, I40E_VP_MDET_RX(i));
11209 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
11210 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
11211 vf->num_mdd_events++;
11212 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
11213 i);
11214 dev_info(&pf->pdev->dev,
11215 "Use PF Control I/F to re-enable the VF\n");
11216 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
11217 }
11218 }
11219
11220
11221 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
11222 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
11223 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
11224 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
11225 i40e_flush(hw);
11226 }
11227
11228
11229
11230
11231
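/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/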
11232 static void i40e_service_task(struct work_struct *work)
11233 {
11234 struct i40e_pf *pf = container_of(work,
11235 struct i40e_pf,
11236 service_task);
11237 unsigned long start_time = jiffies;
11238
11239
11240 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
11241 test_bit(__I40E_SUSPENDED, pf->state))
11242 return;
11243
11244 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
11245 return;
11246
11247 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11248 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11249 i40e_sync_filters_subtask(pf);
11250 i40e_reset_subtask(pf);
11251 i40e_handle_mdd_event(pf);
11252 i40e_vc_process_vflr_event(pf);
11253 i40e_watchdog_subtask(pf);
11254 i40e_fdir_reinit_subtask(pf);
11255 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
11256
11257 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
11258 true);
11259 } else {
11260 i40e_client_subtask(pf);
11261 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
11262 pf->state))
11263 i40e_notify_client_of_l2_param_changes(
11264 pf->vsi[pf->lan_vsi]);
11265 }
11266 i40e_sync_filters_subtask(pf);
11267 } else {
11268 i40e_reset_subtask(pf);
11269 }
11270
11271 i40e_clean_adminq_subtask(pf);
11272
11273
11274 smp_mb__before_atomic();
11275 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11276
11277
11278
11279
11280
11281 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11282 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
11283 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
11284 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
11285 i40e_service_event_schedule(pf);
11286 }
11287
11288
11289
11290
11291
11292 static void i40e_service_timer(struct timer_list *t)
11293 {
11294 struct i40e_pf *pf = from_timer(pf, t, service_timer);
11295
11296 mod_timer(&pf->service_timer,
11297 round_jiffies(jiffies + pf->service_timer_period));
11298 i40e_service_event_schedule(pf);
11299 }
11300
11301
11302
11303
11304
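/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/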
11305 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11306 {
11307 struct i40e_pf *pf = vsi->back;
11308
11309 switch (vsi->type) {
11310 case I40E_VSI_MAIN:
11311 vsi->alloc_queue_pairs = pf->num_lan_qps;
11312 if (!vsi->num_tx_desc)
11313 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11314 I40E_REQ_DESCRIPTOR_MULTIPLE);
11315 if (!vsi->num_rx_desc)
11316 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11317 I40E_REQ_DESCRIPTOR_MULTIPLE);
11318 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11319 vsi->num_q_vectors = pf->num_lan_msix;
11320 else
11321 vsi->num_q_vectors = 1;
11322
11323 break;
11324
11325 case I40E_VSI_FDIR:
11326 vsi->alloc_queue_pairs = 1;
11327 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11328 I40E_REQ_DESCRIPTOR_MULTIPLE);
11329 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11330 I40E_REQ_DESCRIPTOR_MULTIPLE);
11331 vsi->num_q_vectors = pf->num_fdsb_msix;
11332 break;
11333
11334 case I40E_VSI_VMDQ2:
11335 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11336 if (!vsi->num_tx_desc)
11337 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11338 I40E_REQ_DESCRIPTOR_MULTIPLE);
11339 if (!vsi->num_rx_desc)
11340 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11341 I40E_REQ_DESCRIPTOR_MULTIPLE);
11342 vsi->num_q_vectors = pf->num_vmdq_msix;
11343 break;
11344
11345 case I40E_VSI_SRIOV:
11346 vsi->alloc_queue_pairs = pf->num_vf_qps;
11347 if (!vsi->num_tx_desc)
11348 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11349 I40E_REQ_DESCRIPTOR_MULTIPLE);
11350 if (!vsi->num_rx_desc)
11351 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11352 I40E_REQ_DESCRIPTOR_MULTIPLE);
11353 break;
11354
11355 default:
11356 WARN_ON(1);
11357 return -ENODATA;
11358 }
11359
11360 if (is_kdump_kernel()) {
11361 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
11362 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
11363 }
11364
11365 return 0;
11366 }
11367
11368
11369
11370
11371
11372
11373
11374
11375
11376 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11377 {
11378 struct i40e_ring **next_rings;
11379 int size;
11380 int ret = 0;
11381
11382
11383 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11384 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11385 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11386 if (!vsi->tx_rings)
11387 return -ENOMEM;
11388 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11389 if (i40e_enabled_xdp_vsi(vsi)) {
11390 vsi->xdp_rings = next_rings;
11391 next_rings += vsi->alloc_queue_pairs;
11392 }
11393 vsi->rx_rings = next_rings;
11394
11395 if (alloc_qvectors) {
11396
11397 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11398 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11399 if (!vsi->q_vectors) {
11400 ret = -ENOMEM;
11401 goto err_vectors;
11402 }
11403 }
11404 return ret;
11405
11406 err_vectors:
11407 kfree(vsi->tx_rings);
11408 return ret;
11409 }
11410
11411
11412
11413
11414
11415
11416
11417
11418
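/**
 * i40e_vsi_mem_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/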
11419 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11420 {
11421 int ret = -ENODEV;
11422 struct i40e_vsi *vsi;
11423 int vsi_idx;
11424 int i;
11425
11426
11427 mutex_lock(&pf->switch_mutex);
11428
11429
11430
11431
11432
11433
11434
11435 i = pf->next_vsi;
11436 while (i < pf->num_alloc_vsi && pf->vsi[i])
11437 i++;
11438 if (i >= pf->num_alloc_vsi) {
11439 i = 0;
11440 while (i < pf->next_vsi && pf->vsi[i])
11441 i++;
11442 }
11443
11444 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11445 vsi_idx = i;
11446 } else {
11447 ret = -ENODEV;
11448 goto unlock_pf;
11449 }
11450 pf->next_vsi = ++i;
11451
11452 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11453 if (!vsi) {
11454 ret = -ENOMEM;
11455 goto unlock_pf;
11456 }
11457 vsi->type = type;
11458 vsi->back = pf;
11459 set_bit(__I40E_VSI_DOWN, vsi->state);
11460 vsi->flags = 0;
11461 vsi->idx = vsi_idx;
11462 vsi->int_rate_limit = 0;
11463 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11464 pf->rss_table_size : 64;
11465 vsi->netdev_registered = false;
11466 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11467 hash_init(vsi->mac_filter_hash);
11468 vsi->irqs_ready = false;
11469
11470 if (type == I40E_VSI_MAIN) {
11471 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11472 if (!vsi->af_xdp_zc_qps)
11473 goto err_rings;
11474 }
11475
11476 ret = i40e_set_num_rings_in_vsi(vsi);
11477 if (ret)
11478 goto err_rings;
11479
11480 ret = i40e_vsi_alloc_arrays(vsi, true);
11481 if (ret)
11482 goto err_rings;
11483
11484
11485 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11486
11487
11488 spin_lock_init(&vsi->mac_filter_hash_lock);
11489 pf->vsi[vsi_idx] = vsi;
11490 ret = vsi_idx;
11491 goto unlock_pf;
11492
11493 err_rings:
11494 bitmap_free(vsi->af_xdp_zc_qps);
11495 pf->next_vsi = i - 1;
11496 kfree(vsi);
11497 unlock_pf:
11498 mutex_unlock(&pf->switch_mutex);
11499 return ret;
11500 }
11501
11502
11503
11504
11505
11506
11507
11508
11509
11510 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11511 {
11512
11513 if (free_qvectors) {
11514 kfree(vsi->q_vectors);
11515 vsi->q_vectors = NULL;
11516 }
11517 kfree(vsi->tx_rings);
11518 vsi->tx_rings = NULL;
11519 vsi->rx_rings = NULL;
11520 vsi->xdp_rings = NULL;
11521 }
11522
11523
11524
11525
11526
11527
11528 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11529 {
11530 if (!vsi)
11531 return;
11532
11533 kfree(vsi->rss_hkey_user);
11534 vsi->rss_hkey_user = NULL;
11535
11536 kfree(vsi->rss_lut_user);
11537 vsi->rss_lut_user = NULL;
11538 }
11539
11540
11541
11542
11543
11544 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11545 {
11546 struct i40e_pf *pf;
11547
11548 if (!vsi)
11549 return 0;
11550
11551 if (!vsi->back)
11552 goto free_vsi;
11553 pf = vsi->back;
11554
11555 mutex_lock(&pf->switch_mutex);
11556 if (!pf->vsi[vsi->idx]) {
11557 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11558 vsi->idx, vsi->idx, vsi->type);
11559 goto unlock_vsi;
11560 }
11561
11562 if (pf->vsi[vsi->idx] != vsi) {
11563 dev_err(&pf->pdev->dev,
11564 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11565 pf->vsi[vsi->idx]->idx,
11566 pf->vsi[vsi->idx]->type,
11567 vsi->idx, vsi->type);
11568 goto unlock_vsi;
11569 }
11570
11571
11572 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11573 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11574
11575 bitmap_free(vsi->af_xdp_zc_qps);
11576 i40e_vsi_free_arrays(vsi, true);
11577 i40e_clear_rss_config_user(vsi);
11578
11579 pf->vsi[vsi->idx] = NULL;
11580 if (vsi->idx < pf->next_vsi)
11581 pf->next_vsi = vsi->idx;
11582
11583 unlock_vsi:
11584 mutex_unlock(&pf->switch_mutex);
11585 free_vsi:
11586 kfree(vsi);
11587
11588 return 0;
11589 }
11590
11591
11592
11593
11594
11595 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
11596 {
11597 int i;
11598
11599 if (vsi->tx_rings && vsi->tx_rings[0]) {
11600 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11601 kfree_rcu(vsi->tx_rings[i], rcu);
11602 WRITE_ONCE(vsi->tx_rings[i], NULL);
11603 WRITE_ONCE(vsi->rx_rings[i], NULL);
11604 if (vsi->xdp_rings)
11605 WRITE_ONCE(vsi->xdp_rings[i], NULL);
11606 }
11607 }
11608 }
11609
11610
11611
11612
11613
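/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/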
11614 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11615 {
11616 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11617 struct i40e_pf *pf = vsi->back;
11618 struct i40e_ring *ring;
11619
11620
11621 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11622
11623 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11624 if (!ring)
11625 goto err_out;
11626
11627 ring->queue_index = i;
11628 ring->reg_idx = vsi->base_queue + i;
11629 ring->ring_active = false;
11630 ring->vsi = vsi;
11631 ring->netdev = vsi->netdev;
11632 ring->dev = &pf->pdev->dev;
11633 ring->count = vsi->num_tx_desc;
11634 ring->size = 0;
11635 ring->dcb_tc = 0;
11636 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11637 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11638 ring->itr_setting = pf->tx_itr_default;
11639 WRITE_ONCE(vsi->tx_rings[i], ring++);
11640
11641 if (!i40e_enabled_xdp_vsi(vsi))
11642 goto setup_rx;
11643
11644 ring->queue_index = vsi->alloc_queue_pairs + i;
11645 ring->reg_idx = vsi->base_queue + ring->queue_index;
11646 ring->ring_active = false;
11647 ring->vsi = vsi;
11648 ring->netdev = NULL;
11649 ring->dev = &pf->pdev->dev;
11650 ring->count = vsi->num_tx_desc;
11651 ring->size = 0;
11652 ring->dcb_tc = 0;
11653 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11654 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11655 set_ring_xdp(ring);
11656 ring->itr_setting = pf->tx_itr_default;
11657 WRITE_ONCE(vsi->xdp_rings[i], ring++);
11658
11659 setup_rx:
11660 ring->queue_index = i;
11661 ring->reg_idx = vsi->base_queue + i;
11662 ring->ring_active = false;
11663 ring->vsi = vsi;
11664 ring->netdev = vsi->netdev;
11665 ring->dev = &pf->pdev->dev;
11666 ring->count = vsi->num_rx_desc;
11667 ring->size = 0;
11668 ring->dcb_tc = 0;
11669 ring->itr_setting = pf->rx_itr_default;
11670 WRITE_ONCE(vsi->rx_rings[i], ring);
11671 }
11672
11673 return 0;
11674
11675 err_out:
11676 i40e_vsi_clear_rings(vsi);
11677 return -ENOMEM;
11678 }
11679
11680
11681
11682
11683
11684
11685
11686
11687 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
11688 {
11689 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
11690 I40E_MIN_MSIX, vectors);
11691 if (vectors < 0) {
11692 dev_info(&pf->pdev->dev,
11693 "MSI-X vector reservation failed: %d\n", vectors);
11694 vectors = 0;
11695 }
11696
11697 return vectors;
11698 }
11699
11700
11701
11702
11703
11704
11705
11706
11707
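/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/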
11708 static int i40e_init_msix(struct i40e_pf *pf)
11709 {
11710 struct i40e_hw *hw = &pf->hw;
11711 int cpus, extra_vectors;
11712 int vectors_left;
11713 int v_budget, i;
11714 int v_actual;
11715 int iwarp_requested = 0;
11716
11717 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
11718 return -ENODEV;
11719
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735 vectors_left = hw->func_caps.num_msix_vectors;
11736 v_budget = 0;
11737
11738
11739 if (vectors_left) {
11740 v_budget++;
11741 vectors_left--;
11742 }
11743
11744
11745
11746
11747
11748
11749
11750
11751 cpus = num_online_cpus();
11752 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11753 vectors_left -= pf->num_lan_msix;
11754
11755
11756 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11757 if (vectors_left) {
11758 pf->num_fdsb_msix = 1;
11759 v_budget++;
11760 vectors_left--;
11761 } else {
11762 pf->num_fdsb_msix = 0;
11763 }
11764 }
11765
11766
11767 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11768 iwarp_requested = pf->num_iwarp_msix;
11769
11770 if (!vectors_left)
11771 pf->num_iwarp_msix = 0;
11772 else if (vectors_left < pf->num_iwarp_msix)
11773 pf->num_iwarp_msix = 1;
11774 v_budget += pf->num_iwarp_msix;
11775 vectors_left -= pf->num_iwarp_msix;
11776 }
11777
11778
11779 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11780 if (!vectors_left) {
11781 pf->num_vmdq_msix = 0;
11782 pf->num_vmdq_qps = 0;
11783 } else {
11784 int vmdq_vecs_wanted =
11785 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11786 int vmdq_vecs =
11787 min_t(int, vectors_left, vmdq_vecs_wanted);
11788
11789
11790
11791
11792
11793
11794
11795 if (vectors_left < vmdq_vecs_wanted) {
11796 pf->num_vmdq_qps = 1;
11797 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11798 vmdq_vecs = min_t(int,
11799 vectors_left,
11800 vmdq_vecs_wanted);
11801 }
11802 pf->num_vmdq_msix = pf->num_vmdq_qps;
11803
11804 v_budget += vmdq_vecs;
11805 vectors_left -= vmdq_vecs;
11806 }
11807 }
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11819 pf->num_lan_msix += extra_vectors;
11820 vectors_left -= extra_vectors;
11821
11822 WARN(vectors_left < 0,
11823 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11824
11825 v_budget += pf->num_lan_msix;
11826 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11827 GFP_KERNEL);
11828 if (!pf->msix_entries)
11829 return -ENOMEM;
11830
11831 for (i = 0; i < v_budget; i++)
11832 pf->msix_entries[i].entry = i;
11833 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11834
11835 if (v_actual < I40E_MIN_MSIX) {
11836 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11837 kfree(pf->msix_entries);
11838 pf->msix_entries = NULL;
11839 pci_disable_msix(pf->pdev);
11840 return -ENODEV;
11841
11842 } else if (v_actual == I40E_MIN_MSIX) {
11843
11844 pf->num_vmdq_vsis = 0;
11845 pf->num_vmdq_qps = 0;
11846 pf->num_lan_qps = 1;
11847 pf->num_lan_msix = 1;
11848
11849 } else if (v_actual != v_budget) {
11850
11851
11852
11853
11854
11855 int vec;
11856
11857 dev_info(&pf->pdev->dev,
11858 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11859 v_actual, v_budget);
11860
11861 vec = v_actual - 1;
11862
11863
11864 pf->num_vmdq_msix = 1;
11865 pf->num_vmdq_vsis = 1;
11866 pf->num_vmdq_qps = 1;
11867
11868
11869 switch (vec) {
11870 case 2:
11871 pf->num_lan_msix = 1;
11872 break;
11873 case 3:
11874 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11875 pf->num_lan_msix = 1;
11876 pf->num_iwarp_msix = 1;
11877 } else {
11878 pf->num_lan_msix = 2;
11879 }
11880 break;
11881 default:
11882 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11883 pf->num_iwarp_msix = min_t(int, (vec / 3),
11884 iwarp_requested);
11885 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11886 I40E_DEFAULT_NUM_VMDQ_VSI);
11887 } else {
11888 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11889 I40E_DEFAULT_NUM_VMDQ_VSI);
11890 }
11891 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11892 pf->num_fdsb_msix = 1;
11893 vec--;
11894 }
11895 pf->num_lan_msix = min_t(int,
11896 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11897 pf->num_lan_msix);
11898 pf->num_lan_qps = pf->num_lan_msix;
11899 break;
11900 }
11901 }
11902
11903 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11904 (pf->num_fdsb_msix == 0)) {
11905 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11906 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11907 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11908 }
11909 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11910 (pf->num_vmdq_msix == 0)) {
11911 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11912 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11913 }
11914
11915 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11916 (pf->num_iwarp_msix == 0)) {
11917 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11918 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11919 }
11920 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11921 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11922 pf->num_lan_msix,
11923 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11924 pf->num_fdsb_msix,
11925 pf->num_iwarp_msix);
11926
11927 return v_actual;
11928 }
11929
11930
11931
11932
11933
11934
11935
11936
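/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * Allocates one q_vector, ties it to the VSI and, when a netdev exists,
 * registers its NAPI poll handler.  Returns 0 or -ENOMEM.
 **/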
11937 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11938 {
11939 struct i40e_q_vector *q_vector;
11940
11941
11942 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11943 if (!q_vector)
11944 return -ENOMEM;
11945
11946 q_vector->vsi = vsi;
11947 q_vector->v_idx = v_idx;
11948 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11949
11950 if (vsi->netdev)
11951 netif_napi_add(vsi->netdev, &q_vector->napi,
11952 i40e_napi_poll, NAPI_POLL_WEIGHT);
11953
11954
11955 vsi->q_vectors[v_idx] = q_vector;
11956
11957 return 0;
11958 }
11959
11960
11961
11962
11963
11964
11965
11966
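/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * In MSI-X mode one q_vector is allocated per configured vector; otherwise
 * only the LAN VSI gets a single q_vector.  Already-allocated vectors are
 * freed again on failure.
 **/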
11967 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11968 {
11969 struct i40e_pf *pf = vsi->back;
11970 int err, v_idx, num_q_vectors;
11971
11972
11973 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11974 num_q_vectors = vsi->num_q_vectors;
11975 else if (vsi == pf->vsi[pf->lan_vsi])
11976 num_q_vectors = 1;
11977 else
11978 return -EINVAL;
11979
11980 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11981 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11982 if (err)
11983 goto err_out;
11984 }
11985
11986 return 0;
11987
11988 err_out:
11989 while (v_idx--)
11990 i40e_free_q_vector(vsi, v_idx);
11991
11992 return err;
11993 }
11994
11995
11996
11997
11998
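/**
 * i40e_init_interrupt_scheme - Determine the proper interrupt scheme
 * @pf: board private structure
 *
 * Tries MSI-X first, falls back to MSI and finally to a legacy IRQ, then
 * allocates the irq_pile tracker and reserves vector 0 for the misc
 * ("other") interrupt.
 **/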
11999 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
12000 {
12001 int vectors = 0;
12002 ssize_t size;
12003
12004 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12005 vectors = i40e_init_msix(pf);
12006 if (vectors < 0) {
12007 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
12008 I40E_FLAG_IWARP_ENABLED |
12009 I40E_FLAG_RSS_ENABLED |
12010 I40E_FLAG_DCB_CAPABLE |
12011 I40E_FLAG_DCB_ENABLED |
12012 I40E_FLAG_SRIOV_ENABLED |
12013 I40E_FLAG_FD_SB_ENABLED |
12014 I40E_FLAG_FD_ATR_ENABLED |
12015 I40E_FLAG_VMDQ_ENABLED);
12016 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12017
12018
12019 i40e_determine_queue_usage(pf);
12020 }
12021 }
12022
12023 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
12024 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
12025 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
12026 vectors = pci_enable_msi(pf->pdev);
12027 if (vectors < 0) {
12028 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
12029 vectors);
12030 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
12031 }
12032 vectors = 1; /* one MSI or legacy vector */
12033 }
12034
12035 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
12036 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
12037
12038
12039 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
12040 pf->irq_pile = kzalloc(size, GFP_KERNEL);
12041 if (!pf->irq_pile)
12042 return -ENOMEM;
12043
12044 pf->irq_pile->num_entries = vectors;
12045
12046
12047 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
12048
12049 return 0;
12050 }
12051
12052
12053
12054
12055
12056
12057
12058
12059
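/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: board private structure
 *
 * Re-enables the MSI/MSI-X flags, re-runs i40e_init_interrupt_scheme(),
 * reallocates and remaps the q_vectors of every VSI and re-requests the
 * misc vector.  Used after the interrupt scheme has been torn down.
 **/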
12060 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
12061 {
12062 int err, i;
12063
12064
12065
12066
12067
12068 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
12069
12070 err = i40e_init_interrupt_scheme(pf);
12071 if (err)
12072 return err;
12073
12074
12075
12076
12077 for (i = 0; i < pf->num_alloc_vsi; i++) {
12078 if (pf->vsi[i]) {
12079 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
12080 if (err)
12081 goto err_unwind;
12082 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
12083 }
12084 }
12085
12086 err = i40e_setup_misc_vector(pf);
12087 if (err)
12088 goto err_unwind;
12089
12090 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
12091 i40e_client_update_msix_info(pf);
12092
12093 return 0;
12094
12095 err_unwind:
12096 while (i--) {
12097 if (pf->vsi[i])
12098 i40e_vsi_free_q_vectors(pf->vsi[i]);
12099 }
12100
12101 return err;
12102 }
12103
12104
12105
12106
12107
12108
12109
12110
12111
12112
12113
12114 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
12115 {
12116 int err;
12117
12118 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12119 err = i40e_setup_misc_vector(pf);
12120
12121 if (err) {
12122 dev_info(&pf->pdev->dev,
12123 "MSI-X misc vector request failed, error %d\n",
12124 err);
12125 return err;
12126 }
12127 } else {
12128 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
12129
12130 err = request_irq(pf->pdev->irq, i40e_intr, flags,
12131 pf->int_name, pf);
12132
12133 if (err) {
12134 dev_info(&pf->pdev->dev,
12135 "MSI/legacy misc vector request failed, error %d\n",
12136 err);
12137 return err;
12138 }
12139 i40e_enable_misc_int_causes(pf);
12140 i40e_irq_dynamic_enable_icr0(pf);
12141 }
12142
12143 return 0;
12144 }
12145
12146
12147
12148
12149
12150
12151
12152
12153
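/**
 * i40e_setup_misc_vector - Set up the misc vector for non-queue events
 * @pf: board private structure
 *
 * Requests MSI-X entry 0 for the non-queue ("other") causes such as admin
 * queue and link events, then (re)enables those causes and ICR0.
 **/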
12154 static int i40e_setup_misc_vector(struct i40e_pf *pf)
12155 {
12156 struct i40e_hw *hw = &pf->hw;
12157 int err = 0;
12158
12159
12160 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
12161 err = request_irq(pf->msix_entries[0].vector,
12162 i40e_intr, 0, pf->int_name, pf);
12163 if (err) {
12164 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
12165 dev_info(&pf->pdev->dev,
12166 "request_irq for %s failed: %d\n",
12167 pf->int_name, err);
12168 return -EFAULT;
12169 }
12170 }
12171
12172 i40e_enable_misc_int_causes(pf);
12173
12174
12175 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
12176 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
12177
12178 i40e_flush(hw);
12179
12180 i40e_irq_dynamic_enable_icr0(pf);
12181
12182 return err;
12183 }
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
12195 u8 *lut, u16 lut_size)
12196 {
12197 struct i40e_pf *pf = vsi->back;
12198 struct i40e_hw *hw = &pf->hw;
12199 int ret = 0;
12200
12201 if (seed) {
12202 ret = i40e_aq_get_rss_key(hw, vsi->id,
12203 (struct i40e_aqc_get_set_rss_key_data *)seed);
12204 if (ret) {
12205 dev_info(&pf->pdev->dev,
12206 "Cannot get RSS key, err %s aq_err %s\n",
12207 i40e_stat_str(&pf->hw, ret),
12208 i40e_aq_str(&pf->hw,
12209 pf->hw.aq.asq_last_status));
12210 return ret;
12211 }
12212 }
12213
12214 if (lut) {
12215 bool pf_lut = vsi->type == I40E_VSI_MAIN;
12216
12217 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
12218 if (ret) {
12219 dev_info(&pf->pdev->dev,
12220 "Cannot get RSS lut, err %s aq_err %s\n",
12221 i40e_stat_str(&pf->hw, ret),
12222 i40e_aq_str(&pf->hw,
12223 pf->hw.aq.asq_last_status));
12224 return ret;
12225 }
12226 }
12227
12228 return ret;
12229 }
12230
12231
12232
12233
12234
12235
12236
12237
12238
12239
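/**
 * i40e_config_rss_reg - Configure RSS key and LUT by writing registers
 * @vsi: Pointer to the VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Writes the hash key and LUT through the PF registers for the main VSI or
 * the per-VF registers for an SR-IOV VSI.  Returns 0 on success.
 **/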
12240 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12241 const u8 *lut, u16 lut_size)
12242 {
12243 struct i40e_pf *pf = vsi->back;
12244 struct i40e_hw *hw = &pf->hw;
12245 u16 vf_id = vsi->vf_id;
12246 u8 i;
12247
12248
12249 if (seed) {
12250 u32 *seed_dw = (u32 *)seed;
12251
12252 if (vsi->type == I40E_VSI_MAIN) {
12253 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12254 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12255 } else if (vsi->type == I40E_VSI_SRIOV) {
12256 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
12257 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
12258 } else {
12259 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
12260 }
12261 }
12262
12263 if (lut) {
12264 u32 *lut_dw = (u32 *)lut;
12265
12266 if (vsi->type == I40E_VSI_MAIN) {
12267 if (lut_size != I40E_HLUT_ARRAY_SIZE)
12268 return -EINVAL;
12269 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12270 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12271 } else if (vsi->type == I40E_VSI_SRIOV) {
12272 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
12273 return -EINVAL;
12274 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12275 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
12276 } else {
12277 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12278 }
12279 }
12280 i40e_flush(hw);
12281
12282 return 0;
12283 }
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12295 u8 *lut, u16 lut_size)
12296 {
12297 struct i40e_pf *pf = vsi->back;
12298 struct i40e_hw *hw = &pf->hw;
12299 u16 i;
12300
12301 if (seed) {
12302 u32 *seed_dw = (u32 *)seed;
12303
12304 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12305 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
12306 }
12307 if (lut) {
12308 u32 *lut_dw = (u32 *)lut;
12309
12310 if (lut_size != I40E_HLUT_ARRAY_SIZE)
12311 return -EINVAL;
12312 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12313 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
12314 }
12315
12316 return 0;
12317 }
12318
12319
12320
12321
12322
12323
12324
12325
12326
12327
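/**
 * i40e_config_rss - Configure the RSS key and lookup table
 * @vsi: Pointer to the VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Dispatches to the admin-queue or register-based method depending on
 * whether the hardware advertises I40E_HW_RSS_AQ_CAPABLE.
 **/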
12328 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12329 {
12330 struct i40e_pf *pf = vsi->back;
12331
12332 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12333 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
12334 else
12335 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
12336 }
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12348 {
12349 struct i40e_pf *pf = vsi->back;
12350
12351 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12352 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
12353 else
12354 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
12355 }
12356
12357
12358
12359
12360
12361
12362
12363
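/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: PF being configured
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue numbers for hashing
 *
 * Spreads the table entries round-robin over the queues, e.g. with
 * rss_table_size = 8 and rss_size = 3 the LUT becomes 0 1 2 0 1 2 0 1.
 **/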
12364 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12365 u16 rss_table_size, u16 rss_size)
12366 {
12367 u16 i;
12368
12369 for (i = 0; i < rss_table_size; i++)
12370 lut[i] = i % rss_size;
12371 }
12372
12373
12374
12375
12376
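/**
 * i40e_pf_config_rss - Prepare RSS for the PF's main VSI
 * @pf: board private structure
 *
 * Programs the default hash-enable (HENA) bits and LUT size, derives the
 * RSS size from the queue/TC layout if it is not already set, then writes
 * either the user-supplied or a freshly generated key and LUT.
 **/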
12377 static int i40e_pf_config_rss(struct i40e_pf *pf)
12378 {
12379 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12380 u8 seed[I40E_HKEY_ARRAY_SIZE];
12381 u8 *lut;
12382 struct i40e_hw *hw = &pf->hw;
12383 u32 reg_val;
12384 u64 hena;
12385 int ret;
12386
12387
12388 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12389 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12390 hena |= i40e_pf_get_default_rss_hena(pf);
12391
12392 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12393 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12394
12395
12396 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12397 reg_val = (pf->rss_table_size == 512) ?
12398 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12399 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12400 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12401
12402
12403 if (!vsi->rss_size) {
12404 u16 qcount;
12405
12406
12407
12408
12409
12410 qcount = vsi->num_queue_pairs /
12411 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12412 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12413 }
12414 if (!vsi->rss_size)
12415 return -EINVAL;
12416
12417 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12418 if (!lut)
12419 return -ENOMEM;
12420
12421
12422 if (vsi->rss_lut_user)
12423 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12424 else
12425 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12426
12427
12428
12429
12430 if (vsi->rss_hkey_user)
12431 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12432 else
12433 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12434 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12435 kfree(lut);
12436
12437 return ret;
12438 }
12439
12440
12441
12442
12443
12444
12445
12446
12447
12448
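/**
 * i40e_reconfig_rss_queues - Change the number of queues used for RSS
 * @pf: board private structure
 * @queue_count: number of queues requested
 *
 * Clamps the request to the number of online CPUs and the HW maximum,
 * rebuilds the PF if the queue count actually changes, and returns the
 * resulting alloc_rss_size.
 **/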
12449 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12450 {
12451 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12452 int new_rss_size;
12453
12454 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12455 return 0;
12456
12457 queue_count = min_t(int, queue_count, num_online_cpus());
12458 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12459
12460 if (queue_count != vsi->num_queue_pairs) {
12461 u16 qcount;
12462
12463 vsi->req_queue_pairs = queue_count;
12464 i40e_prep_for_reset(pf);
12465 if (test_bit(__I40E_IN_REMOVE, pf->state))
12466 return pf->alloc_rss_size;
12467
12468 pf->alloc_rss_size = new_rss_size;
12469
12470 i40e_reset_and_rebuild(pf, true, true);
12471
12472
12473
12474
12475 if (queue_count < vsi->rss_size) {
12476 i40e_clear_rss_config_user(vsi);
12477 dev_dbg(&pf->pdev->dev,
12478 "discard user configured hash keys and lut\n");
12479 }
12480
12481
12482 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12483 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12484
12485 i40e_pf_config_rss(pf);
12486 }
12487 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
12488 vsi->req_queue_pairs, pf->rss_size_max);
12489 return pf->alloc_rss_size;
12490 }
12491
12492
12493
12494
12495
12496 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12497 {
12498 i40e_status status;
12499 bool min_valid, max_valid;
12500 u32 max_bw, min_bw;
12501
12502 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12503 &min_valid, &max_valid);
12504
12505 if (!status) {
12506 if (min_valid)
12507 pf->min_bw = min_bw;
12508 if (max_valid)
12509 pf->max_bw = max_bw;
12510 }
12511
12512 return status;
12513 }
12514
12515
12516
12517
12518
12519 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12520 {
12521 struct i40e_aqc_configure_partition_bw_data bw_data;
12522 i40e_status status;
12523
12524 memset(&bw_data, 0, sizeof(bw_data));
12525
12526
12527 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12528 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12529 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12530
12531
12532 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12533
12534 return status;
12535 }
12536
12537
12538
12539
12540
12541 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12542 {
12543
12544 enum i40e_admin_queue_err last_aq_status;
12545 i40e_status ret;
12546 u16 nvm_word;
12547
12548 if (pf->hw.partition_id != 1) {
12549 dev_info(&pf->pdev->dev,
12550 "Commit BW only works on partition 1! This is partition %d",
12551 pf->hw.partition_id);
12552 ret = I40E_NOT_SUPPORTED;
12553 goto bw_commit_out;
12554 }
12555
12556
12557 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12558 last_aq_status = pf->hw.aq.asq_last_status;
12559 if (ret) {
12560 dev_info(&pf->pdev->dev,
12561 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12562 i40e_stat_str(&pf->hw, ret),
12563 i40e_aq_str(&pf->hw, last_aq_status));
12564 goto bw_commit_out;
12565 }
12566
12567
12568 ret = i40e_aq_read_nvm(&pf->hw,
12569 I40E_SR_NVM_CONTROL_WORD,
12570 0x10, sizeof(nvm_word), &nvm_word,
12571 false, NULL);
12572
12573
12574
12575 last_aq_status = pf->hw.aq.asq_last_status;
12576 i40e_release_nvm(&pf->hw);
12577 if (ret) {
12578 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12579 i40e_stat_str(&pf->hw, ret),
12580 i40e_aq_str(&pf->hw, last_aq_status));
12581 goto bw_commit_out;
12582 }
12583
12584
12585 msleep(50);
12586
12587
12588 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12589 last_aq_status = pf->hw.aq.asq_last_status;
12590 if (ret) {
12591 dev_info(&pf->pdev->dev,
12592 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12593 i40e_stat_str(&pf->hw, ret),
12594 i40e_aq_str(&pf->hw, last_aq_status));
12595 goto bw_commit_out;
12596 }
12597
12598
12599
12600
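/* Write the same word back unchanged; this appears to be what kicks off the
 * NVM update that makes the new partition BW settings persistent (the word
 * read above is otherwise unused here).
 */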
12601 ret = i40e_aq_update_nvm(&pf->hw,
12602 I40E_SR_NVM_CONTROL_WORD,
12603 0x10, sizeof(nvm_word),
12604 &nvm_word, true, 0, NULL);
12605
12606
12607
12608 last_aq_status = pf->hw.aq.asq_last_status;
12609 i40e_release_nvm(&pf->hw);
12610 if (ret)
12611 dev_info(&pf->pdev->dev,
12612 "BW settings NOT SAVED, err %s aq_err %s\n",
12613 i40e_stat_str(&pf->hw, ret),
12614 i40e_aq_str(&pf->hw, last_aq_status));
12615 bw_commit_out:
12616
12617 return ret;
12618 }
12619
12620
12621
12622
12623
12624
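/**
 * i40e_is_total_port_shutdown_enabled - Read the feature state from the NVM
 * @pf: board private structure
 *
 * Walks the EMP settings module in the NVM to check whether the
 * total-port-shutdown feature is enabled and forced on for this port.
 * Returns false (and warns) if any of the NVM reads fail.
 **/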
12625 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12626 {
12627 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12628 #define I40E_FEATURES_ENABLE_PTR 0x2A
12629 #define I40E_CURRENT_SETTING_PTR 0x2B
12630 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12631 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12632 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12633 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12634 i40e_status read_status = I40E_SUCCESS;
12635 u16 sr_emp_sr_settings_ptr = 0;
12636 u16 features_enable = 0;
12637 u16 link_behavior = 0;
12638 bool ret = false;
12639
12640 read_status = i40e_read_nvm_word(&pf->hw,
12641 I40E_SR_EMP_SR_SETTINGS_PTR,
12642 &sr_emp_sr_settings_ptr);
12643 if (read_status)
12644 goto err_nvm;
12645 read_status = i40e_read_nvm_word(&pf->hw,
12646 sr_emp_sr_settings_ptr +
12647 I40E_FEATURES_ENABLE_PTR,
12648 &features_enable);
12649 if (read_status)
12650 goto err_nvm;
12651 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12652 read_status = i40e_read_nvm_module_data(&pf->hw,
12653 I40E_SR_EMP_SR_SETTINGS_PTR,
12654 I40E_CURRENT_SETTING_PTR,
12655 I40E_LINK_BEHAVIOR_WORD_OFFSET,
12656 I40E_LINK_BEHAVIOR_WORD_LENGTH,
12657 &link_behavior);
12658 if (read_status)
12659 goto err_nvm;
12660 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12661 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12662 }
12663 return ret;
12664
12665 err_nvm:
12666 dev_warn(&pf->pdev->dev,
12667 "total-port-shutdown feature is off due to read nvm error: %s\n",
12668 i40e_stat_str(&pf->hw, read_status));
12669 return ret;
12670 }
12671
12672
12673
12674
12675
12676
12677
12678
12679
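/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * Seeds pf->flags, ITR defaults, RSS sizing, flow director, VMDq, iWARP and
 * SR-IOV settings from the function capabilities reported by the hardware,
 * and allocates the queue-pair tracking pile.
 **/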
12680 static int i40e_sw_init(struct i40e_pf *pf)
12681 {
12682 int err = 0;
12683 int size;
12684 u16 pow;
12685
12686
12687 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12688 I40E_FLAG_MSI_ENABLED |
12689 I40E_FLAG_MSIX_ENABLED;
12690
12691
12692 pf->rx_itr_default = I40E_ITR_RX_DEF;
12693 pf->tx_itr_default = I40E_ITR_TX_DEF;
12694
12695
12696
12697
12698 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12699 pf->alloc_rss_size = 1;
12700 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12701 pf->rss_size_max = min_t(int, pf->rss_size_max,
12702 pf->hw.func_caps.num_tx_qp);
12703
12704
12705 pow = roundup_pow_of_two(num_online_cpus());
12706 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12707
12708 if (pf->hw.func_caps.rss) {
12709 pf->flags |= I40E_FLAG_RSS_ENABLED;
12710 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12711 num_online_cpus());
12712 }
12713
12714
12715 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12716 pf->flags |= I40E_FLAG_MFP_ENABLED;
12717 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12718 if (i40e_get_partition_bw_setting(pf)) {
12719 dev_warn(&pf->pdev->dev,
12720 "Could not get partition bw settings\n");
12721 } else {
12722 dev_info(&pf->pdev->dev,
12723 "Partition BW Min = %8.8x, Max = %8.8x\n",
12724 pf->min_bw, pf->max_bw);
12725
12726
12727 i40e_set_partition_bw_setting(pf);
12728 }
12729 }
12730
12731 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12732 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12733 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12734 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12735 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12736 pf->hw.num_partitions > 1)
12737 dev_info(&pf->pdev->dev,
12738 "Flow Director Sideband mode Disabled in MFP mode\n");
12739 else
12740 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12741 pf->fdir_pf_filter_count =
12742 pf->hw.func_caps.fd_filters_guaranteed;
12743 pf->hw.fdir_shared_filter_count =
12744 pf->hw.func_caps.fd_filters_best_effort;
12745 }
12746
12747 if (pf->hw.mac.type == I40E_MAC_X722) {
12748 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12749 I40E_HW_128_QP_RSS_CAPABLE |
12750 I40E_HW_ATR_EVICT_CAPABLE |
12751 I40E_HW_WB_ON_ITR_CAPABLE |
12752 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12753 I40E_HW_NO_PCI_LINK_CHECK |
12754 I40E_HW_USE_SET_LLDP_MIB |
12755 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12756 I40E_HW_PTP_L4_CAPABLE |
12757 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12758 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12759
12760 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12761 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12762 I40E_FDEVICT_PCTYPE_DEFAULT) {
12763 dev_warn(&pf->pdev->dev,
12764 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12765 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12766 }
12767 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12768 ((pf->hw.aq.api_maj_ver == 1) &&
12769 (pf->hw.aq.api_min_ver > 4))) {
12770
12771 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12772 }
12773
12774
12775 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12776 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12777
12778 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12779 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12780 (pf->hw.aq.fw_maj_ver < 4))) {
12781 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12782
12783 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12784 }
12785
12786
12787 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12788 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12789 (pf->hw.aq.fw_maj_ver < 4)))
12790 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12791
12792
12793 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12794 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12795 (pf->hw.aq.fw_maj_ver >= 5)))
12796 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12797
12798
12799 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12800 pf->hw.aq.fw_maj_ver >= 6)
12801 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12802
12803 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12804 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12805 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12806 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12807 }
12808
12809 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12810 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12811
12812 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12813 }
12814
12815
12816
12817
12818
12819 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12820 pf->hw.func_caps.npar_enable &&
12821 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12822 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12823
12824 #ifdef CONFIG_PCI_IOV
12825 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12826 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12827 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12828 pf->num_req_vfs = min_t(int,
12829 pf->hw.func_caps.num_vfs,
12830 I40E_MAX_VF_COUNT);
12831 }
12832 #endif
12833 pf->eeprom_version = 0xDEAD;
12834 pf->lan_veb = I40E_NO_VEB;
12835 pf->lan_vsi = I40E_NO_VSI;
12836
12837
12838 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12839
12840
12841 size = sizeof(struct i40e_lump_tracking)
12842 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12843 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12844 if (!pf->qp_pile) {
12845 err = -ENOMEM;
12846 goto sw_init_done;
12847 }
12848 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12849
12850 pf->tx_timeout_recovery_level = 1;
12851
12852 if (pf->hw.mac.type != I40E_MAC_X722 &&
12853 i40e_is_total_port_shutdown_enabled(pf)) {
12854
12855
12856
12857 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12858 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12859 dev_info(&pf->pdev->dev,
12860 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12861 }
12862 mutex_init(&pf->switch_mutex);
12863
12864 sw_init_done:
12865 return err;
12866 }
12867
12868
12869
12870
12871
12872
12873
12874
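/**
 * i40e_set_ntuple - Set the ntuple feature flag and take action
 * @pf: board private structure to set ntuple on
 * @features: the feature set that the stack is suggesting
 *
 * Enables or disables flow director sideband based on NETIF_F_NTUPLE and
 * returns true when the change requires a PF reset.
 **/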
12875 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12876 {
12877 bool need_reset = false;
12878
12879
12880
12881
12882 if (features & NETIF_F_NTUPLE) {
12883
12884 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12885 need_reset = true;
12886
12887
12888
12889 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12890 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12891 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12892 }
12893 } else {
12894
12895 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12896 need_reset = true;
12897 i40e_fdir_filter_exit(pf);
12898 }
12899 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12900 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12901 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12902
12903
12904 pf->fd_add_err = 0;
12905 pf->fd_atr_cnt = 0;
12906
12907 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12908 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12909 (I40E_DEBUG_FD & pf->hw.debug_mask))
12910 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12911 }
12912 return need_reset;
12913 }
12914
12915
12916
12917
12918
12919 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12920 {
12921 struct i40e_pf *pf = vsi->back;
12922 struct i40e_hw *hw = &pf->hw;
12923 u16 vf_id = vsi->vf_id;
12924 u8 i;
12925
12926 if (vsi->type == I40E_VSI_MAIN) {
12927 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12928 wr32(hw, I40E_PFQF_HLUT(i), 0);
12929 } else if (vsi->type == I40E_VSI_SRIOV) {
12930 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12931 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12932 } else {
12933 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12934 }
12935 }
12936
12937
12938
12939
12940
12941
12942
12943 static int i40e_set_features(struct net_device *netdev,
12944 netdev_features_t features)
12945 {
12946 struct i40e_netdev_priv *np = netdev_priv(netdev);
12947 struct i40e_vsi *vsi = np->vsi;
12948 struct i40e_pf *pf = vsi->back;
12949 bool need_reset;
12950
12951 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12952 i40e_pf_config_rss(pf);
12953 else if (!(features & NETIF_F_RXHASH) &&
12954 netdev->features & NETIF_F_RXHASH)
12955 i40e_clear_rss_lut(vsi);
12956
12957 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12958 i40e_vlan_stripping_enable(vsi);
12959 else
12960 i40e_vlan_stripping_disable(vsi);
12961
12962 if (!(features & NETIF_F_HW_TC) &&
12963 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12964 dev_err(&pf->pdev->dev,
12965 "Offloaded tc filters active, can't turn hw_tc_offload off");
12966 return -EINVAL;
12967 }
12968
12969 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12970 i40e_del_all_macvlans(vsi);
12971
12972 need_reset = i40e_set_ntuple(pf, features);
12973
12974 if (need_reset)
12975 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12976
12977 return 0;
12978 }
12979
12980 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12981 unsigned int table, unsigned int idx,
12982 struct udp_tunnel_info *ti)
12983 {
12984 struct i40e_netdev_priv *np = netdev_priv(netdev);
12985 struct i40e_hw *hw = &np->vsi->back->hw;
12986 u8 type, filter_index;
12987 i40e_status ret;
12988
12989 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12990 I40E_AQC_TUNNEL_TYPE_NGE;
12991
12992 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12993 NULL);
12994 if (ret) {
12995 netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12996 i40e_stat_str(hw, ret),
12997 i40e_aq_str(hw, hw->aq.asq_last_status));
12998 return -EIO;
12999 }
13000
13001 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
13002 return 0;
13003 }
13004
13005 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
13006 unsigned int table, unsigned int idx,
13007 struct udp_tunnel_info *ti)
13008 {
13009 struct i40e_netdev_priv *np = netdev_priv(netdev);
13010 struct i40e_hw *hw = &np->vsi->back->hw;
13011 i40e_status ret;
13012
13013 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
13014 if (ret) {
13015 netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
13016 i40e_stat_str(hw, ret),
13017 i40e_aq_str(hw, hw->aq.asq_last_status));
13018 return -EIO;
13019 }
13020
13021 return 0;
13022 }
13023
13024 static int i40e_get_phys_port_id(struct net_device *netdev,
13025 struct netdev_phys_item_id *ppid)
13026 {
13027 struct i40e_netdev_priv *np = netdev_priv(netdev);
13028 struct i40e_pf *pf = np->vsi->back;
13029 struct i40e_hw *hw = &pf->hw;
13030
13031 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
13032 return -EOPNOTSUPP;
13033
13034 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
13035 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
13036
13037 return 0;
13038 }
13039
13040
13041
13042
13043
13044
13045
13046
13047
13048
13049
13050 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
13051 struct net_device *dev,
13052 const unsigned char *addr, u16 vid,
13053 u16 flags,
13054 struct netlink_ext_ack *extack)
13055 {
13056 struct i40e_netdev_priv *np = netdev_priv(dev);
13057 struct i40e_pf *pf = np->vsi->back;
13058 int err = 0;
13059
13060 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
13061 return -EOPNOTSUPP;
13062
13063 if (vid) {
13064 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
13065 return -EINVAL;
13066 }
13067
13068
13069
13070
13071 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
13072 netdev_info(dev, "FDB only supports static addresses\n");
13073 return -EINVAL;
13074 }
13075
13076 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
13077 err = dev_uc_add_excl(dev, addr);
13078 else if (is_multicast_ether_addr(addr))
13079 err = dev_mc_add_excl(dev, addr);
13080 else
13081 err = -EINVAL;
13082
13083
13084 if (err == -EEXIST && !(flags & NLM_F_EXCL))
13085 err = 0;
13086
13087 return err;
13088 }
13089
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
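/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL netlink header
 * @flags: bridge flags
 * @extack: netlink extended ack
 *
 * Only the PF VSI may change the mode.  Parses IFLA_BRIDGE_MODE and either
 * creates the VEB with the requested mode or, if one already exists,
 * switches it between VEB and VEPA (which triggers a PF reset).
 **/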
13106 static int i40e_ndo_bridge_setlink(struct net_device *dev,
13107 struct nlmsghdr *nlh,
13108 u16 flags,
13109 struct netlink_ext_ack *extack)
13110 {
13111 struct i40e_netdev_priv *np = netdev_priv(dev);
13112 struct i40e_vsi *vsi = np->vsi;
13113 struct i40e_pf *pf = vsi->back;
13114 struct i40e_veb *veb = NULL;
13115 struct nlattr *attr, *br_spec;
13116 int i, rem;
13117
13118
13119 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13120 return -EOPNOTSUPP;
13121
13122
13123 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13124 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13125 veb = pf->veb[i];
13126 }
13127
13128 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13129
13130 nla_for_each_nested(attr, br_spec, rem) {
13131 __u16 mode;
13132
13133 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13134 continue;
13135
13136 mode = nla_get_u16(attr);
13137 if ((mode != BRIDGE_MODE_VEPA) &&
13138 (mode != BRIDGE_MODE_VEB))
13139 return -EINVAL;
13140
13141
13142 if (!veb) {
13143 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13144 vsi->tc_config.enabled_tc);
13145 if (veb) {
13146 veb->bridge_mode = mode;
13147 i40e_config_bridge_mode(veb);
13148 } else {
13149
13150 return -ENOENT;
13151 }
13152 break;
13153 } else if (mode != veb->bridge_mode) {
13154
13155 veb->bridge_mode = mode;
13156
13157 if (mode == BRIDGE_MODE_VEB)
13158 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
13159 else
13160 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13161 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
13162 break;
13163 }
13164 }
13165
13166 return 0;
13167 }
13168
13169
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13182 struct net_device *dev,
13183 u32 __always_unused filter_mask,
13184 int nlflags)
13185 {
13186 struct i40e_netdev_priv *np = netdev_priv(dev);
13187 struct i40e_vsi *vsi = np->vsi;
13188 struct i40e_pf *pf = vsi->back;
13189 struct i40e_veb *veb = NULL;
13190 int i;
13191
13192
13193 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13194 return -EOPNOTSUPP;
13195
13196
13197 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13198 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13199 veb = pf->veb[i];
13200 }
13201
13202 if (!veb)
13203 return 0;
13204
13205 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
13206 0, 0, nlflags, filter_mask, NULL);
13207 }
13208
13209
13210
13211
13212
13213
13214
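/**
 * i40e_features_check - Validate that a packet conforms to offload limits
 * @skb: socket buffer being transmitted
 * @dev: this physical port's netdev
 * @features: offload features that the stack believes apply
 *
 * Clears GSO for very small MSS values and drops checksum/GSO offloads for
 * packets whose header layout the hardware cannot describe.
 **/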
13215 static netdev_features_t i40e_features_check(struct sk_buff *skb,
13216 struct net_device *dev,
13217 netdev_features_t features)
13218 {
13219 size_t len;
13220
13221
13222
13223
13224
13225 if (skb->ip_summed != CHECKSUM_PARTIAL)
13226 return features;
13227
13228
13229
13230
13231 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13232 features &= ~NETIF_F_GSO_MASK;
13233
13234
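/* The length checks below appear to mirror the Tx descriptor limits: the MAC
 * header length field holds at most 63 2-byte words and the IP/L4 header
 * length fields at most 127 4-byte words, so any header that does not fit
 * (or is not word-aligned) falls back to software offloads.
 */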
13235 len = skb_network_header(skb) - skb->data;
13236 if (len & ~(63 * 2))
13237 goto out_err;
13238
13239
13240 len = skb_transport_header(skb) - skb_network_header(skb);
13241 if (len & ~(127 * 4))
13242 goto out_err;
13243
13244 if (skb->encapsulation) {
13245
13246 len = skb_inner_network_header(skb) - skb_transport_header(skb);
13247 if (len & ~(127 * 2))
13248 goto out_err;
13249
13250
13251 len = skb_inner_transport_header(skb) -
13252 skb_inner_network_header(skb);
13253 if (len & ~(127 * 4))
13254 goto out_err;
13255 }
13256
13257
13258
13259
13260
13261
13262 return features;
13263 out_err:
13264 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13265 }
13266
13267
13268
13269
13270
13271
13272
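/**
 * i40e_xdp_setup - Add or remove an XDP program
 * @vsi: VSI being changed
 * @prog: XDP program, or NULL to remove
 * @extack: netlink extended ack
 *
 * Rejects MTUs that do not fit the Rx buffers, swaps the program in with
 * xchg() and performs a reset/rebuild whenever XDP is turned on or off.
 **/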
13273 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13274 struct netlink_ext_ack *extack)
13275 {
13276 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13277 struct i40e_pf *pf = vsi->back;
13278 struct bpf_prog *old_prog;
13279 bool need_reset;
13280 int i;
13281
13282
13283 if (frame_size > vsi->rx_buf_len) {
13284 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
13285 return -EINVAL;
13286 }
13287
13288
13289 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13290
13291 if (need_reset)
13292 i40e_prep_for_reset(pf);
13293
13294
13295 if (test_bit(__I40E_IN_REMOVE, pf->state))
13296 return -EINVAL;
13297
13298 old_prog = xchg(&vsi->xdp_prog, prog);
13299
13300 if (need_reset) {
13301 if (!prog)
13302
13303 synchronize_rcu();
13304 i40e_reset_and_rebuild(pf, true, true);
13305 }
13306
13307 for (i = 0; i < vsi->num_queue_pairs; i++)
13308 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13309
13310 if (old_prog)
13311 bpf_prog_put(old_prog);
13312
13313
13314
13315
13316 if (need_reset && prog)
13317 for (i = 0; i < vsi->num_queue_pairs; i++)
13318 if (vsi->xdp_rings[i]->xsk_pool)
13319 (void)i40e_xsk_wakeup(vsi->netdev, i,
13320 XDP_WAKEUP_RX);
13321
13322 return 0;
13323 }
13324
13325
13326
13327
13328
13329
13330
13331 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13332 {
13333 struct i40e_pf *pf = vsi->back;
13334 int timeout = 50;
13335
13336 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13337 timeout--;
13338 if (!timeout)
13339 return -EBUSY;
13340 usleep_range(1000, 2000);
13341 }
13342
13343 return 0;
13344 }
13345
13346
13347
13348
13349
13350 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13351 {
13352 struct i40e_pf *pf = vsi->back;
13353
13354 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13355 }
13356
13357
13358
13359
13360
13361
13362 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13363 {
13364 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13365 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13366 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13367 sizeof(vsi->tx_rings[queue_pair]->stats));
13368 if (i40e_enabled_xdp_vsi(vsi)) {
13369 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13370 sizeof(vsi->xdp_rings[queue_pair]->stats));
13371 }
13372 }
13373
13374
13375
13376
13377
13378
13379 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13380 {
13381 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13382 if (i40e_enabled_xdp_vsi(vsi)) {
13383
13384
13385
13386 synchronize_rcu();
13387 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13388 }
13389 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13390 }
13391
13392
13393
13394
13395
13396
13397
13398 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13399 bool enable)
13400 {
13401 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13402 struct i40e_q_vector *q_vector = rxr->q_vector;
13403
13404 if (!vsi->netdev)
13405 return;
13406
13407
13408 if (q_vector->rx.ring || q_vector->tx.ring) {
13409 if (enable)
13410 napi_enable(&q_vector->napi);
13411 else
13412 napi_disable(&q_vector->napi);
13413 }
13414 }
13415
13416
13417
13418
13419
13420
13421
13422
13423
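/**
 * i40e_queue_pair_toggle_rings - Enable/disable all rings of a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair index
 * @enable: true to enable, false to disable
 *
 * Toggles the Tx, Rx and (if present) XDP Tx rings of the pair and waits
 * for each to reach the requested state.
 **/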
13424 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13425 bool enable)
13426 {
13427 struct i40e_pf *pf = vsi->back;
13428 int pf_q, ret = 0;
13429
13430 pf_q = vsi->base_queue + queue_pair;
13431 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13432 false /* not the XDP Tx ring */, enable);
13433 if (ret) {
13434 dev_info(&pf->pdev->dev,
13435 "VSI seid %d Tx ring %d %sable timeout\n",
13436 vsi->seid, pf_q, (enable ? "en" : "dis"));
13437 return ret;
13438 }
13439
13440 i40e_control_rx_q(pf, pf_q, enable);
13441 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13442 if (ret) {
13443 dev_info(&pf->pdev->dev,
13444 "VSI seid %d Rx ring %d %sable timeout\n",
13445 vsi->seid, pf_q, (enable ? "en" : "dis"));
13446 return ret;
13447 }
13448
13449
13450
13451
13452 if (!enable)
13453 mdelay(50);
13454
13455 if (!i40e_enabled_xdp_vsi(vsi))
13456 return ret;
13457
13458 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13459 pf_q + vsi->alloc_queue_pairs,
13460 true /* the XDP Tx ring */, enable);
13461 if (ret) {
13462 dev_info(&pf->pdev->dev,
13463 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13464 vsi->seid, pf_q, (enable ? "en" : "dis"));
13465 }
13466
13467 return ret;
13468 }
13469
13470
13471
13472
13473
13474
13475 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13476 {
13477 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13478 struct i40e_pf *pf = vsi->back;
13479 struct i40e_hw *hw = &pf->hw;
13480
13481
13482 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13483 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13484 else
13485 i40e_irq_dynamic_enable_icr0(pf);
13486
13487 i40e_flush(hw);
13488 }
13489
13490
13491
13492
13493
13494
13495 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13496 {
13497 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13498 struct i40e_pf *pf = vsi->back;
13499 struct i40e_hw *hw = &pf->hw;
13500
13501
13502
13503
13504
13505
13506
13507 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13508 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13509
13510 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13511 i40e_flush(hw);
13512 synchronize_irq(pf->msix_entries[intpf].vector);
13513 } else {
13514
13515 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13516 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13517 i40e_flush(hw);
13518 synchronize_irq(pf->pdev->irq);
13519 }
13520 }
13521
13522
13523
13524
13525
13526
13527
13528
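/**
 * i40e_queue_pair_disable - Disable a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair index
 *
 * Marks the PF configuration as busy, masks the pair's interrupt, stops its
 * rings and NAPI, then cleans the rings and resets their statistics.
 **/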
13529 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13530 {
13531 int err;
13532
13533 err = i40e_enter_busy_conf(vsi);
13534 if (err)
13535 return err;
13536
13537 i40e_queue_pair_disable_irq(vsi, queue_pair);
13538 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false);
13539 i40e_queue_pair_toggle_napi(vsi, queue_pair, false);
13540 i40e_queue_pair_clean_rings(vsi, queue_pair);
13541 i40e_queue_pair_reset_stats(vsi, queue_pair);
13542
13543 return err;
13544 }
13545
13546
13547
13548
13549
13550
13551
13552
13553 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13554 {
13555 int err;
13556
13557 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13558 if (err)
13559 return err;
13560
13561 if (i40e_enabled_xdp_vsi(vsi)) {
13562 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13563 if (err)
13564 return err;
13565 }
13566
13567 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13568 if (err)
13569 return err;
13570
13571 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true);
13572 i40e_queue_pair_toggle_napi(vsi, queue_pair, true);
13573 i40e_queue_pair_enable_irq(vsi, queue_pair);
13574
13575 i40e_exit_busy_conf(vsi);
13576
13577 return err;
13578 }
13579
13580
13581
13582
13583
13584
13585 static int i40e_xdp(struct net_device *dev,
13586 struct netdev_bpf *xdp)
13587 {
13588 struct i40e_netdev_priv *np = netdev_priv(dev);
13589 struct i40e_vsi *vsi = np->vsi;
13590
13591 if (vsi->type != I40E_VSI_MAIN)
13592 return -EINVAL;
13593
13594 switch (xdp->command) {
13595 case XDP_SETUP_PROG:
13596 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13597 case XDP_SETUP_XSK_POOL:
13598 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13599 xdp->xsk.queue_id);
13600 default:
13601 return -EINVAL;
13602 }
13603 }
13604
13605 static const struct net_device_ops i40e_netdev_ops = {
13606 .ndo_open = i40e_open,
13607 .ndo_stop = i40e_close,
13608 .ndo_start_xmit = i40e_lan_xmit_frame,
13609 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13610 .ndo_set_rx_mode = i40e_set_rx_mode,
13611 .ndo_validate_addr = eth_validate_addr,
13612 .ndo_set_mac_address = i40e_set_mac,
13613 .ndo_change_mtu = i40e_change_mtu,
13614 .ndo_eth_ioctl = i40e_ioctl,
13615 .ndo_tx_timeout = i40e_tx_timeout,
13616 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13617 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13618 #ifdef CONFIG_NET_POLL_CONTROLLER
13619 .ndo_poll_controller = i40e_netpoll,
13620 #endif
13621 .ndo_setup_tc = __i40e_setup_tc,
13622 .ndo_select_queue = i40e_lan_select_queue,
13623 .ndo_set_features = i40e_set_features,
13624 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13625 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13626 .ndo_get_vf_stats = i40e_get_vf_stats,
13627 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13628 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13629 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13630 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13631 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13632 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13633 .ndo_fdb_add = i40e_ndo_fdb_add,
13634 .ndo_features_check = i40e_features_check,
13635 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13636 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13637 .ndo_bpf = i40e_xdp,
13638 .ndo_xdp_xmit = i40e_xdp_xmit,
13639 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13640 .ndo_dfwd_add_station = i40e_fwd_add,
13641 .ndo_dfwd_del_station = i40e_fwd_del,
13642 };
13643
13644
13645
13646
13647
13648
13649
13650 static int i40e_config_netdev(struct i40e_vsi *vsi)
13651 {
13652 struct i40e_pf *pf = vsi->back;
13653 struct i40e_hw *hw = &pf->hw;
13654 struct i40e_netdev_priv *np;
13655 struct net_device *netdev;
13656 u8 broadcast[ETH_ALEN];
13657 u8 mac_addr[ETH_ALEN];
13658 int etherdev_size;
13659 netdev_features_t hw_enc_features;
13660 netdev_features_t hw_features;
13661
13662 etherdev_size = sizeof(struct i40e_netdev_priv);
13663 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13664 if (!netdev)
13665 return -ENOMEM;
13666
13667 vsi->netdev = netdev;
13668 np = netdev_priv(netdev);
13669 np->vsi = vsi;
13670
13671 hw_enc_features = NETIF_F_SG |
13672 NETIF_F_HW_CSUM |
13673 NETIF_F_HIGHDMA |
13674 NETIF_F_SOFT_FEATURES |
13675 NETIF_F_TSO |
13676 NETIF_F_TSO_ECN |
13677 NETIF_F_TSO6 |
13678 NETIF_F_GSO_GRE |
13679 NETIF_F_GSO_GRE_CSUM |
13680 NETIF_F_GSO_PARTIAL |
13681 NETIF_F_GSO_IPXIP4 |
13682 NETIF_F_GSO_IPXIP6 |
13683 NETIF_F_GSO_UDP_TUNNEL |
13684 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13685 NETIF_F_GSO_UDP_L4 |
13686 NETIF_F_SCTP_CRC |
13687 NETIF_F_RXHASH |
13688 NETIF_F_RXCSUM |
13689 0;
13690
13691 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13692 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13693
13694 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13695
13696 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13697
13698 netdev->hw_enc_features |= hw_enc_features;
13699
13700
13701 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13702
13703 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
13704 NETIF_F_GSO_GRE_CSUM | \
13705 NETIF_F_GSO_IPXIP4 | \
13706 NETIF_F_GSO_IPXIP6 | \
13707 NETIF_F_GSO_UDP_TUNNEL | \
13708 NETIF_F_GSO_UDP_TUNNEL_CSUM)
13709
13710 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13711 netdev->features |= NETIF_F_GSO_PARTIAL |
13712 I40E_GSO_PARTIAL_FEATURES;
13713
13714 netdev->mpls_features |= NETIF_F_SG;
13715 netdev->mpls_features |= NETIF_F_HW_CSUM;
13716 netdev->mpls_features |= NETIF_F_TSO;
13717 netdev->mpls_features |= NETIF_F_TSO6;
13718 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13719
13720
13721 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13722
13723 hw_features = hw_enc_features |
13724 NETIF_F_HW_VLAN_CTAG_TX |
13725 NETIF_F_HW_VLAN_CTAG_RX;
13726
13727 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13728 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13729
13730 netdev->hw_features |= hw_features;
13731
13732 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13733 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13734
13735 netdev->features &= ~NETIF_F_HW_TC;
13736
13737 if (vsi->type == I40E_VSI_MAIN) {
13738 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13739 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750 i40e_rm_default_mac_filter(vsi, mac_addr);
13751 spin_lock_bh(&vsi->mac_filter_hash_lock);
13752 i40e_add_mac_filter(vsi, mac_addr);
13753 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13754 } else {
13755
13756
13757
13758
13759
13760 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13761 IFNAMSIZ - 4,
13762 pf->vsi[pf->lan_vsi]->netdev->name);
13763 eth_random_addr(mac_addr);
13764
13765 spin_lock_bh(&vsi->mac_filter_hash_lock);
13766 i40e_add_mac_filter(vsi, mac_addr);
13767 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13768 }
13769
13770
13771
13772
13773
13774
13775
13776
13777
13778
13779
13780
13781
13782
13783 eth_broadcast_addr(broadcast);
13784 spin_lock_bh(&vsi->mac_filter_hash_lock);
13785 i40e_add_mac_filter(vsi, broadcast);
13786 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13787
13788 eth_hw_addr_set(netdev, mac_addr);
13789 ether_addr_copy(netdev->perm_addr, mac_addr);
13790
13791
13792 netdev->neigh_priv_len = sizeof(u32) * 4;
13793
13794 netdev->priv_flags |= IFF_UNICAST_FLT;
13795 netdev->priv_flags |= IFF_SUPP_NOFCS;
13796
13797 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13798
13799 netdev->netdev_ops = &i40e_netdev_ops;
13800 netdev->watchdog_timeo = 5 * HZ;
13801 i40e_set_ethtool_ops(netdev);
13802
13803
13804 netdev->min_mtu = ETH_MIN_MTU;
13805 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13806
13807 return 0;
13808 }
13809
13810
13811
13812
13813
13814
13815
13816 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13817 {
13818
13819 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13820 return;
13821
13822 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13823 }
13824
13825
13826
13827
13828
13829
13830
13831 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13832 {
13833 struct i40e_veb *veb;
13834 struct i40e_pf *pf = vsi->back;
13835
13836
13837 if (vsi->veb_idx >= I40E_MAX_VEB)
13838 return 1;
13839
13840 veb = pf->veb[vsi->veb_idx];
13841 if (!veb) {
13842 dev_info(&pf->pdev->dev,
13843 "There is no veb associated with the bridge\n");
13844 return -ENOENT;
13845 }
13846
13847
13848 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13849 return 0;
13850 } else {
13851
13852 return 1;
13853 }
13854
13855
13856 return 0;
13857 }
13858
13859
13860
13861
13862
13863
13864
13865
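/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * For the main VSI this fetches and updates the existing HW VSI context;
 * for FDIR, VMDq2 and SR-IOV VSIs it builds a new context and adds the VSI
 * through the admin queue, then marks existing MAC filters for re-sync and
 * reads the VSI's bandwidth information.
 **/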
13866 static int i40e_add_vsi(struct i40e_vsi *vsi)
13867 {
13868 int ret = -ENODEV;
13869 struct i40e_pf *pf = vsi->back;
13870 struct i40e_hw *hw = &pf->hw;
13871 struct i40e_vsi_context ctxt;
13872 struct i40e_mac_filter *f;
13873 struct hlist_node *h;
13874 int bkt;
13875
13876 u8 enabled_tc = 0x1;
13877 int f_count = 0;
13878
13879 memset(&ctxt, 0, sizeof(ctxt));
13880 switch (vsi->type) {
13881 case I40E_VSI_MAIN:
13882
13883
13884
13885
13886
13887 ctxt.seid = pf->main_vsi_seid;
13888 ctxt.pf_num = pf->hw.pf_id;
13889 ctxt.vf_num = 0;
13890 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13891 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13892 if (ret) {
13893 dev_info(&pf->pdev->dev,
13894 "couldn't get PF vsi config, err %s aq_err %s\n",
13895 i40e_stat_str(&pf->hw, ret),
13896 i40e_aq_str(&pf->hw,
13897 pf->hw.aq.asq_last_status));
13898 return -ENOENT;
13899 }
13900 vsi->info = ctxt.info;
13901 vsi->info.valid_sections = 0;
13902
13903 vsi->seid = ctxt.seid;
13904 vsi->id = ctxt.vsi_number;
13905
13906 enabled_tc = i40e_pf_get_tc_map(pf);
13907
13908
13909
13910
13911
13912 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13913 memset(&ctxt, 0, sizeof(ctxt));
13914 ctxt.seid = pf->main_vsi_seid;
13915 ctxt.pf_num = pf->hw.pf_id;
13916 ctxt.vf_num = 0;
13917 ctxt.info.valid_sections |=
13918 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13919 ctxt.info.switch_id =
13920 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13921 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13922 if (ret) {
13923 dev_info(&pf->pdev->dev,
13924 "update vsi failed, err %s aq_err %s\n",
13925 i40e_stat_str(&pf->hw, ret),
13926 i40e_aq_str(&pf->hw,
13927 pf->hw.aq.asq_last_status));
13928 ret = -ENOENT;
13929 goto err;
13930 }
13931 }
13932
13933
13934 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13935 !(pf->hw.func_caps.iscsi)) {
13936 memset(&ctxt, 0, sizeof(ctxt));
13937 ctxt.seid = pf->main_vsi_seid;
13938 ctxt.pf_num = pf->hw.pf_id;
13939 ctxt.vf_num = 0;
13940 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13941 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13942 if (ret) {
13943 dev_info(&pf->pdev->dev,
13944 "update vsi failed, err %s aq_err %s\n",
13945 i40e_stat_str(&pf->hw, ret),
13946 i40e_aq_str(&pf->hw,
13947 pf->hw.aq.asq_last_status));
13948 ret = -ENOENT;
13949 goto err;
13950 }
13951
13952 i40e_vsi_update_queue_map(vsi, &ctxt);
13953 vsi->info.valid_sections = 0;
13954 } else {
13955
13956
13957
13958
13959
13960
13961 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13962 if (ret) {
13963
13964
13965
13966 dev_info(&pf->pdev->dev,
13967 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13968 enabled_tc,
13969 i40e_stat_str(&pf->hw, ret),
13970 i40e_aq_str(&pf->hw,
13971 pf->hw.aq.asq_last_status));
13972 }
13973 }
13974 break;
13975
13976 case I40E_VSI_FDIR:
13977 ctxt.pf_num = hw->pf_id;
13978 ctxt.vf_num = 0;
13979 ctxt.uplink_seid = vsi->uplink_seid;
13980 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13981 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13982 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13983 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13984 ctxt.info.valid_sections |=
13985 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13986 ctxt.info.switch_id =
13987 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13988 }
13989 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13990 break;
13991
13992 case I40E_VSI_VMDQ2:
13993 ctxt.pf_num = hw->pf_id;
13994 ctxt.vf_num = 0;
13995 ctxt.uplink_seid = vsi->uplink_seid;
13996 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13997 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13998
13999
14000
14001
14002 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14003 ctxt.info.valid_sections |=
14004 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14005 ctxt.info.switch_id =
14006 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14007 }
14008
14009
14010 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14011 break;
14012
14013 case I40E_VSI_SRIOV:
14014 ctxt.pf_num = hw->pf_id;
14015 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
14016 ctxt.uplink_seid = vsi->uplink_seid;
14017 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14018 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
14019
14020
14021
14022
14023 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14024 ctxt.info.valid_sections |=
14025 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14026 ctxt.info.switch_id =
14027 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14028 }
14029
14030 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
14031 ctxt.info.valid_sections |=
14032 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
14033 ctxt.info.queueing_opt_flags |=
14034 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
14035 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
14036 }
14037
14038 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
14039 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
14040 if (pf->vf[vsi->vf_id].spoofchk) {
14041 ctxt.info.valid_sections |=
14042 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
14043 ctxt.info.sec_flags |=
14044 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
14045 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
14046 }
14047
14048 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14049 break;
14050
14051 case I40E_VSI_IWARP:
14052
14053 break;
14054
14055 default:
14056 return -ENODEV;
14057 }
14058
14059 if (vsi->type != I40E_VSI_MAIN) {
14060 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
14061 if (ret) {
14062 dev_info(&vsi->back->pdev->dev,
14063 "add vsi failed, err %s aq_err %s\n",
14064 i40e_stat_str(&pf->hw, ret),
14065 i40e_aq_str(&pf->hw,
14066 pf->hw.aq.asq_last_status));
14067 ret = -ENOENT;
14068 goto err;
14069 }
14070 vsi->info = ctxt.info;
14071 vsi->info.valid_sections = 0;
14072 vsi->seid = ctxt.seid;
14073 vsi->id = ctxt.vsi_number;
14074 }
14075
14076 vsi->active_filters = 0;
14077 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
14078 spin_lock_bh(&vsi->mac_filter_hash_lock);
14079
14080 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
14081 f->state = I40E_FILTER_NEW;
14082 f_count++;
14083 }
14084 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14085
14086 if (f_count) {
14087 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
14088 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
14089 }
14090
14091
14092 ret = i40e_vsi_get_bw_info(vsi);
14093 if (ret) {
14094 dev_info(&pf->pdev->dev,
14095 "couldn't get vsi bw info, err %s aq_err %s\n",
14096 i40e_stat_str(&pf->hw, ret),
14097 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14098
14099 ret = 0;
14100 }
14101
14102 err:
14103 return ret;
14104 }
14105
14106
14107
14108
14109
14110
14111
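/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Refuses to remove a VEB-owner VSI or the active PF VSI, unregisters the
 * netdev, removes all MAC filters, deletes the VSI from the HW switch and
 * finally releases the uplink VEB if this was its last user.
 **/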
14112 int i40e_vsi_release(struct i40e_vsi *vsi)
14113 {
14114 struct i40e_mac_filter *f;
14115 struct hlist_node *h;
14116 struct i40e_veb *veb = NULL;
14117 struct i40e_pf *pf;
14118 u16 uplink_seid;
14119 int i, n, bkt;
14120
14121 pf = vsi->back;
14122
14123
14124 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
14125 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
14126 vsi->seid, vsi->uplink_seid);
14127 return -ENODEV;
14128 }
14129 if (vsi == pf->vsi[pf->lan_vsi] &&
14130 !test_bit(__I40E_DOWN, pf->state)) {
14131 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
14132 return -ENODEV;
14133 }
14134 set_bit(__I40E_VSI_RELEASING, vsi->state);
14135 uplink_seid = vsi->uplink_seid;
14136 if (vsi->type != I40E_VSI_SRIOV) {
14137 if (vsi->netdev_registered) {
14138 vsi->netdev_registered = false;
14139 if (vsi->netdev) {
14140
14141 unregister_netdev(vsi->netdev);
14142 }
14143 } else {
14144 i40e_vsi_close(vsi);
14145 }
14146 i40e_vsi_disable_irq(vsi);
14147 }
14148
14149 spin_lock_bh(&vsi->mac_filter_hash_lock);
14150
14151
14152 if (vsi->netdev) {
14153 __dev_uc_unsync(vsi->netdev, NULL);
14154 __dev_mc_unsync(vsi->netdev, NULL);
14155 }
14156
14157
14158 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
14159 __i40e_del_filter(vsi, f);
14160
14161 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14162
14163 i40e_sync_vsi_filters(vsi);
14164
14165 i40e_vsi_delete(vsi);
14166 i40e_vsi_free_q_vectors(vsi);
14167 if (vsi->netdev) {
14168 free_netdev(vsi->netdev);
14169 vsi->netdev = NULL;
14170 }
14171 i40e_vsi_clear_rings(vsi);
14172 i40e_vsi_clear(vsi);
14173
14174
14175
14176
14177
14178
14179
14180
14181
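/* Count the remaining VSIs and VEBs that still use this uplink; if this
 * VSI was the last user of a non-floating VEB (uplink_seid != 0), release
 * that VEB as well.
 */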
14182 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
14183 if (pf->vsi[i] &&
14184 pf->vsi[i]->uplink_seid == uplink_seid &&
14185 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14186 n++;
14187 }
14188 }
14189 for (i = 0; i < I40E_MAX_VEB; i++) {
14190 if (!pf->veb[i])
14191 continue;
14192 if (pf->veb[i]->uplink_seid == uplink_seid)
14193 n++;
14194 if (pf->veb[i]->seid == uplink_seid)
14195 veb = pf->veb[i];
14196 }
14197 if (n == 0 && veb && veb->uplink_seid != 0)
14198 i40e_veb_release(veb);
14199
14200 return 0;
14201 }
14202
14203
14204
14205
14206
14207
14208
14209
14210
14211
14212
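/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: the VSI being configured
 *
 * Allocates the sw q_vectors and, when MSI-X is enabled, reserves a block
 * of interrupt vectors from the PF's irq_pile for this VSI.
 *
 * Returns 0 on success, negative value on failure
 **/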
14213 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
14214 {
14215 int ret = -ENOENT;
14216 struct i40e_pf *pf = vsi->back;
14217
14218 if (vsi->q_vectors[0]) {
14219 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
14220 vsi->seid);
14221 return -EEXIST;
14222 }
14223
14224 if (vsi->base_vector) {
14225 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14226 vsi->seid, vsi->base_vector);
14227 return -EEXIST;
14228 }
14229
14230 ret = i40e_vsi_alloc_q_vectors(vsi);
14231 if (ret) {
14232 dev_info(&pf->pdev->dev,
14233 "failed to allocate %d q_vectors for VSI %d, ret=%d\n",
14234 vsi->num_q_vectors, vsi->seid, ret);
14235 vsi->num_q_vectors = 0;
14236 goto vector_setup_out;
14237 }
14238
14239
14240
14241
14242 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
14243 return ret;
14244 if (vsi->num_q_vectors)
14245 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14246 vsi->num_q_vectors, vsi->idx);
14247 if (vsi->base_vector < 0) {
14248 dev_info(&pf->pdev->dev,
14249 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14250 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14251 i40e_vsi_free_q_vectors(vsi);
14252 ret = -ENOENT;
14253 goto vector_setup_out;
14254 }
14255
14256 vector_setup_out:
14257 return ret;
14258 }
14259
14260
14261
14262
14263
14264
14265
14266
14267
14268
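/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the VSI being re-initialized
 *
 * Gives back the queue pairs and rings currently held by the VSI, resizes
 * and reallocates the queue/ring arrays, reconfigures the LAN VSI TCs and
 * remaps rings to vectors.
 *
 * Returns the VSI on success, NULL on failure.
 **/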
14269 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14270 {
14271 u16 alloc_queue_pairs;
14272 struct i40e_pf *pf;
14273 u8 enabled_tc;
14274 int ret;
14275
14276 if (!vsi)
14277 return NULL;
14278
14279 pf = vsi->back;
14280
14281 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14282 i40e_vsi_clear_rings(vsi);
14283
14284 i40e_vsi_free_arrays(vsi, false);
14285 i40e_set_num_rings_in_vsi(vsi);
14286 ret = i40e_vsi_alloc_arrays(vsi, false);
14287 if (ret)
14288 goto err_vsi;
14289
14290 alloc_queue_pairs = vsi->alloc_queue_pairs *
14291 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14292
14293 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14294 if (ret < 0) {
14295 dev_info(&pf->pdev->dev,
14296 "failed to get tracking for %d queues for VSI %d err %d\n",
14297 alloc_queue_pairs, vsi->seid, ret);
14298 goto err_vsi;
14299 }
14300 vsi->base_queue = ret;
14301
14302
14303
14304
14305 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14306 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14307 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14308 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14309 if (vsi->type == I40E_VSI_MAIN)
14310 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14311
14312
14313 ret = i40e_alloc_rings(vsi);
14314 if (ret)
14315 goto err_rings;
14316
14317
14318 i40e_vsi_map_rings_to_vectors(vsi);
14319 return vsi;
14320
14321 err_rings:
14322 i40e_vsi_free_q_vectors(vsi);
14323 if (vsi->netdev_registered) {
14324 vsi->netdev_registered = false;
14325 unregister_netdev(vsi->netdev);
14326 free_netdev(vsi->netdev);
14327 vsi->netdev = NULL;
14328 }
14329 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14330 err_vsi:
14331 i40e_vsi_clear(vsi);
14332 return NULL;
14333 }
14334
14335
14336
14337
14338
14339
14340
14341
14342
14343
14344
14345
14346
14347
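/**
 * i40e_vsi_setup - Set up a VSI of a given type
 * @pf: board private structure
 * @type: VSI type (I40E_VSI_MAIN, I40E_VSI_VMDQ2, I40E_VSI_SRIOV, ...)
 * @uplink_seid: the switch element to link to
 * @param1: usage-dependent parameter (currently the VF id for SRIOV VSIs)
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
 **/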
14348 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14349 u16 uplink_seid, u32 param1)
14350 {
14351 struct i40e_vsi *vsi = NULL;
14352 struct i40e_veb *veb = NULL;
14353 u16 alloc_queue_pairs;
14354 int ret, i;
14355 int v_idx;
14356
14357
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367
14368
14369
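/* The uplink_seid may name the PF port (no VEB needed), an existing VEB,
 * or a VSI; in the VSI case a new VEB is inserted between that VSI and
 * its current uplink, and the new VSI hangs off that VEB.  Start by
 * looking for an existing VEB with this seid.
 */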
14370 for (i = 0; i < I40E_MAX_VEB; i++) {
14371 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14372 veb = pf->veb[i];
14373 break;
14374 }
14375 }
14376
14377 if (!veb && uplink_seid != pf->mac_seid) {
14378
14379 for (i = 0; i < pf->num_alloc_vsi; i++) {
14380 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14381 vsi = pf->vsi[i];
14382 break;
14383 }
14384 }
14385 if (!vsi) {
14386 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14387 uplink_seid);
14388 return NULL;
14389 }
14390
14391 if (vsi->uplink_seid == pf->mac_seid)
14392 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14393 vsi->tc_config.enabled_tc);
14394 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14395 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14396 vsi->tc_config.enabled_tc);
14397 if (veb) {
14398 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14399 dev_info(&vsi->back->pdev->dev,
14400 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14401 return NULL;
14402 }
14403
14404
14405
14406
14407 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14408 veb->bridge_mode = BRIDGE_MODE_VEPA;
14409 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14410 }
14411 i40e_config_bridge_mode(veb);
14412 }
14413 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14414 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14415 veb = pf->veb[i];
14416 }
14417 if (!veb) {
14418 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14419 return NULL;
14420 }
14421
14422 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14423 uplink_seid = veb->seid;
14424 }
14425
14426
14427 v_idx = i40e_vsi_mem_alloc(pf, type);
14428 if (v_idx < 0)
14429 goto err_alloc;
14430 vsi = pf->vsi[v_idx];
14431 if (!vsi)
14432 goto err_alloc;
14433 vsi->type = type;
14434 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14435
14436 if (type == I40E_VSI_MAIN)
14437 pf->lan_vsi = v_idx;
14438 else if (type == I40E_VSI_SRIOV)
14439 vsi->vf_id = param1;
14440
14441 alloc_queue_pairs = vsi->alloc_queue_pairs *
14442 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14443
14444 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14445 if (ret < 0) {
14446 dev_info(&pf->pdev->dev,
14447 "failed to get tracking for %d queues for VSI %d err=%d\n",
14448 alloc_queue_pairs, vsi->seid, ret);
14449 goto err_vsi;
14450 }
14451 vsi->base_queue = ret;
14452
14453
14454 vsi->uplink_seid = uplink_seid;
14455 ret = i40e_add_vsi(vsi);
14456 if (ret)
14457 goto err_vsi;
14458
14459 switch (vsi->type) {
14460
14461 case I40E_VSI_MAIN:
14462 case I40E_VSI_VMDQ2:
14463 ret = i40e_config_netdev(vsi);
14464 if (ret)
14465 goto err_netdev;
14466 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14467 if (ret)
14468 goto err_netdev;
14469 ret = register_netdev(vsi->netdev);
14470 if (ret)
14471 goto err_netdev;
14472 vsi->netdev_registered = true;
14473 netif_carrier_off(vsi->netdev);
14474 #ifdef CONFIG_I40E_DCB
14475
14476 i40e_dcbnl_setup(vsi);
14477 #endif
14478 fallthrough;
14479 case I40E_VSI_FDIR:
14480
14481 ret = i40e_vsi_setup_vectors(vsi);
14482 if (ret)
14483 goto err_msix;
14484
14485 ret = i40e_alloc_rings(vsi);
14486 if (ret)
14487 goto err_rings;
14488
14489
14490 i40e_vsi_map_rings_to_vectors(vsi);
14491
14492 i40e_vsi_reset_stats(vsi);
14493 break;
14494 default:
14495
14496 break;
14497 }
14498
14499 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14500 (vsi->type == I40E_VSI_VMDQ2)) {
14501 ret = i40e_vsi_config_rss(vsi);
14502 }
14503 return vsi;
14504
14505 err_rings:
14506 i40e_vsi_free_q_vectors(vsi);
14507 err_msix:
14508 if (vsi->netdev_registered) {
14509 vsi->netdev_registered = false;
14510 unregister_netdev(vsi->netdev);
14511 free_netdev(vsi->netdev);
14512 vsi->netdev = NULL;
14513 }
14514 err_netdev:
14515 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14516 err_vsi:
14517 i40e_vsi_clear(vsi);
14518 err_alloc:
14519 return NULL;
14520 }
14521
14522
14523
14524
14525
14526
14527
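/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for the given VEB
 **/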
14528 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14529 {
14530 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14531 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14532 struct i40e_pf *pf = veb->pf;
14533 struct i40e_hw *hw = &pf->hw;
14534 u32 tc_bw_max;
14535 int ret = 0;
14536 int i;
14537
14538 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14539 &bw_data, NULL);
14540 if (ret) {
14541 dev_info(&pf->pdev->dev,
14542 "query veb bw config failed, err %s aq_err %s\n",
14543 i40e_stat_str(&pf->hw, ret),
14544 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14545 goto out;
14546 }
14547
14548 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14549 &ets_data, NULL);
14550 if (ret) {
14551 dev_info(&pf->pdev->dev,
14552 "query veb bw ets config failed, err %s aq_err %s\n",
14553 i40e_stat_str(&pf->hw, ret),
14554 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14555 goto out;
14556 }
14557
14558 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14559 veb->bw_max_quanta = ets_data.tc_bw_max;
14560 veb->is_abs_credits = bw_data.absolute_credits_enable;
14561 veb->enabled_tc = ets_data.tc_valid_bits;
14562 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14563 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14564 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14565 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14566 veb->bw_tc_limit_credits[i] =
14567 le16_to_cpu(bw_data.tc_bw_limits[i]);
14568 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
14569 }
14570
14571 out:
14572 return ret;
14573 }
14574
14575
14576
14577
14578
14579
14580
14581
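/**
 * i40e_veb_mem_alloc - Allocate the next available struct veb in the PF
 * @pf: board private structure
 *
 * Finds the first free slot in pf->veb[], allocates a struct i40e_veb for
 * it and returns the index, or a negative errno if no slot or memory is
 * available.
 **/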
14582 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14583 {
14584 int ret = -ENOENT;
14585 struct i40e_veb *veb;
14586 int i;
14587
14588
14589 mutex_lock(&pf->switch_mutex);
14590
14591
14592
14593
14594
14595
14596
14597 i = 0;
14598 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14599 i++;
14600 if (i >= I40E_MAX_VEB) {
14601 ret = -ENOMEM;
14602 goto err_alloc_veb;
14603 }
14604
14605 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14606 if (!veb) {
14607 ret = -ENOMEM;
14608 goto err_alloc_veb;
14609 }
14610 veb->pf = pf;
14611 veb->idx = i;
14612 veb->enabled_tc = 1;
14613
14614 pf->veb[i] = veb;
14615 ret = i;
14616 err_alloc_veb:
14617 mutex_unlock(&pf->switch_mutex);
14618 return ret;
14619 }
14620
14621
14622
14623
14624
14625
14626
14627
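/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * Recursively walks the switch tree below the given VEB, releasing any
 * child VEBs and non-owner VSIs before finally releasing the VEB itself.
 **/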
14628 static void i40e_switch_branch_release(struct i40e_veb *branch)
14629 {
14630 struct i40e_pf *pf = branch->pf;
14631 u16 branch_seid = branch->seid;
14632 u16 veb_idx = branch->idx;
14633 int i;
14634
14635
14636 for (i = 0; i < I40E_MAX_VEB; i++) {
14637 if (!pf->veb[i])
14638 continue;
14639 if (pf->veb[i]->uplink_seid == branch->seid)
14640 i40e_switch_branch_release(pf->veb[i]);
14641 }
14642
14643
14644
14645
14646
14647
14648 for (i = 0; i < pf->num_alloc_vsi; i++) {
14649 if (!pf->vsi[i])
14650 continue;
14651 if (pf->vsi[i]->uplink_seid == branch_seid &&
14652 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14653 i40e_vsi_release(pf->vsi[i]);
14654 }
14655 }
14656
14657
14658
14659
14660
14661
14662 if (pf->veb[veb_idx])
14663 i40e_veb_release(pf->veb[veb_idx]);
14664 }
14665
14666
14667
14668
14669
14670 static void i40e_veb_clear(struct i40e_veb *veb)
14671 {
14672 if (!veb)
14673 return;
14674
14675 if (veb->pf) {
14676 struct i40e_pf *pf = veb->pf;
14677
14678 mutex_lock(&pf->switch_mutex);
14679 if (pf->veb[veb->idx] == veb)
14680 pf->veb[veb->idx] = NULL;
14681 mutex_unlock(&pf->switch_mutex);
14682 }
14683
14684 kfree(veb);
14685 }
14686
14687
14688
14689
14690
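/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 *
 * The VEB can only be removed while exactly one VSI is still attached;
 * that VSI is re-pointed at the VEB's uplink before the VEB element is
 * deleted from the switch and its memory freed.
 **/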
14691 void i40e_veb_release(struct i40e_veb *veb)
14692 {
14693 struct i40e_vsi *vsi = NULL;
14694 struct i40e_pf *pf;
14695 int i, n = 0;
14696
14697 pf = veb->pf;
14698
14699
14700 for (i = 0; i < pf->num_alloc_vsi; i++) {
14701 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14702 n++;
14703 vsi = pf->vsi[i];
14704 }
14705 }
14706 if (n != 1) {
14707 dev_info(&pf->pdev->dev,
14708 "can't remove VEB %d with %d VSIs left\n",
14709 veb->seid, n);
14710 return;
14711 }
14712
14713
14714 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14715 if (veb->uplink_seid) {
14716 vsi->uplink_seid = veb->uplink_seid;
14717 if (veb->uplink_seid == pf->mac_seid)
14718 vsi->veb_idx = I40E_NO_VEB;
14719 else
14720 vsi->veb_idx = veb->veb_idx;
14721 } else {
14722
14723 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14724 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14725 }
14726
14727 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14728 i40e_veb_clear(veb);
14729 }
14730
14731
14732
14733
14734
14735
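/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Returns 0 on success, negative value on failure
 **/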
14736 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14737 {
14738 struct i40e_pf *pf = veb->pf;
14739 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14740 int ret;
14741
14742 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14743 veb->enabled_tc, false,
14744 &veb->seid, enable_stats, NULL);
14745
14746
14747 if (ret) {
14748 dev_info(&pf->pdev->dev,
14749 "couldn't add VEB, err %s aq_err %s\n",
14750 i40e_stat_str(&pf->hw, ret),
14751 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14752 return -EPERM;
14753 }
14754
14755
14756 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14757 &veb->stats_idx, NULL, NULL, NULL);
14758 if (ret) {
14759 dev_info(&pf->pdev->dev,
14760 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14761 i40e_stat_str(&pf->hw, ret),
14762 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14763 return -EPERM;
14764 }
14765 ret = i40e_veb_get_bw_info(veb);
14766 if (ret) {
14767 dev_info(&pf->pdev->dev,
14768 "couldn't get VEB bw info, err %s aq_err %s\n",
14769 i40e_stat_str(&pf->hw, ret),
14770 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14771 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14772 return -ENOENT;
14773 }
14774
14775 vsi->uplink_seid = veb->seid;
14776 vsi->veb_idx = veb->idx;
14777 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14778
14779 return 0;
14780 }
14781
14782
14783
14784
14785
14786
14787
14788
14789
14790
14791
14792
14793
14794
14795
14796
14797
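/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * Allocates a sw VEB structure and adds the element to the switch.
 * Passing both uplink_seid and vsi_seid as zero creates a floating VEB;
 * passing only one of them as zero is rejected.
 *
 * Returns pointer to the successfully allocated VEB sw struct on success,
 * otherwise returns NULL on failure.
 **/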
14798 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14799 u16 uplink_seid, u16 vsi_seid,
14800 u8 enabled_tc)
14801 {
14802 struct i40e_veb *veb, *uplink_veb = NULL;
14803 int vsi_idx, veb_idx;
14804 int ret;
14805
14806
14807 if ((uplink_seid == 0 || vsi_seid == 0) &&
14808 (uplink_seid + vsi_seid != 0)) {
14809 dev_info(&pf->pdev->dev,
14810 "one, not both SEIDs are 0: uplink=%d vsi=%d\n",
14811 uplink_seid, vsi_seid);
14812 return NULL;
14813 }
14814
14815
14816 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14817 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14818 break;
14819 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14820 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14821 vsi_seid);
14822 return NULL;
14823 }
14824
14825 if (uplink_seid && uplink_seid != pf->mac_seid) {
14826 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14827 if (pf->veb[veb_idx] &&
14828 pf->veb[veb_idx]->seid == uplink_seid) {
14829 uplink_veb = pf->veb[veb_idx];
14830 break;
14831 }
14832 }
14833 if (!uplink_veb) {
14834 dev_info(&pf->pdev->dev,
14835 "uplink seid %d not found\n", uplink_seid);
14836 return NULL;
14837 }
14838 }
14839
14840
14841 veb_idx = i40e_veb_mem_alloc(pf);
14842 if (veb_idx < 0)
14843 goto err_alloc;
14844 veb = pf->veb[veb_idx];
14845 veb->flags = flags;
14846 veb->uplink_seid = uplink_seid;
14847 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14848 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14849
14850
14851 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14852 if (ret)
14853 goto err_veb;
14854 if (vsi_idx == pf->lan_vsi)
14855 pf->lan_veb = veb->idx;
14856
14857 return veb;
14858
14859 err_veb:
14860 i40e_veb_clear(veb);
14861 err_alloc:
14862 return NULL;
14863 }
14864
14865
14866
14867
14868
14869
14870
14871
14872
14873
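/**
 * i40e_setup_pf_switch_element - set PF vars based on switch element type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * Helper function to extract a few useful SEID values from the reported
 * switch configuration.
 **/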
14874 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14875 struct i40e_aqc_switch_config_element_resp *ele,
14876 u16 num_reported, bool printconfig)
14877 {
14878 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14879 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14880 u8 element_type = ele->element_type;
14881 u16 seid = le16_to_cpu(ele->seid);
14882
14883 if (printconfig)
14884 dev_info(&pf->pdev->dev,
14885 "type=%d seid=%d uplink=%d downlink=%d\n",
14886 element_type, seid, uplink_seid, downlink_seid);
14887
14888 switch (element_type) {
14889 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14890 pf->mac_seid = seid;
14891 break;
14892 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14893
14894 if (uplink_seid != pf->mac_seid)
14895 break;
14896 if (pf->lan_veb >= I40E_MAX_VEB) {
14897 int v;
14898
14899
14900 for (v = 0; v < I40E_MAX_VEB; v++) {
14901 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14902 pf->lan_veb = v;
14903 break;
14904 }
14905 }
14906 if (pf->lan_veb >= I40E_MAX_VEB) {
14907 v = i40e_veb_mem_alloc(pf);
14908 if (v < 0)
14909 break;
14910 pf->lan_veb = v;
14911 }
14912 }
14913 if (pf->lan_veb >= I40E_MAX_VEB)
14914 break;
14915
14916 pf->veb[pf->lan_veb]->seid = seid;
14917 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14918 pf->veb[pf->lan_veb]->pf = pf;
14919 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14920 break;
14921 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14922 if (num_reported != 1)
14923 break;
14924
14925
14926
14927 pf->mac_seid = uplink_seid;
14928 pf->pf_seid = downlink_seid;
14929 pf->main_vsi_seid = seid;
14930 if (printconfig)
14931 dev_info(&pf->pdev->dev,
14932 "pf_seid=%d main_vsi_seid=%d\n",
14933 pf->pf_seid, pf->main_vsi_seid);
14934 break;
14935 case I40E_SWITCH_ELEMENT_TYPE_PF:
14936 case I40E_SWITCH_ELEMENT_TYPE_VF:
14937 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14938 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14939 case I40E_SWITCH_ELEMENT_TYPE_PE:
14940 case I40E_SWITCH_ELEMENT_TYPE_PA:
14941
14942 break;
14943 default:
14944 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14945 element_type, seid);
14946 break;
14947 }
14948 }
14949
14950
14951
14952
14953
14954
14955
14956
14957
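/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and extract a few
 * useful SEID values.
 **/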
14958 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14959 {
14960 struct i40e_aqc_get_switch_config_resp *sw_config;
14961 u16 next_seid = 0;
14962 int ret = 0;
14963 u8 *aq_buf;
14964 int i;
14965
14966 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14967 if (!aq_buf)
14968 return -ENOMEM;
14969
14970 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14971 do {
14972 u16 num_reported, num_total;
14973
14974 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14975 I40E_AQ_LARGE_BUF,
14976 &next_seid, NULL);
14977 if (ret) {
14978 dev_info(&pf->pdev->dev,
14979 "get switch config failed err %s aq_err %s\n",
14980 i40e_stat_str(&pf->hw, ret),
14981 i40e_aq_str(&pf->hw,
14982 pf->hw.aq.asq_last_status));
14983 kfree(aq_buf);
14984 return -ENOENT;
14985 }
14986
14987 num_reported = le16_to_cpu(sw_config->header.num_reported);
14988 num_total = le16_to_cpu(sw_config->header.num_total);
14989
14990 if (printconfig)
14991 dev_info(&pf->pdev->dev,
14992 "header: %d reported %d total\n",
14993 num_reported, num_total);
14994
14995 for (i = 0; i < num_reported; i++) {
14996 struct i40e_aqc_switch_config_element_resp *ele =
14997 &sw_config->element[i];
14998
14999 i40e_setup_pf_switch_element(pf, ele, num_reported,
15000 printconfig);
15001 }
15002 } while (next_seid != 0);
15003
15004 kfree(aq_buf);
15005 return ret;
15006 }
15007
15008
15009
15010
15011
15012
15013
15014
15015
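/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 * @lock_acquired: indicates whether the RTNL lock is already held
 *
 * Returns 0 on success, negative value on failure
 **/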
15016 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
15017 {
15018 u16 flags = 0;
15019 int ret;
15020
15021
15022 ret = i40e_fetch_switch_configuration(pf, false);
15023 if (ret) {
15024 dev_info(&pf->pdev->dev,
15025 "couldn't fetch switch config, err %s aq_err %s\n",
15026 i40e_stat_str(&pf->hw, ret),
15027 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15028 return ret;
15029 }
15030 i40e_pf_reset_stats(pf);
15031
15032
15033
15034
15035
15036
15037
15038 if ((pf->hw.pf_id == 0) &&
15039 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
15040 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15041 pf->last_sw_conf_flags = flags;
15042 }
15043
15044 if (pf->hw.pf_id == 0) {
15045 u16 valid_flags;
15046
15047 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15048 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
15049 NULL);
15050 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
15051 dev_info(&pf->pdev->dev,
15052 "couldn't set switch config bits, err %s aq_err %s\n",
15053 i40e_stat_str(&pf->hw, ret),
15054 i40e_aq_str(&pf->hw,
15055 pf->hw.aq.asq_last_status));
15056
15057 }
15058 pf->last_sw_conf_valid_flags = valid_flags;
15059 }
15060
15061
15062 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
15063 struct i40e_vsi *vsi = NULL;
15064 u16 uplink_seid;
15065
15066
15067
15068
15069 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
15070 uplink_seid = pf->veb[pf->lan_veb]->seid;
15071 else
15072 uplink_seid = pf->mac_seid;
15073 if (pf->lan_vsi == I40E_NO_VSI)
15074 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
15075 else if (reinit)
15076 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
15077 if (!vsi) {
15078 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
15079 i40e_cloud_filter_exit(pf);
15080 i40e_fdir_teardown(pf);
15081 return -EAGAIN;
15082 }
15083 } else {
15084
15085 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
15086
15087 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
15088 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
15089 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
15090 }
15091 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
15092
15093 i40e_fdir_sb_setup(pf);
15094
15095
15096 ret = i40e_setup_pf_filter_control(pf);
15097 if (ret) {
15098 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
15099 ret);
15100
15101 }
15102
15103
15104
15105
15106 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
15107 i40e_pf_config_rss(pf);
15108
15109
15110 i40e_link_event(pf);
15111
15112
15113 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
15114 I40E_AQ_AN_COMPLETED) ? true : false);
15115
15116 i40e_ptp_init(pf);
15117
15118 if (!lock_acquired)
15119 rtnl_lock();
15120
15121
15122 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
15123
15124 if (!lock_acquired)
15125 rtnl_unlock();
15126
15127 return ret;
15128 }
15129
15130
15131
15132
15133
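/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 *
 * Splits the queue pairs reported by the hardware between the LAN VSI,
 * the Flow Director sideband VSI, SR-IOV VFs and VMDq VSIs, disabling
 * features there are not enough queues for, and records whatever is left
 * over in pf->queues_left.
 **/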
15134 static void i40e_determine_queue_usage(struct i40e_pf *pf)
15135 {
15136 int queues_left;
15137 int q_max;
15138
15139 pf->num_lan_qps = 0;
15140
15141
15142
15143
15144
15145 queues_left = pf->hw.func_caps.num_tx_qp;
15146
15147 if ((queues_left == 1) ||
15148 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
15149
15150 queues_left = 0;
15151 pf->alloc_rss_size = pf->num_lan_qps = 1;
15152
15153
15154 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
15155 I40E_FLAG_IWARP_ENABLED |
15156 I40E_FLAG_FD_SB_ENABLED |
15157 I40E_FLAG_FD_ATR_ENABLED |
15158 I40E_FLAG_DCB_CAPABLE |
15159 I40E_FLAG_DCB_ENABLED |
15160 I40E_FLAG_SRIOV_ENABLED |
15161 I40E_FLAG_VMDQ_ENABLED);
15162 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
15163 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
15164 I40E_FLAG_FD_SB_ENABLED |
15165 I40E_FLAG_FD_ATR_ENABLED |
15166 I40E_FLAG_DCB_CAPABLE))) {
15167
15168 pf->alloc_rss_size = pf->num_lan_qps = 1;
15169 queues_left -= pf->num_lan_qps;
15170
15171 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
15172 I40E_FLAG_IWARP_ENABLED |
15173 I40E_FLAG_FD_SB_ENABLED |
15174 I40E_FLAG_FD_ATR_ENABLED |
15175 I40E_FLAG_DCB_ENABLED |
15176 I40E_FLAG_VMDQ_ENABLED);
15177 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
15178 } else {
15179
15180 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
15181 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
15182 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
15183 I40E_FLAG_DCB_ENABLED);
15184 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
15185 }
15186
15187
15188 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
15189 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
15190 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
15191 pf->num_lan_qps = q_max;
15192
15193 queues_left -= pf->num_lan_qps;
15194 }
15195
15196 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15197 if (queues_left > 1) {
15198 queues_left -= 1;
15199 } else {
15200 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
15201 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
15202 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
15203 }
15204 }
15205
15206 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15207 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
15208 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
15209 (queues_left / pf->num_vf_qps));
15210 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
15211 }
15212
15213 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
15214 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
15215 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
15216 (queues_left / pf->num_vmdq_qps));
15217 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
15218 }
15219
15220 pf->queues_left = queues_left;
15221 dev_dbg(&pf->pdev->dev,
15222 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
15223 pf->hw.func_caps.num_tx_qp,
15224 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
15225 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
15226 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
15227 queues_left);
15228 }
15229
15230
15231
15232
15233
15234
15235
15236
15237
15238
15239
15240
15241 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
15242 {
15243 struct i40e_filter_control_settings *settings = &pf->filter_settings;
15244
15245 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
15246
15247
15248 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
15249 settings->enable_fdir = true;
15250
15251
15252 settings->enable_ethtype = true;
15253 settings->enable_macvlan = true;
15254
15255 if (i40e_set_filter_control(&pf->hw, settings))
15256 return -ENOENT;
15257
15258 return 0;
15259 }
15260
15261 #define INFO_STRING_LEN 255
15262 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15263 static void i40e_print_features(struct i40e_pf *pf)
15264 {
15265 struct i40e_hw *hw = &pf->hw;
15266 char *buf;
15267 int i;
15268
15269 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15270 if (!buf)
15271 return;
15272
15273 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15274 #ifdef CONFIG_PCI_IOV
15275 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15276 #endif
15277 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15278 pf->hw.func_caps.num_vsis,
15279 pf->vsi[pf->lan_vsi]->num_queue_pairs);
15280 if (pf->flags & I40E_FLAG_RSS_ENABLED)
15281 i += scnprintf(&buf[i], REMAIN(i), " RSS");
15282 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
15283 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15284 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15285 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15286 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15287 }
15288 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
15289 i += scnprintf(&buf[i], REMAIN(i), " DCB");
15290 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15291 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15292 if (pf->flags & I40E_FLAG_PTP)
15293 i += scnprintf(&buf[i], REMAIN(i), " PTP");
15294 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
15295 i += scnprintf(&buf[i], REMAIN(i), " VEB");
15296 else
15297 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15298
15299 dev_info(&pf->pdev->dev, "%s\n", buf);
15300 kfree(buf);
15301 WARN_ON(i > INFO_STRING_LEN);
15302 }
15303
15304
15305
15306
15307
15308
15309
15310
15311
15312
15313
15314 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
15315 {
15316 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15317 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15318 }
15319
15320
15321
15322
15323
15324
15325 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
15326 {
15327 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
15328 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
15329 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15330 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15331 *flags |= I40E_FLAG_RS_FEC;
15332 *flags &= ~I40E_FLAG_BASE_R_FEC;
15333 }
15334 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15335 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15336 *flags |= I40E_FLAG_BASE_R_FEC;
15337 *flags &= ~I40E_FLAG_RS_FEC;
15338 }
15339 if (fec_cfg == 0)
15340 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
15341 }
15342
15343
15344
15345
15346
15347
15348
15349
15350
15351
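/**
 * i40e_check_recovery_mode - check if the firmware is in recovery mode
 * @pf: board private structure
 *
 * Reads the I40E_GL_FWSTS register; if the FWS1B field is non-zero the
 * firmware is in recovery mode, __I40E_RECOVERY_MODE is set in pf->state
 * and true is returned, otherwise false.
 **/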
15352 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15353 {
15354 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15355
15356 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15357 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15358 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15359 set_bit(__I40E_RECOVERY_MODE, pf->state);
15360
15361 return true;
15362 }
15363 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15364 dev_info(&pf->pdev->dev, "Please perform a Power-On Reset to initialize the adapter in normal mode with full functionality.\n");
15365
15366 return false;
15367 }
15368
15369
15370
15371
15372
15373
15374
15375
15376
15377
15378
15379
15380
15381
15382
15383
15384
15385
15386
15387
15388
15389
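/**
 * i40e_pf_loop_reset - perform a PF reset in a loop
 * @pf: board private structure
 *
 * Retries i40e_pf_reset() for up to roughly ten seconds, sleeping
 * 10-20 ms between attempts, to ride out a window in which the firmware
 * is not yet ready to accept the reset.
 **/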
15390 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
15391 {
15392
15393 const unsigned long time_end = jiffies + 10 * HZ;
15394
15395 struct i40e_hw *hw = &pf->hw;
15396 i40e_status ret;
15397
15398 ret = i40e_pf_reset(hw);
15399 while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
15400 usleep_range(10000, 20000);
15401 ret = i40e_pf_reset(hw);
15402 }
15403
15404 if (ret == I40E_SUCCESS)
15405 pf->pfr_count++;
15406 else
15407 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15408
15409 return ret;
15410 }
15411
15412
15413
15414
15415
15416
15417
15418
15419
15420
15421
15422
15423 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15424 {
15425 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15426 I40E_GL_FWSTS_FWS1B_MASK;
15427 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15428 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
15429 }
15430
15431
15432
15433
15434
15435
15436
15437
15438
15439
15440
15441
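/**
 * i40e_handle_resets - handle EMP resets and PF resets
 * @pf: board private structure
 *
 * Performs the looped PF reset and checks whether the firmware reports an
 * EMP reset in progress; returns I40E_ERR_RESET_FAILED when an EMPR is
 * detected, otherwise the PF reset status.
 **/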
15442 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
15443 {
15444 const i40e_status pfr = i40e_pf_loop_reset(pf);
15445 const bool is_empr = i40e_check_fw_empr(pf);
15446
15447 if (is_empr || pfr != I40E_SUCCESS)
15448 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15449
15450 return is_empr ? I40E_ERR_RESET_FAILED : pfr;
15451 }
15452
15453
15454
15455
15456
15457
15458
15459
15460
15461
15462
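/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: pointer to the hardware structure
 *
 * Brings up only a minimal set of subsystems: one MAIN VSI with a single
 * queue pair and its netdev, the service task/timer, debugfs and the misc
 * interrupt vector, providing the limited functionality needed to recover
 * the firmware (e.g. via an NVM update).
 **/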
15463 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15464 {
15465 struct i40e_vsi *vsi;
15466 int err;
15467 int v_idx;
15468
15469 pci_save_state(pf->pdev);
15470
15471
15472 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15473 pf->service_timer_period = HZ;
15474
15475 INIT_WORK(&pf->service_task, i40e_service_task);
15476 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15477
15478 err = i40e_init_interrupt_scheme(pf);
15479 if (err)
15480 goto err_switch_setup;
15481
15482
15483
15484
15485
15486
15487 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15488 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15489 else
15490 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15491
15492
15493 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15494 GFP_KERNEL);
15495 if (!pf->vsi) {
15496 err = -ENOMEM;
15497 goto err_switch_setup;
15498 }
15499
15500
15501
15502
15503 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15504 if (v_idx < 0) {
15505 err = v_idx;
15506 goto err_switch_setup;
15507 }
15508 pf->lan_vsi = v_idx;
15509 vsi = pf->vsi[v_idx];
15510 if (!vsi) {
15511 err = -EFAULT;
15512 goto err_switch_setup;
15513 }
15514 vsi->alloc_queue_pairs = 1;
15515 err = i40e_config_netdev(vsi);
15516 if (err)
15517 goto err_switch_setup;
15518 err = register_netdev(vsi->netdev);
15519 if (err)
15520 goto err_switch_setup;
15521 vsi->netdev_registered = true;
15522 i40e_dbg_pf_init(pf);
15523
15524 err = i40e_setup_misc_vector_for_recovery_mode(pf);
15525 if (err)
15526 goto err_switch_setup;
15527
15528
15529 i40e_send_version(pf);
15530
15531
15532 mod_timer(&pf->service_timer,
15533 round_jiffies(jiffies + pf->service_timer_period));
15534
15535 return 0;
15536
15537 err_switch_setup:
15538 i40e_reset_interrupt_capability(pf);
15539 del_timer_sync(&pf->service_timer);
15540 i40e_shutdown_adminq(hw);
15541 iounmap(hw->hw_addr);
15542 pci_disable_pcie_error_reporting(pf->pdev);
15543 pci_release_mem_regions(pf->pdev);
15544 pci_disable_device(pf->pdev);
15545 kfree(pf);
15546
15547 return err;
15548 }
15549
15550
15551
15552
15553
15554
15555
15556
15557 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15558 {
15559 struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
15560
15561 hw->subsystem_device_id = pdev->subsystem_device ?
15562 pdev->subsystem_device :
15563 (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
15564 }
15565
15566
15567
15568
15569
15570
15571
15572
15573
15574
15575
15576
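/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.  The OS
 * initialization, configuration of the PF private structure, and a
 * hardware reset occur here.
 *
 * Returns 0 on success, negative on failure
 **/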
15577 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15578 {
15579 struct i40e_aq_get_phy_abilities_resp abilities;
15580 #ifdef CONFIG_I40E_DCB
15581 enum i40e_get_fw_lldp_status_resp lldp_status;
15582 i40e_status status;
15583 #endif
15584 struct i40e_pf *pf;
15585 struct i40e_hw *hw;
15586 static u16 pfs_found;
15587 u16 wol_nvm_bits;
15588 u16 link_status;
15589 int err;
15590 u32 val;
15591 u32 i;
15592
15593 err = pci_enable_device_mem(pdev);
15594 if (err)
15595 return err;
15596
15597
15598 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15599 if (err) {
15600 dev_err(&pdev->dev,
15601 "DMA configuration failed: 0x%x\n", err);
15602 goto err_dma;
15603 }
15604
15605
15606 err = pci_request_mem_regions(pdev, i40e_driver_name);
15607 if (err) {
15608 dev_info(&pdev->dev,
15609 "pci_request_mem_regions failed %d\n", err);
15610 goto err_pci_reg;
15611 }
15612
15613 pci_enable_pcie_error_reporting(pdev);
15614 pci_set_master(pdev);
15615
15616
15617
15618
15619
15620
15621 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
15622 if (!pf) {
15623 err = -ENOMEM;
15624 goto err_pf_alloc;
15625 }
15626 pf->next_vsi = 0;
15627 pf->pdev = pdev;
15628 set_bit(__I40E_DOWN, pf->state);
15629
15630 hw = &pf->hw;
15631 hw->back = pf;
15632
15633 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15634 I40E_MAX_CSR_SPACE);
15635
15636
15637
15638
15639
15640 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15641 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15642 pf->ioremap_len);
15643 err = -ENOMEM;
15644 goto err_ioremap;
15645 }
15646 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15647 if (!hw->hw_addr) {
15648 err = -EIO;
15649 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15650 (unsigned int)pci_resource_start(pdev, 0),
15651 pf->ioremap_len, err);
15652 goto err_ioremap;
15653 }
15654 hw->vendor_id = pdev->vendor;
15655 hw->device_id = pdev->device;
15656 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15657 hw->subsystem_vendor_id = pdev->subsystem_vendor;
15658 i40e_set_subsystem_device_id(hw);
15659 hw->bus.device = PCI_SLOT(pdev->devfn);
15660 hw->bus.func = PCI_FUNC(pdev->devfn);
15661 hw->bus.bus_id = pdev->bus->number;
15662 pf->instance = pfs_found;
15663
15664
15665
15666
15667 hw->switch_tag = 0xffff;
15668 hw->first_tag = ETH_P_8021AD;
15669 hw->second_tag = ETH_P_8021Q;
15670
15671 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15672 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15673 INIT_LIST_HEAD(&pf->ddp_old_prof);
15674
15675
15676
15677
15678 mutex_init(&hw->aq.asq_mutex);
15679 mutex_init(&hw->aq.arq_mutex);
15680
15681 pf->msg_enable = netif_msg_init(debug,
15682 NETIF_MSG_DRV |
15683 NETIF_MSG_PROBE |
15684 NETIF_MSG_LINK);
15685 if (debug < -1)
15686 pf->hw.debug_mask = debug;
15687
15688
15689 if (hw->revision_id == 0 &&
15690 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15691 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15692 i40e_flush(hw);
15693 msleep(200);
15694 pf->corer_count++;
15695
15696 i40e_clear_pxe_mode(hw);
15697 }
15698
15699
15700 i40e_clear_hw(hw);
15701
15702 err = i40e_set_mac_type(hw);
15703 if (err) {
15704 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15705 err);
15706 goto err_pf_reset;
15707 }
15708
15709 err = i40e_handle_resets(pf);
15710 if (err)
15711 goto err_pf_reset;
15712
15713 i40e_check_recovery_mode(pf);
15714
15715 if (is_kdump_kernel()) {
15716 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15717 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15718 } else {
15719 hw->aq.num_arq_entries = I40E_AQ_LEN;
15720 hw->aq.num_asq_entries = I40E_AQ_LEN;
15721 }
15722 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15723 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15724 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15725
15726 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15727 "%s-%s:misc",
15728 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15729
15730 err = i40e_init_shared_code(hw);
15731 if (err) {
15732 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15733 err);
15734 goto err_pf_reset;
15735 }
15736
15737
15738 pf->hw.fc.requested_mode = I40E_FC_NONE;
15739
15740 err = i40e_init_adminq(hw);
15741 if (err) {
15742 if (err == I40E_ERR_FIRMWARE_API_VERSION)
15743 dev_info(&pdev->dev,
15744 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15745 hw->aq.api_maj_ver,
15746 hw->aq.api_min_ver,
15747 I40E_FW_API_VERSION_MAJOR,
15748 I40E_FW_MINOR_VERSION(hw));
15749 else
15750 dev_info(&pdev->dev,
15751 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15752
15753 goto err_pf_reset;
15754 }
15755 i40e_get_oem_version(hw);
15756
15757
15758 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15759 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15760 hw->aq.api_maj_ver, hw->aq.api_min_ver,
15761 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15762 hw->subsystem_vendor_id, hw->subsystem_device_id);
15763
15764 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
15765 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
15766 dev_dbg(&pdev->dev,
15767 "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15768 hw->aq.api_maj_ver,
15769 hw->aq.api_min_ver,
15770 I40E_FW_API_VERSION_MAJOR,
15771 I40E_FW_MINOR_VERSION(hw));
15772 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
15773 dev_info(&pdev->dev,
15774 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15775 hw->aq.api_maj_ver,
15776 hw->aq.api_min_ver,
15777 I40E_FW_API_VERSION_MAJOR,
15778 I40E_FW_MINOR_VERSION(hw));
15779
15780 i40e_verify_eeprom(pf);
15781
15782
15783 if (hw->revision_id < 1)
15784 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15785
15786 i40e_clear_pxe_mode(hw);
15787
15788 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15789 if (err)
15790 goto err_adminq_setup;
15791
15792 err = i40e_sw_init(pf);
15793 if (err) {
15794 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15795 goto err_sw_init;
15796 }
15797
15798 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15799 return i40e_init_recovery_mode(pf, hw);
15800
15801 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15802 hw->func_caps.num_rx_qp, 0, 0);
15803 if (err) {
15804 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15805 goto err_init_lan_hmc;
15806 }
15807
15808 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15809 if (err) {
15810 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15811 err = -ENOENT;
15812 goto err_configure_lan_hmc;
15813 }
15814
15815
15816
15817
15818
15819 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
15820 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15821 i40e_aq_stop_lldp(hw, true, false, NULL);
15822 }
15823
15824
15825 i40e_get_platform_mac_addr(pdev, pf);
15826
15827 if (!is_valid_ether_addr(hw->mac.addr)) {
15828 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15829 err = -EIO;
15830 goto err_mac_addr;
15831 }
15832 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15833 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15834 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15835 if (is_valid_ether_addr(hw->mac.port_addr))
15836 pf->hw_features |= I40E_HW_PORT_ID_VALID;
15837
15838 i40e_ptp_alloc_pins(pf);
15839 pci_set_drvdata(pdev, pf);
15840 pci_save_state(pdev);
15841
15842 #ifdef CONFIG_I40E_DCB
15843 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
15844 if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
15845 pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
15846 else
15847 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
15848 dev_info(&pdev->dev,
15849 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15850 "FW LLDP is disabled\n" :
15851 "FW LLDP is enabled\n");
15852
15853
15854 i40e_aq_set_dcb_parameters(hw, true, NULL);
15855
15856 err = i40e_init_pf_dcb(pf);
15857 if (err) {
15858 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15859 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15860
15861 }
15862 #endif
15863
15864
15865 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15866 pf->service_timer_period = HZ;
15867
15868 INIT_WORK(&pf->service_task, i40e_service_task);
15869 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15870
15871
15872 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15873 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15874 pf->wol_en = false;
15875 else
15876 pf->wol_en = true;
15877 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15878
15879
15880 i40e_determine_queue_usage(pf);
15881 err = i40e_init_interrupt_scheme(pf);
15882 if (err)
15883 goto err_switch_setup;
15884
15885
15886
15887
15888
15889
15890 if (is_kdump_kernel())
15891 pf->num_lan_msix = 1;
15892
15893 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15894 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15895 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15896 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15897 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15898 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15899 UDP_TUNNEL_TYPE_GENEVE;
15900
15901
15902
15903
15904
15905
15906 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15907 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15908 else
15909 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15910 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15911 dev_warn(&pf->pdev->dev,
15912 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15913 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15914 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15915 }
15916
15917
15918 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15919 GFP_KERNEL);
15920 if (!pf->vsi) {
15921 err = -ENOMEM;
15922 goto err_switch_setup;
15923 }
15924
15925 #ifdef CONFIG_PCI_IOV
15926
15927 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15928 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15929 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15930 if (pci_num_vf(pdev))
15931 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15932 }
15933 #endif
15934 err = i40e_setup_pf_switch(pf, false, false);
15935 if (err) {
15936 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15937 goto err_vsis;
15938 }
15939 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15940
15941
15942 for (i = 0; i < pf->num_alloc_vsi; i++) {
15943 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15944 i40e_vsi_open(pf->vsi[i]);
15945 break;
15946 }
15947 }
15948
15949
15950
15951
15952 err = i40e_aq_set_phy_int_mask(&pf->hw,
15953 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15954 I40E_AQ_EVENT_MEDIA_NA |
15955 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15956 if (err)
15957 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15958 i40e_stat_str(&pf->hw, err),
15959 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15960
15961
15962
15963
15964
15965 val = rd32(hw, I40E_REG_MSS);
15966 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15967 val &= ~I40E_REG_MSS_MIN_MASK;
15968 val |= I40E_64BYTE_MSS;
15969 wr32(hw, I40E_REG_MSS, val);
15970 }
15971
15972 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15973 msleep(75);
15974 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15975 if (err)
15976 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15977 i40e_stat_str(&pf->hw, err),
15978 i40e_aq_str(&pf->hw,
15979 pf->hw.aq.asq_last_status));
15980 }
15981
15982
15983
15984
15985 clear_bit(__I40E_DOWN, pf->state);
15986
15987
15988
15989
15990
15991
15992 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15993 err = i40e_setup_misc_vector(pf);
15994 if (err) {
15995 dev_info(&pdev->dev,
15996 "setup of misc vector failed: %d\n", err);
15997 i40e_cloud_filter_exit(pf);
15998 i40e_fdir_teardown(pf);
15999 goto err_vsis;
16000 }
16001 }
16002
16003 #ifdef CONFIG_PCI_IOV
16004
16005 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
16006 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
16007 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
16008
16009 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
16010 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
16011 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
16012 i40e_flush(hw);
16013
16014 if (pci_num_vf(pdev)) {
16015 dev_info(&pdev->dev,
16016 "Active VFs found, allocating resources.\n");
16017 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
16018 if (err)
16019 dev_info(&pdev->dev,
16020 "Error %d allocating resources for existing VFs\n",
16021 err);
16022 }
16023 }
16024 #endif
16025
16026 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16027 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
16028 pf->num_iwarp_msix,
16029 I40E_IWARP_IRQ_PILE_ID);
16030 if (pf->iwarp_base_vector < 0) {
16031 dev_info(&pdev->dev,
16032 "failed to get tracking for %d vectors for IWARP err=%d\n",
16033 pf->num_iwarp_msix, pf->iwarp_base_vector);
16034 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
16035 }
16036 }
16037
16038 i40e_dbg_pf_init(pf);
16039
16040
16041 i40e_send_version(pf);
16042
16043
16044 mod_timer(&pf->service_timer,
16045 round_jiffies(jiffies + pf->service_timer_period));
16046
16047
16048 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16049 err = i40e_lan_add_device(pf);
16050 if (err)
16051 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
16052 err);
16053 }
16054
16055 #define PCI_SPEED_SIZE 8
16056 #define PCI_WIDTH_SIZE 8
16057
16058
16059
16060
16061 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
16062 char speed[PCI_SPEED_SIZE] = "Unknown";
16063 char width[PCI_WIDTH_SIZE] = "Unknown";
16064
16065
16066
16067
16068 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
16069 &link_status);
16070
16071 i40e_set_pci_config_data(hw, link_status);
16072
16073 switch (hw->bus.speed) {
16074 case i40e_bus_speed_8000:
16075 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
16076 case i40e_bus_speed_5000:
16077 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
16078 case i40e_bus_speed_2500:
16079 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
16080 default:
16081 break;
16082 }
16083 switch (hw->bus.width) {
16084 case i40e_bus_width_pcie_x8:
16085 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
16086 case i40e_bus_width_pcie_x4:
16087 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
16088 case i40e_bus_width_pcie_x2:
16089 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
16090 case i40e_bus_width_pcie_x1:
16091 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
16092 default:
16093 break;
16094 }
16095
16096 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
16097 speed, width);
16098
16099 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
16100 hw->bus.speed < i40e_bus_speed_8000) {
16101 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
16102 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
16103 }
16104 }
16105
16106
16107 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
16108 if (err)
16109 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
16110 i40e_stat_str(&pf->hw, err),
16111 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16112 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
16113
16114
16115 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
16116
16117
16118 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
16119 if (err)
16120 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
16121 i40e_stat_str(&pf->hw, err),
16122 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16123
16124
16125 #define MAX_FRAME_SIZE_DEFAULT 0x2600
16126 val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
16127 I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
16128 if (val < MAX_FRAME_SIZE_DEFAULT)
16129 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
16130 pf->hw.port, val);
16131
16132
16133
16134
16135
16136
16137
16138 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
16139 pf->main_vsi_seid);
16140
16141 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
16142 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
16143 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
16144 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
16145 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
16146
16147 i40e_print_features(pf);
16148
16149 return 0;
16150
16151
16152 err_vsis:
16153 set_bit(__I40E_DOWN, pf->state);
16154 i40e_clear_interrupt_scheme(pf);
16155 kfree(pf->vsi);
16156 err_switch_setup:
16157 i40e_reset_interrupt_capability(pf);
16158 del_timer_sync(&pf->service_timer);
16159 err_mac_addr:
16160 err_configure_lan_hmc:
16161 (void)i40e_shutdown_lan_hmc(hw);
16162 err_init_lan_hmc:
16163 kfree(pf->qp_pile);
16164 err_sw_init:
16165 err_adminq_setup:
16166 err_pf_reset:
16167 iounmap(hw->hw_addr);
16168 err_ioremap:
16169 kfree(pf);
16170 err_pf_alloc:
16171 pci_disable_pcie_error_reporting(pdev);
16172 pci_release_mem_regions(pdev);
16173 err_pci_reg:
16174 err_dma:
16175 pci_disable_device(pdev);
16176 return err;
16177 }
16178
16179
16180
16181
16182
16183
16184
16185
16186
16187
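/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver that it
 * should release a PCI device.  This could be caused by a Hot-Plug event,
 * or because the driver is going to be removed from memory.
 **/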
16188 static void i40e_remove(struct pci_dev *pdev)
16189 {
16190 struct i40e_pf *pf = pci_get_drvdata(pdev);
16191 struct i40e_hw *hw = &pf->hw;
16192 i40e_status ret_code;
16193 int i;
16194
16195 i40e_dbg_pf_exit(pf);
16196
16197 i40e_ptp_stop(pf);
16198
16199
16200 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
16201 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
16202
16203
16204
16205
16206
16207 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
16208 usleep_range(1000, 2000);
16209 set_bit(__I40E_IN_REMOVE, pf->state);
16210
16211 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
16212 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
16213 i40e_free_vfs(pf);
16214 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
16215 }
16216
16217 set_bit(__I40E_SUSPENDED, pf->state);
16218 set_bit(__I40E_DOWN, pf->state);
16219 if (pf->service_timer.function)
16220 del_timer_sync(&pf->service_timer);
16221 if (pf->service_task.func)
16222 cancel_work_sync(&pf->service_task);
16223
16224 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
16225 struct i40e_vsi *vsi = pf->vsi[0];
16226
16227
16228
16229
16230
16231 unregister_netdev(vsi->netdev);
16232 free_netdev(vsi->netdev);
16233
16234 goto unmap;
16235 }
16236
16237
16238
16239
16240 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16241
16242 i40e_fdir_teardown(pf);
16243
16244
16245
16246
16247 for (i = 0; i < I40E_MAX_VEB; i++) {
16248 if (!pf->veb[i])
16249 continue;
16250
16251 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
16252 pf->veb[i]->uplink_seid == 0)
16253 i40e_switch_branch_release(pf->veb[i]);
16254 }
16255
16256
16257
16258
16259 if (pf->vsi[pf->lan_vsi])
16260 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
16261
16262 i40e_cloud_filter_exit(pf);
16263
16264
16265 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16266 ret_code = i40e_lan_del_device(pf);
16267 if (ret_code)
16268 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16269 ret_code);
16270 }
16271
16272
16273 if (hw->hmc.hmc_obj) {
16274 ret_code = i40e_shutdown_lan_hmc(hw);
16275 if (ret_code)
16276 dev_warn(&pdev->dev,
16277 "Failed to destroy the HMC resources: %d\n",
16278 ret_code);
16279 }
16280
16281 unmap:
16282
16283 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16284 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16285 free_irq(pf->pdev->irq, pf);
16286
16287
16288 i40e_shutdown_adminq(hw);
16289
16290
16291 mutex_destroy(&hw->aq.arq_mutex);
16292 mutex_destroy(&hw->aq.asq_mutex);
16293
16294
16295 rtnl_lock();
16296 i40e_clear_interrupt_scheme(pf);
16297 for (i = 0; i < pf->num_alloc_vsi; i++) {
16298 if (pf->vsi[i]) {
16299 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16300 i40e_vsi_clear_rings(pf->vsi[i]);
16301 i40e_vsi_clear(pf->vsi[i]);
16302 pf->vsi[i] = NULL;
16303 }
16304 }
16305 rtnl_unlock();
16306
16307 for (i = 0; i < I40E_MAX_VEB; i++) {
16308 kfree(pf->veb[i]);
16309 pf->veb[i] = NULL;
16310 }
16311
16312 kfree(pf->qp_pile);
16313 kfree(pf->vsi);
16314
16315 iounmap(hw->hw_addr);
16316 kfree(pf);
16317 pci_release_mem_regions(pdev);
16318
16319 pci_disable_pcie_error_reporting(pdev);
16320 pci_disable_device(pdev);
16321 }
16322
16323
16324
16325
16326
16327
16328
16329
16330
16331
16332 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16333 pci_channel_state_t error)
16334 {
16335 struct i40e_pf *pf = pci_get_drvdata(pdev);
16336
16337 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16338
16339 if (!pf) {
16340 dev_info(&pdev->dev,
16341 "Cannot recover - error happened during device probe\n");
16342 return PCI_ERS_RESULT_DISCONNECT;
16343 }
16344
16345
16346 if (!test_bit(__I40E_SUSPENDED, pf->state))
16347 i40e_prep_for_reset(pf);
16348
16349
16350 return PCI_ERS_RESULT_NEED_RESET;
16351 }
16352
16353
16354
16355
16356
16357
16358
16359
16360
16361
16362 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16363 {
16364 struct i40e_pf *pf = pci_get_drvdata(pdev);
16365 pci_ers_result_t result;
16366 u32 reg;
16367
16368 dev_dbg(&pdev->dev, "%s\n", __func__);
16369 if (pci_enable_device_mem(pdev)) {
16370 dev_info(&pdev->dev,
16371 "Cannot re-enable PCI device after reset.\n");
16372 result = PCI_ERS_RESULT_DISCONNECT;
16373 } else {
16374 pci_set_master(pdev);
16375 pci_restore_state(pdev);
16376 pci_save_state(pdev);
16377 pci_wake_from_d3(pdev, false);
16378
16379 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16380 if (reg == 0)
16381 result = PCI_ERS_RESULT_RECOVERED;
16382 else
16383 result = PCI_ERS_RESULT_DISCONNECT;
16384 }
16385
16386 return result;
16387 }
16388
16389
16390
16391
16392
16393 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16394 {
16395 struct i40e_pf *pf = pci_get_drvdata(pdev);
16396
16397 i40e_prep_for_reset(pf);
16398 }
16399
16400
16401
16402
16403
16404 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16405 {
16406 struct i40e_pf *pf = pci_get_drvdata(pdev);
16407
16408 if (test_bit(__I40E_IN_REMOVE, pf->state))
16409 return;
16410
16411 i40e_reset_and_rebuild(pf, false, false);
16412 }
16413
16414
16415
16416
16417
16418
16419
16420
16421 static void i40e_pci_error_resume(struct pci_dev *pdev)
16422 {
16423 struct i40e_pf *pf = pci_get_drvdata(pdev);
16424
16425 dev_dbg(&pdev->dev, "%s\n", __func__);
16426 if (test_bit(__I40E_SUSPENDED, pf->state))
16427 return;
16428
16429 i40e_handle_reset_warning(pf, false);
16430 }
16431
16432
16433
16434
16435
16436
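/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin queue function
 * @pf: pointer to i40e_pf struct
 **/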
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The firmware expects the mac address write command to be issued
	 * first with one of the LAA write types before it is called again
	 * with the multicast magic packet enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
		| I40E_AQC_WOL_PRESERVE_ON_PFR
		| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

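/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/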
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we are going to destroy queues during the
	 * i40e_clear_interrupt_scheme() call, hold the RTNL lock for this
	 * whole section.
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

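/**
 * i40e_suspend - PM callback for moving the device into a low power state
 * @dev: generic device information structure
 **/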
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure the service task is not running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we are going to destroy queues during the
	 * i40e_clear_interrupt_scheme() call, hold the RTNL lock for this
	 * whole section.
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely suspend even with a large number of vectors allocated.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}

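/**
 * i40e_resume - PM callback for waking the device
 * @dev: generic device information structure
 **/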
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock while restoring interrupt schemes,
	 * since we're going to be restoring queues.
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

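/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is loaded.
 * It creates the driver workqueue, initializes debugfs support, and
 * registers the driver with the PCI subsystem.
 **/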
static int __init i40e_init_module(void)
{
	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* Create a dedicated workqueue that the devices handled by this
	 * driver can schedule their service work onto.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

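/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/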
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	ida_destroy(&i40e_client_ida);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);