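/* Core of the Intel(R) 10 Gigabit PCI Express network driver (ixgbe):
 * PCI device tables, register access helpers, descriptor ring cleanup
 * and the Rx/Tx hot paths.
 */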
0004 #include <linux/types.h>
0005 #include <linux/module.h>
0006 #include <linux/pci.h>
0007 #include <linux/netdevice.h>
0008 #include <linux/vmalloc.h>
0009 #include <linux/string.h>
0010 #include <linux/in.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/ip.h>
0013 #include <linux/tcp.h>
0014 #include <linux/sctp.h>
0015 #include <linux/pkt_sched.h>
0016 #include <linux/ipv6.h>
0017 #include <linux/slab.h>
0018 #include <net/checksum.h>
0019 #include <net/ip6_checksum.h>
0020 #include <linux/etherdevice.h>
0021 #include <linux/ethtool.h>
0022 #include <linux/if.h>
0023 #include <linux/if_vlan.h>
0024 #include <linux/if_macvlan.h>
0025 #include <linux/if_bridge.h>
0026 #include <linux/prefetch.h>
0027 #include <linux/bpf.h>
0028 #include <linux/bpf_trace.h>
0029 #include <linux/atomic.h>
0030 #include <linux/numa.h>
0031 #include <generated/utsrelease.h>
0032 #include <scsi/fc/fc_fcoe.h>
0033 #include <net/udp_tunnel.h>
0034 #include <net/pkt_cls.h>
0035 #include <net/tc_act/tc_gact.h>
0036 #include <net/tc_act/tc_mirred.h>
0037 #include <net/vxlan.h>
0038 #include <net/mpls.h>
0039 #include <net/xdp_sock_drv.h>
0040 #include <net/xfrm.h>
0041
0042 #include "ixgbe.h"
0043 #include "ixgbe_common.h"
0044 #include "ixgbe_dcb_82599.h"
0045 #include "ixgbe_phy.h"
0046 #include "ixgbe_sriov.h"
0047 #include "ixgbe_model.h"
0048 #include "ixgbe_txrx_common.h"
0049
0050 char ixgbe_driver_name[] = "ixgbe";
0051 static const char ixgbe_driver_string[] =
0052 "Intel(R) 10 Gigabit PCI Express Network Driver";
0053 #ifdef IXGBE_FCOE
0054 char ixgbe_default_device_descr[] =
0055 "Intel(R) 10 Gigabit Network Connection";
0056 #else
0057 static char ixgbe_default_device_descr[] =
0058 "Intel(R) 10 Gigabit Network Connection";
0059 #endif
0060 static const char ixgbe_copyright[] =
0061 "Copyright (c) 1999-2016 Intel Corporation.";
0062
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
0064
0065 static const struct ixgbe_info *ixgbe_info_tbl[] = {
0066 [board_82598] = &ixgbe_82598_info,
0067 [board_82599] = &ixgbe_82599_info,
0068 [board_X540] = &ixgbe_X540_info,
0069 [board_X550] = &ixgbe_X550_info,
0070 [board_X550EM_x] = &ixgbe_X550EM_x_info,
0071 [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
0072 [board_x550em_a] = &ixgbe_x550em_a_info,
0073 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
0074 };
0075
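/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 */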
0084 static const struct pci_device_id ixgbe_pci_tbl[] = {
0085 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
0086 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
0087 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
0088 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
0089 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
0090 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
0091 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
0092 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
0093 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
0094 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
0095 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
0096 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
0097 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
0098 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
0099 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
0100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
0101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
0102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
0103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
0104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
0105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
0106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
0107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
0108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
0109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
0110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
0111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
0112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
0113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
0114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
0115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
0116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
0117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
0118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
0119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
0120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
0121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
0122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
0123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
0124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
0125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
0126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
0127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
0128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
0129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
0130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
0131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
0132
0133 {0, }
0134 };
0135 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
0136
0137 #ifdef CONFIG_IXGBE_DCA
0138 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
0139 void *p);
0140 static struct notifier_block dca_notifier = {
0141 .notifier_call = ixgbe_notify_dca,
0142 .next = NULL,
0143 .priority = 0
0144 };
0145 #endif
0146
0147 #ifdef CONFIG_PCI_IOV
0148 static unsigned int max_vfs;
0149 module_param(max_vfs, uint, 0);
0150 MODULE_PARM_DESC(max_vfs,
0151 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
0152 #endif
0153
0154 static bool allow_unsupported_sfp;
0155 module_param(allow_unsupported_sfp, bool, 0);
0156 MODULE_PARM_DESC(allow_unsupported_sfp,
0157 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
0158
0159 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
0160 static int debug = -1;
0161 module_param(debug, int, 0);
0162 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
0163
0164 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
0165 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
0166 MODULE_LICENSE("GPL v2");
0167
0168 DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
0169 EXPORT_SYMBOL(ixgbe_xdp_locking_key);
0170
0171 static struct workqueue_struct *ixgbe_wq;
0172
0173 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
0174 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
0175
0176 static const struct net_device_ops ixgbe_netdev_ops;
0177
0178 static bool netif_is_ixgbe(struct net_device *dev)
0179 {
0180 return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
0181 }
0182
0183 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
0184 u32 reg, u16 *value)
0185 {
0186 struct pci_dev *parent_dev;
0187 struct pci_bus *parent_bus;
0188
0189 parent_bus = adapter->pdev->bus->parent;
0190 if (!parent_bus)
0191 return -1;
0192
0193 parent_dev = parent_bus->self;
0194 if (!parent_dev)
0195 return -1;
0196
0197 if (!pci_is_pcie(parent_dev))
0198 return -1;
0199
0200 pcie_capability_read_word(parent_dev, reg, value);
0201 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
0202 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
0203 return -1;
0204 return 0;
0205 }
0206
0207 static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
0208 {
0209 struct ixgbe_hw *hw = &adapter->hw;
0210 u16 link_status = 0;
0211 int err;
0212
0213 hw->bus.type = ixgbe_bus_type_pci_express;
0214
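/* Read the PCIe Link Status register (offset 18 in the PCIe capability)
 * of the parent bridge to get the negotiated link width and speed.
 */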
0218 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
0219
0220
0221 if (err)
0222 return err;
0223
0224 hw->bus.width = ixgbe_convert_bus_width(link_status);
0225 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
0226
0227 return 0;
0228 }
0229
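/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * Returns true for device IDs whose PCIe bandwidth must be read from the
 * parent bus rather than from the device itself.
 */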
0239 static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
0240 {
0241 switch (hw->device_id) {
0242 case IXGBE_DEV_ID_82599_SFP_SF_QP:
0243 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
0244 return true;
0245 default:
0246 return false;
0247 }
0248 }
0249
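/**
 * ixgbe_check_minimum_link - Report the negotiated PCIe link for the device
 * @adapter: board private structure
 * @expected_gts: expected gigatransfers per second of the device
 *
 * Prints the PCIe link status, using the parent bridge for devices whose
 * link is negotiated there; skipped for adapters on an internal bus.
 */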
0250 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
0251 int expected_gts)
0252 {
0253 struct ixgbe_hw *hw = &adapter->hw;
0254 struct pci_dev *pdev;
0255
0256
0257
0258
0259
0260 if (hw->bus.type == ixgbe_bus_type_internal)
0261 return;
0262
0263
0264 if (ixgbe_pcie_from_parent(&adapter->hw))
0265 pdev = adapter->pdev->bus->parent->self;
0266 else
0267 pdev = adapter->pdev;
0268
0269 pcie_print_link_status(pdev);
0270 }
0271
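/**
 * ixgbe_service_event_schedule - Schedule the service task
 * @adapter: board private structure
 *
 * Queue the service task on the driver workqueue unless the adapter is
 * going down, being removed, or the task is already scheduled.
 */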
0272 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
0273 {
0274 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
0275 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
0276 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
0277 queue_work(ixgbe_wq, &adapter->service_task);
0278 }
0279
0280 static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
0281 {
0282 struct ixgbe_adapter *adapter = hw->back;
0283
0284 if (!hw->hw_addr)
0285 return;
0286 hw->hw_addr = NULL;
0287 e_dev_err("Adapter removed\n");
0288 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
0289 ixgbe_service_event_schedule(adapter);
0290 }
0291
0292 static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
0293 {
0294 u8 __iomem *reg_addr;
0295 u32 value;
0296 int i;
0297
0298 reg_addr = READ_ONCE(hw->hw_addr);
0299 if (ixgbe_removed(reg_addr))
0300 return IXGBE_FAILED_READ_REG;
0301
0302
0303
0304
0305
0306 for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
0307 value = readl(reg_addr + IXGBE_STATUS);
0308 if (value != IXGBE_FAILED_READ_REG)
0309 break;
0310 mdelay(3);
0311 }
0312
0313 if (value == IXGBE_FAILED_READ_REG)
0314 ixgbe_remove_adapter(hw);
0315 else
0316 value = readl(reg_addr + reg);
0317 return value;
0318 }
0319
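/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers.  It checks for device
 * removal by confirming any read that returns all ones and, in that case,
 * re-reading the status register.  It avoids touching the hardware at all
 * if a removal was previously detected, returning IXGBE_FAILED_READ_REG.
 */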
0333 u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
0334 {
0335 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
0336 u32 value;
0337
0338 if (ixgbe_removed(reg_addr))
0339 return IXGBE_FAILED_READ_REG;
0340 if (unlikely(hw->phy.nw_mng_if_sel &
0341 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
0342 struct ixgbe_adapter *adapter;
0343 int i;
0344
0345 for (i = 0; i < 200; ++i) {
0346 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
0347 if (likely(!value))
0348 goto writes_completed;
0349 if (value == IXGBE_FAILED_READ_REG) {
0350 ixgbe_remove_adapter(hw);
0351 return IXGBE_FAILED_READ_REG;
0352 }
0353 udelay(5);
0354 }
0355
0356 adapter = hw->back;
0357 e_warn(hw, "register writes incomplete %08x\n", value);
0358 }
0359
0360 writes_completed:
0361 value = readl(reg_addr + reg);
0362 if (unlikely(value == IXGBE_FAILED_READ_REG))
0363 value = ixgbe_check_remove(hw, reg);
0364 return value;
0365 }
0366
0367 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
0368 {
0369 u16 value;
0370
0371 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
0372 if (value == IXGBE_FAILED_READ_CFG_WORD) {
0373 ixgbe_remove_adapter(hw);
0374 return true;
0375 }
0376 return false;
0377 }
0378
0379 u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
0380 {
0381 struct ixgbe_adapter *adapter = hw->back;
0382 u16 value;
0383
0384 if (ixgbe_removed(hw->hw_addr))
0385 return IXGBE_FAILED_READ_CFG_WORD;
0386 pci_read_config_word(adapter->pdev, reg, &value);
0387 if (value == IXGBE_FAILED_READ_CFG_WORD &&
0388 ixgbe_check_cfg_remove(hw, adapter->pdev))
0389 return IXGBE_FAILED_READ_CFG_WORD;
0390 return value;
0391 }
0392
0393 #ifdef CONFIG_PCI_IOV
0394 static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
0395 {
0396 struct ixgbe_adapter *adapter = hw->back;
0397 u32 value;
0398
0399 if (ixgbe_removed(hw->hw_addr))
0400 return IXGBE_FAILED_READ_CFG_DWORD;
0401 pci_read_config_dword(adapter->pdev, reg, &value);
0402 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
0403 ixgbe_check_cfg_remove(hw, adapter->pdev))
0404 return IXGBE_FAILED_READ_CFG_DWORD;
0405 return value;
0406 }
0407 #endif
0408
0409 void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
0410 {
0411 struct ixgbe_adapter *adapter = hw->back;
0412
0413 if (ixgbe_removed(hw->hw_addr))
0414 return;
0415 pci_write_config_word(adapter->pdev, reg, value);
0416 }
0417
0418 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
0419 {
0420 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
0421
0422
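/* flush memory to make sure state is correct before next watchdog */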
0423 smp_mb__before_atomic();
0424 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
0425 }
0426
0427 struct ixgbe_reg_info {
0428 u32 ofs;
0429 char *name;
0430 };
0431
0432 static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
0433
0434
0435 {IXGBE_CTRL, "CTRL"},
0436 {IXGBE_STATUS, "STATUS"},
0437 {IXGBE_CTRL_EXT, "CTRL_EXT"},
0438
0439
0440 {IXGBE_EICR, "EICR"},
0441
0442
0443 {IXGBE_SRRCTL(0), "SRRCTL"},
0444 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
0445 {IXGBE_RDLEN(0), "RDLEN"},
0446 {IXGBE_RDH(0), "RDH"},
0447 {IXGBE_RDT(0), "RDT"},
0448 {IXGBE_RXDCTL(0), "RXDCTL"},
0449 {IXGBE_RDBAL(0), "RDBAL"},
0450 {IXGBE_RDBAH(0), "RDBAH"},
0451
0452
0453 {IXGBE_TDBAL(0), "TDBAL"},
0454 {IXGBE_TDBAH(0), "TDBAH"},
0455 {IXGBE_TDLEN(0), "TDLEN"},
0456 {IXGBE_TDH(0), "TDH"},
0457 {IXGBE_TDT(0), "TDT"},
0458 {IXGBE_TXDCTL(0), "TXDCTL"},
0459
0460
0461 { .name = NULL }
0462 };
0463
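/*
 * ixgbe_regdump - register printout routine
 */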
0468 static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
0469 {
0470 int i;
0471 char rname[16];
0472 u32 regs[64];
0473
0474 switch (reginfo->ofs) {
0475 case IXGBE_SRRCTL(0):
0476 for (i = 0; i < 64; i++)
0477 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
0478 break;
0479 case IXGBE_DCA_RXCTRL(0):
0480 for (i = 0; i < 64; i++)
0481 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
0482 break;
0483 case IXGBE_RDLEN(0):
0484 for (i = 0; i < 64; i++)
0485 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
0486 break;
0487 case IXGBE_RDH(0):
0488 for (i = 0; i < 64; i++)
0489 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
0490 break;
0491 case IXGBE_RDT(0):
0492 for (i = 0; i < 64; i++)
0493 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
0494 break;
0495 case IXGBE_RXDCTL(0):
0496 for (i = 0; i < 64; i++)
0497 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
0498 break;
0499 case IXGBE_RDBAL(0):
0500 for (i = 0; i < 64; i++)
0501 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
0502 break;
0503 case IXGBE_RDBAH(0):
0504 for (i = 0; i < 64; i++)
0505 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
0506 break;
0507 case IXGBE_TDBAL(0):
0508 for (i = 0; i < 64; i++)
0509 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
0510 break;
0511 case IXGBE_TDBAH(0):
0512 for (i = 0; i < 64; i++)
0513 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
0514 break;
0515 case IXGBE_TDLEN(0):
0516 for (i = 0; i < 64; i++)
0517 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
0518 break;
0519 case IXGBE_TDH(0):
0520 for (i = 0; i < 64; i++)
0521 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
0522 break;
0523 case IXGBE_TDT(0):
0524 for (i = 0; i < 64; i++)
0525 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
0526 break;
0527 case IXGBE_TXDCTL(0):
0528 for (i = 0; i < 64; i++)
0529 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
0530 break;
0531 default:
0532 pr_info("%-15s %08x\n",
0533 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
0534 return;
0535 }
0536
0537 i = 0;
0538 while (i < 64) {
0539 int j;
0540 char buf[9 * 8 + 1];
0541 char *p = buf;
0542
0543 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
0544 for (j = 0; j < 8; j++)
0545 p += sprintf(p, " %08x", regs[i++]);
0546 pr_err("%-15s%s\n", rname, buf);
0547 }
0548
0549 }
0550
0551 static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
0552 {
0553 struct ixgbe_tx_buffer *tx_buffer;
0554
0555 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
0556 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
0557 n, ring->next_to_use, ring->next_to_clean,
0558 (u64)dma_unmap_addr(tx_buffer, dma),
0559 dma_unmap_len(tx_buffer, len),
0560 tx_buffer->next_to_watch,
0561 (u64)tx_buffer->time_stamp);
0562 }
0563
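/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */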
0567 static void ixgbe_dump(struct ixgbe_adapter *adapter)
0568 {
0569 struct net_device *netdev = adapter->netdev;
0570 struct ixgbe_hw *hw = &adapter->hw;
0571 struct ixgbe_reg_info *reginfo;
0572 int n = 0;
0573 struct ixgbe_ring *ring;
0574 struct ixgbe_tx_buffer *tx_buffer;
0575 union ixgbe_adv_tx_desc *tx_desc;
0576 struct my_u0 { u64 a; u64 b; } *u0;
0577 struct ixgbe_ring *rx_ring;
0578 union ixgbe_adv_rx_desc *rx_desc;
0579 struct ixgbe_rx_buffer *rx_buffer_info;
0580 int i = 0;
0581
0582 if (!netif_msg_hw(adapter))
0583 return;
0584
0585
0586 if (netdev) {
0587 dev_info(&adapter->pdev->dev, "Net device Info\n");
0588 pr_info("Device Name state "
0589 "trans_start\n");
0590 pr_info("%-15s %016lX %016lX\n",
0591 netdev->name,
0592 netdev->state,
0593 dev_trans_start(netdev));
0594 }
0595
0596
0597 dev_info(&adapter->pdev->dev, "Register Dump\n");
0598 pr_info(" Register Name Value\n");
0599 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
0600 reginfo->name; reginfo++) {
0601 ixgbe_regdump(hw, reginfo);
0602 }
0603
0604
0605 if (!netdev || !netif_running(netdev))
0606 return;
0607
0608 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
0609 pr_info(" %s %s %s %s\n",
0610 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
0611 "leng", "ntw", "timestamp");
0612 for (n = 0; n < adapter->num_tx_queues; n++) {
0613 ring = adapter->tx_ring[n];
0614 ixgbe_print_buffer(ring, n);
0615 }
0616
0617 for (n = 0; n < adapter->num_xdp_queues; n++) {
0618 ring = adapter->xdp_ring[n];
0619 ixgbe_print_buffer(ring, n);
0620 }
0621
0622
0623 if (!netif_msg_tx_done(adapter))
0624 goto rx_ring_summary;
0625
0626 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
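
/* Dump each transmit descriptor together with its buffer info, flagging
 * the next-to-use (NTU) and next-to-clean (NTC) entries.
 */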
0663 for (n = 0; n < adapter->num_tx_queues; n++) {
0664 ring = adapter->tx_ring[n];
0665 pr_info("------------------------------------\n");
0666 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
0667 pr_info("------------------------------------\n");
0668 pr_info("%s%s %s %s %s %s\n",
0669 "T [desc] [address 63:0 ] ",
0670 "[PlPOIdStDDt Ln] [bi->dma ] ",
0671 "leng", "ntw", "timestamp", "bi->skb");
0672
0673 for (i = 0; ring->desc && (i < ring->count); i++) {
0674 tx_desc = IXGBE_TX_DESC(ring, i);
0675 tx_buffer = &ring->tx_buffer_info[i];
0676 u0 = (struct my_u0 *)tx_desc;
0677 if (dma_unmap_len(tx_buffer, len) > 0) {
0678 const char *ring_desc;
0679
0680 if (i == ring->next_to_use &&
0681 i == ring->next_to_clean)
0682 ring_desc = " NTC/U";
0683 else if (i == ring->next_to_use)
0684 ring_desc = " NTU";
0685 else if (i == ring->next_to_clean)
0686 ring_desc = " NTC";
0687 else
0688 ring_desc = "";
0689 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
0690 i,
0691 le64_to_cpu((__force __le64)u0->a),
0692 le64_to_cpu((__force __le64)u0->b),
0693 (u64)dma_unmap_addr(tx_buffer, dma),
0694 dma_unmap_len(tx_buffer, len),
0695 tx_buffer->next_to_watch,
0696 (u64)tx_buffer->time_stamp,
0697 tx_buffer->skb,
0698 ring_desc);
0699
0700 if (netif_msg_pktdata(adapter) &&
0701 tx_buffer->skb)
0702 print_hex_dump(KERN_INFO, "",
0703 DUMP_PREFIX_ADDRESS, 16, 1,
0704 tx_buffer->skb->data,
0705 dma_unmap_len(tx_buffer, len),
0706 true);
0707 }
0708 }
0709 }
0710
0711
0712 rx_ring_summary:
0713 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
0714 pr_info("Queue [NTU] [NTC]\n");
0715 for (n = 0; n < adapter->num_rx_queues; n++) {
0716 rx_ring = adapter->rx_ring[n];
0717 pr_info("%5d %5X %5X\n",
0718 n, rx_ring->next_to_use, rx_ring->next_to_clean);
0719 }
0720
0721
0722 if (!netif_msg_rx_status(adapter))
0723 return;
0724
0725 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
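
/* Dump each receive descriptor in both the "Adv Rx Read" and
 * "Adv Rx Write-Back" layouts, flagging the NTU/NTC entries.
 */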
0772 for (n = 0; n < adapter->num_rx_queues; n++) {
0773 rx_ring = adapter->rx_ring[n];
0774 pr_info("------------------------------------\n");
0775 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
0776 pr_info("------------------------------------\n");
0777 pr_info("%s%s%s\n",
0778 "R [desc] [ PktBuf A0] ",
0779 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
0780 "<-- Adv Rx Read format");
0781 pr_info("%s%s%s\n",
0782 "RWB[desc] [PcsmIpSHl PtRs] ",
0783 "[vl er S cks ln] ---------------- [bi->skb ] ",
0784 "<-- Adv Rx Write-Back format");
0785
0786 for (i = 0; i < rx_ring->count; i++) {
0787 const char *ring_desc;
0788
0789 if (i == rx_ring->next_to_use)
0790 ring_desc = " NTU";
0791 else if (i == rx_ring->next_to_clean)
0792 ring_desc = " NTC";
0793 else
0794 ring_desc = "";
0795
0796 rx_buffer_info = &rx_ring->rx_buffer_info[i];
0797 rx_desc = IXGBE_RX_DESC(rx_ring, i);
0798 u0 = (struct my_u0 *)rx_desc;
0799 if (rx_desc->wb.upper.length) {
0800
0801 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
0802 i,
0803 le64_to_cpu((__force __le64)u0->a),
0804 le64_to_cpu((__force __le64)u0->b),
0805 rx_buffer_info->skb,
0806 ring_desc);
0807 } else {
0808 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
0809 i,
0810 le64_to_cpu((__force __le64)u0->a),
0811 le64_to_cpu((__force __le64)u0->b),
0812 (u64)rx_buffer_info->dma,
0813 rx_buffer_info->skb,
0814 ring_desc);
0815
0816 if (netif_msg_pktdata(adapter) &&
0817 rx_buffer_info->dma) {
0818 print_hex_dump(KERN_INFO, "",
0819 DUMP_PREFIX_ADDRESS, 16, 1,
0820 page_address(rx_buffer_info->page) +
0821 rx_buffer_info->page_offset,
0822 ixgbe_rx_bufsz(rx_ring), true);
0823 }
0824 }
0825 }
0826 }
0827 }
0828
0829 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
0830 {
0831 u32 ctrl_ext;
0832
0833
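/* Let firmware take over control of h/w */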
0834 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
0835 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
0836 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
0837 }
0838
0839 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
0840 {
0841 u32 ctrl_ext;
0842
0843
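/* Let firmware know the driver has taken over */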
0844 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
0845 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
0846 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
0847 }
0848
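/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */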
0857 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
0858 u8 queue, u8 msix_vector)
0859 {
0860 u32 ivar, index;
0861 struct ixgbe_hw *hw = &adapter->hw;
0862 switch (hw->mac.type) {
0863 case ixgbe_mac_82598EB:
0864 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
0865 if (direction == -1)
0866 direction = 0;
0867 index = (((direction * 64) + queue) >> 2) & 0x1F;
0868 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
0869 ivar &= ~(0xFF << (8 * (queue & 0x3)));
0870 ivar |= (msix_vector << (8 * (queue & 0x3)));
0871 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
0872 break;
0873 case ixgbe_mac_82599EB:
0874 case ixgbe_mac_X540:
0875 case ixgbe_mac_X550:
0876 case ixgbe_mac_X550EM_x:
0877 case ixgbe_mac_x550em_a:
0878 if (direction == -1) {
0879
0880 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
0881 index = ((queue & 1) * 8);
0882 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
0883 ivar &= ~(0xFF << index);
0884 ivar |= (msix_vector << index);
0885 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
0886 break;
0887 } else {
0888
0889 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
0890 index = ((16 * (queue & 1)) + (8 * direction));
0891 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
0892 ivar &= ~(0xFF << index);
0893 ivar |= (msix_vector << index);
0894 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
0895 break;
0896 }
0897 default:
0898 break;
0899 }
0900 }
0901
0902 void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
0903 u64 qmask)
0904 {
0905 u32 mask;
0906
0907 switch (adapter->hw.mac.type) {
0908 case ixgbe_mac_82598EB:
0909 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
0910 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
0911 break;
0912 case ixgbe_mac_82599EB:
0913 case ixgbe_mac_X540:
0914 case ixgbe_mac_X550:
0915 case ixgbe_mac_X550EM_x:
0916 case ixgbe_mac_x550em_a:
0917 mask = (qmask & 0xFFFFFFFF);
0918 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
0919 mask = (qmask >> 32);
0920 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
0921 break;
0922 default:
0923 break;
0924 }
0925 }
0926
0927 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
0928 {
0929 struct ixgbe_hw *hw = &adapter->hw;
0930 struct ixgbe_hw_stats *hwstats = &adapter->stats;
0931 int i;
0932 u32 data;
0933
0934 if ((hw->fc.current_mode != ixgbe_fc_full) &&
0935 (hw->fc.current_mode != ixgbe_fc_rx_pause))
0936 return;
0937
0938 switch (hw->mac.type) {
0939 case ixgbe_mac_82598EB:
0940 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
0941 break;
0942 default:
0943 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
0944 }
0945 hwstats->lxoffrxc += data;
0946
0947
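/* refill credits (no tx hang) if we received xoff */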
0948 if (!data)
0949 return;
0950
0951 for (i = 0; i < adapter->num_tx_queues; i++)
0952 clear_bit(__IXGBE_HANG_CHECK_ARMED,
0953 &adapter->tx_ring[i]->state);
0954
0955 for (i = 0; i < adapter->num_xdp_queues; i++)
0956 clear_bit(__IXGBE_HANG_CHECK_ARMED,
0957 &adapter->xdp_ring[i]->state);
0958 }
0959
0960 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
0961 {
0962 struct ixgbe_hw *hw = &adapter->hw;
0963 struct ixgbe_hw_stats *hwstats = &adapter->stats;
0964 u32 xoff[8] = {0};
0965 u8 tc;
0966 int i;
0967 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
0968
0969 if (adapter->ixgbe_ieee_pfc)
0970 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
0971
0972 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
0973 ixgbe_update_xoff_rx_lfc(adapter);
0974 return;
0975 }
0976
0977
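/* update stats for each tc, only valid with PFC enabled */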
0978 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
0979 u32 pxoffrxc;
0980
0981 switch (hw->mac.type) {
0982 case ixgbe_mac_82598EB:
0983 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
0984 break;
0985 default:
0986 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
0987 }
0988 hwstats->pxoffrxc[i] += pxoffrxc;
0989
0990 tc = netdev_get_prio_tc_map(adapter->netdev, i);
0991 xoff[tc] += pxoffrxc;
0992 }
0993
0994
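/* disarm tx queues that have received xoff frames */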
0995 for (i = 0; i < adapter->num_tx_queues; i++) {
0996 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
0997
0998 tc = tx_ring->dcb_tc;
0999 if (xoff[tc])
1000 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1001 }
1002
1003 for (i = 0; i < adapter->num_xdp_queues; i++) {
1004 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1005
1006 tc = xdp_ring->dcb_tc;
1007 if (xoff[tc])
1008 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1009 }
1010 }
1011
1012 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1013 {
1014 return ring->stats.packets;
1015 }
1016
1017 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1018 {
1019 unsigned int head, tail;
1020
1021 head = ring->next_to_clean;
1022 tail = ring->next_to_use;
1023
1024 return ((head <= tail) ? tail : tail + ring->count) - head;
1025 }
1026
1027 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1028 {
1029 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1030 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1031 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1032
1033 clear_check_for_tx_hang(tx_ring);
1034
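/* Check for a hung queue, but be thorough.  This verifies that a
 * transmit has been completed since the previous check AND there is at
 * least one packet pending.  The first failed check arms the
 * HANG_CHECK bit; only a second consecutive failure reports a hang,
 * which avoids false positives when pause frames clear the bit
 * in between checks.
 */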
1047 if (tx_done_old == tx_done && tx_pending)
1048
1049 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1050 &tx_ring->state);
1051
1052 tx_ring->tx_stats.tx_done_old = tx_done;
1053
1054 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1055
1056 return false;
1057 }
1058
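/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/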
1063 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1064 {
1065
1066
1067 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1068 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1069 e_warn(drv, "initiating reset due to tx timeout\n");
1070 ixgbe_service_event_schedule(adapter);
1071 }
1072 }
1073
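/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate
 **/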
1080 static int ixgbe_tx_maxrate(struct net_device *netdev,
1081 int queue_index, u32 maxrate)
1082 {
1083 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1084 struct ixgbe_hw *hw = &adapter->hw;
1085 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1086
1087 if (!maxrate)
1088 return 0;
1089
1090
1091 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1092 bcnrc_val /= maxrate;
1093
1094
1095 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1096 IXGBE_RTTBCNRC_RF_DEC_MASK;
1097
1098
1099 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1100
1101 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1102 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1103
1104 return 0;
1105 }
1106
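/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/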
1113 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1114 struct ixgbe_ring *tx_ring, int napi_budget)
1115 {
1116 struct ixgbe_adapter *adapter = q_vector->adapter;
1117 struct ixgbe_tx_buffer *tx_buffer;
1118 union ixgbe_adv_tx_desc *tx_desc;
1119 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1120 unsigned int budget = q_vector->tx.work_limit;
1121 unsigned int i = tx_ring->next_to_clean;
1122
1123 if (test_bit(__IXGBE_DOWN, &adapter->state))
1124 return true;
1125
1126 tx_buffer = &tx_ring->tx_buffer_info[i];
1127 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1128 i -= tx_ring->count;
1129
1130 do {
1131 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1132
1133
1134 if (!eop_desc)
1135 break;
1136
1137
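/* prevent any other reads prior to eop_desc */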
1138 smp_rmb();
1139
1140
1141 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1142 break;
1143
1144
1145 tx_buffer->next_to_watch = NULL;
1146
1147
1148 total_bytes += tx_buffer->bytecount;
1149 total_packets += tx_buffer->gso_segs;
1150 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1151 total_ipsec++;
1152
1153
1154 if (ring_is_xdp(tx_ring))
1155 xdp_return_frame(tx_buffer->xdpf);
1156 else
1157 napi_consume_skb(tx_buffer->skb, napi_budget);
1158
1159
1160 dma_unmap_single(tx_ring->dev,
1161 dma_unmap_addr(tx_buffer, dma),
1162 dma_unmap_len(tx_buffer, len),
1163 DMA_TO_DEVICE);
1164
1165
1166 dma_unmap_len_set(tx_buffer, len, 0);
1167
1168
1169 while (tx_desc != eop_desc) {
1170 tx_buffer++;
1171 tx_desc++;
1172 i++;
1173 if (unlikely(!i)) {
1174 i -= tx_ring->count;
1175 tx_buffer = tx_ring->tx_buffer_info;
1176 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1177 }
1178
1179
1180 if (dma_unmap_len(tx_buffer, len)) {
1181 dma_unmap_page(tx_ring->dev,
1182 dma_unmap_addr(tx_buffer, dma),
1183 dma_unmap_len(tx_buffer, len),
1184 DMA_TO_DEVICE);
1185 dma_unmap_len_set(tx_buffer, len, 0);
1186 }
1187 }
1188
1189
1190 tx_buffer++;
1191 tx_desc++;
1192 i++;
1193 if (unlikely(!i)) {
1194 i -= tx_ring->count;
1195 tx_buffer = tx_ring->tx_buffer_info;
1196 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1197 }
1198
1199
1200 prefetch(tx_desc);
1201
1202
1203 budget--;
1204 } while (likely(budget));
1205
1206 i += tx_ring->count;
1207 tx_ring->next_to_clean = i;
1208 u64_stats_update_begin(&tx_ring->syncp);
1209 tx_ring->stats.bytes += total_bytes;
1210 tx_ring->stats.packets += total_packets;
1211 u64_stats_update_end(&tx_ring->syncp);
1212 q_vector->tx.total_bytes += total_bytes;
1213 q_vector->tx.total_packets += total_packets;
1214 adapter->tx_ipsec += total_ipsec;
1215
1216 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1217
1218 struct ixgbe_hw *hw = &adapter->hw;
1219 e_err(drv, "Detected Tx Unit Hang %s\n"
1220 " Tx Queue <%d>\n"
1221 " TDH, TDT <%x>, <%x>\n"
1222 " next_to_use <%x>\n"
1223 " next_to_clean <%x>\n"
1224 "tx_buffer_info[next_to_clean]\n"
1225 " time_stamp <%lx>\n"
1226 " jiffies <%lx>\n",
1227 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1228 tx_ring->queue_index,
1229 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1230 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1231 tx_ring->next_to_use, i,
1232 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1233
1234 if (!ring_is_xdp(tx_ring))
1235 netif_stop_subqueue(tx_ring->netdev,
1236 tx_ring->queue_index);
1237
1238 e_info(probe,
1239 "tx hang %d detected on queue %d, resetting adapter\n",
1240 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1241
1242
1243 ixgbe_tx_timeout_reset(adapter);
1244
1245
1246 return true;
1247 }
1248
1249 if (ring_is_xdp(tx_ring))
1250 return !!budget;
1251
1252 netdev_tx_completed_queue(txring_txq(tx_ring),
1253 total_packets, total_bytes);
1254
1255 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1256 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1257 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1258
1259
1260
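/* Make sure that anybody stopping the queue after this sees the new
 * next_to_clean.
 */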
1261 smp_mb();
1262 if (__netif_subqueue_stopped(tx_ring->netdev,
1263 tx_ring->queue_index)
1264 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1265 netif_wake_subqueue(tx_ring->netdev,
1266 tx_ring->queue_index);
1267 ++tx_ring->tx_stats.restart_queue;
1268 }
1269 }
1270
1271 return !!budget;
1272 }
1273
1274 #ifdef CONFIG_IXGBE_DCA
1275 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1276 struct ixgbe_ring *tx_ring,
1277 int cpu)
1278 {
1279 struct ixgbe_hw *hw = &adapter->hw;
1280 u32 txctrl = 0;
1281 u16 reg_offset;
1282
1283 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1284 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1285
1286 switch (hw->mac.type) {
1287 case ixgbe_mac_82598EB:
1288 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1289 break;
1290 case ixgbe_mac_82599EB:
1291 case ixgbe_mac_X540:
1292 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1293 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1294 break;
1295 default:
1296
1297 return;
1298 }
1299
1300
1301
1302
1303
1304
1305 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1306 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1307 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1308
1309 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1310 }
1311
1312 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1313 struct ixgbe_ring *rx_ring,
1314 int cpu)
1315 {
1316 struct ixgbe_hw *hw = &adapter->hw;
1317 u32 rxctrl = 0;
1318 u8 reg_idx = rx_ring->reg_idx;
1319
1320 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1321 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1322
1323 switch (hw->mac.type) {
1324 case ixgbe_mac_82599EB:
1325 case ixgbe_mac_X540:
1326 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1327 break;
1328 default:
1329 break;
1330 }
1331
1332
1333
1334
1335
1336
1337 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1338 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1339 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1340
1341 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1342 }
1343
1344 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1345 {
1346 struct ixgbe_adapter *adapter = q_vector->adapter;
1347 struct ixgbe_ring *ring;
1348 int cpu = get_cpu();
1349
1350 if (q_vector->cpu == cpu)
1351 goto out_no_update;
1352
1353 ixgbe_for_each_ring(ring, q_vector->tx)
1354 ixgbe_update_tx_dca(adapter, ring, cpu);
1355
1356 ixgbe_for_each_ring(ring, q_vector->rx)
1357 ixgbe_update_rx_dca(adapter, ring, cpu);
1358
1359 q_vector->cpu = cpu;
1360 out_no_update:
1361 put_cpu();
1362 }
1363
1364 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1365 {
1366 int i;
1367
1368
1369 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1370 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1371 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1372 else
1373 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1374 IXGBE_DCA_CTRL_DCA_DISABLE);
1375
1376 for (i = 0; i < adapter->num_q_vectors; i++) {
1377 adapter->q_vector[i]->cpu = -1;
1378 ixgbe_update_dca(adapter->q_vector[i]);
1379 }
1380 }
1381
1382 static int __ixgbe_notify_dca(struct device *dev, void *data)
1383 {
1384 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1385 unsigned long event = *(unsigned long *)data;
1386
1387 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1388 return 0;
1389
1390 switch (event) {
1391 case DCA_PROVIDER_ADD:
1392
1393 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1394 break;
1395 if (dca_add_requester(dev) == 0) {
1396 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1397 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1398 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1399 break;
1400 }
1401 fallthrough;
1402 case DCA_PROVIDER_REMOVE:
1403 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1404 dca_remove_requester(dev);
1405 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1406 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1407 IXGBE_DCA_CTRL_DCA_DISABLE);
1408 }
1409 break;
1410 }
1411
1412 return 0;
1413 }
1414
1415 #endif
1416
1417 #define IXGBE_RSS_L4_TYPES_MASK \
1418 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1419 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1420 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1421 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1422
1423 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1424 union ixgbe_adv_rx_desc *rx_desc,
1425 struct sk_buff *skb)
1426 {
1427 u16 rss_type;
1428
1429 if (!(ring->netdev->features & NETIF_F_RXHASH))
1430 return;
1431
1432 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1433 IXGBE_RXDADV_RSSTYPE_MASK;
1434
1435 if (!rss_type)
1436 return;
1437
1438 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1439 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1440 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1441 }
1442
1443 #ifdef IXGBE_FCOE
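
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */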
1451 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1452 union ixgbe_adv_rx_desc *rx_desc)
1453 {
1454 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1455
1456 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1457 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1458 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1459 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1460 }
1461
1462 #endif
1463
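/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/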
1469 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1470 union ixgbe_adv_rx_desc *rx_desc,
1471 struct sk_buff *skb)
1472 {
1473 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1474 bool encap_pkt = false;
1475
1476 skb_checksum_none_assert(skb);
1477
1478
1479 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1480 return;
1481
1482
1483 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1484 encap_pkt = true;
1485 skb->encapsulation = 1;
1486 }
1487
1488
1489 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1490 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1491 ring->rx_stats.csum_err++;
1492 return;
1493 }
1494
1495 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1496 return;
1497
1498 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1499
1500
1501
1502
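/* Some hardware can mark UDP frames carrying a zero checksum as
 * checksum errors; ignore the error when the ring has the
 * UDP-zero-checksum workaround flag set.
 */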
1503 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1504 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1505 return;
1506
1507 ring->rx_stats.csum_err++;
1508 return;
1509 }
1510
1511
1512 skb->ip_summed = CHECKSUM_UNNECESSARY;
1513 if (encap_pkt) {
1514 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1515 return;
1516
1517 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1518 skb->ip_summed = CHECKSUM_NONE;
1519 return;
1520 }
1521
1522 skb->csum_level = 1;
1523 }
1524 }
1525
1526 static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1527 {
1528 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1529 }
1530
1531 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1532 struct ixgbe_rx_buffer *bi)
1533 {
1534 struct page *page = bi->page;
1535 dma_addr_t dma;
1536
1537
1538 if (likely(page))
1539 return true;
1540
1541
1542 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1543 if (unlikely(!page)) {
1544 rx_ring->rx_stats.alloc_rx_page_failed++;
1545 return false;
1546 }
1547
1548
1549 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1550 ixgbe_rx_pg_size(rx_ring),
1551 DMA_FROM_DEVICE,
1552 IXGBE_RX_DMA_ATTR);
1553
1554
1555
1556
1557
1558 if (dma_mapping_error(rx_ring->dev, dma)) {
1559 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1560
1561 rx_ring->rx_stats.alloc_rx_page_failed++;
1562 return false;
1563 }
1564
1565 bi->dma = dma;
1566 bi->page = page;
1567 bi->page_offset = rx_ring->rx_offset;
1568 page_ref_add(page, USHRT_MAX - 1);
1569 bi->pagecnt_bias = USHRT_MAX;
1570 rx_ring->rx_stats.alloc_rx_page++;
1571
1572 return true;
1573 }
1574
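/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/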
1580 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1581 {
1582 union ixgbe_adv_rx_desc *rx_desc;
1583 struct ixgbe_rx_buffer *bi;
1584 u16 i = rx_ring->next_to_use;
1585 u16 bufsz;
1586
1587
1588 if (!cleaned_count)
1589 return;
1590
1591 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1592 bi = &rx_ring->rx_buffer_info[i];
1593 i -= rx_ring->count;
1594
1595 bufsz = ixgbe_rx_bufsz(rx_ring);
1596
1597 do {
1598 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1599 break;
1600
1601
1602 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1603 bi->page_offset, bufsz,
1604 DMA_FROM_DEVICE);
1605
1606
1607
1608
1609
1610 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1611
1612 rx_desc++;
1613 bi++;
1614 i++;
1615 if (unlikely(!i)) {
1616 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1617 bi = rx_ring->rx_buffer_info;
1618 i -= rx_ring->count;
1619 }
1620
1621
1622 rx_desc->wb.upper.length = 0;
1623
1624 cleaned_count--;
1625 } while (cleaned_count);
1626
1627 i += rx_ring->count;
1628
1629 if (rx_ring->next_to_use != i) {
1630 rx_ring->next_to_use = i;
1631
1632
1633 rx_ring->next_to_alloc = i;
1634
1635
1636
1637
1638
1639
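/* Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.
 */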
1640 wmb();
1641 writel(i, rx_ring->tail);
1642 }
1643 }
1644
1645 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1646 struct sk_buff *skb)
1647 {
1648 u16 hdr_len = skb_headlen(skb);
1649
1650
1651 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1652 IXGBE_CB(skb)->append_cnt);
1653 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1654 }
1655
1656 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1657 struct sk_buff *skb)
1658 {
1659
1660 if (!IXGBE_CB(skb)->append_cnt)
1661 return;
1662
1663 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1664 rx_ring->rx_stats.rsc_flush++;
1665
1666 ixgbe_set_rsc_gso_size(rx_ring, skb);
1667
1668
1669 IXGBE_CB(skb)->append_cnt = 0;
1670 }
1671
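/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/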
1682 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1683 union ixgbe_adv_rx_desc *rx_desc,
1684 struct sk_buff *skb)
1685 {
1686 struct net_device *dev = rx_ring->netdev;
1687 u32 flags = rx_ring->q_vector->adapter->flags;
1688
1689 ixgbe_update_rsc_stats(rx_ring, skb);
1690
1691 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1692
1693 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1694
1695 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1696 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1697
1698 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1699 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1700 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1701 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1702 }
1703
1704 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1705 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1706
1707
1708 if (netif_is_ixgbe(dev))
1709 skb_record_rx_queue(skb, rx_ring->queue_index);
1710 else
1711 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1712 false);
1713
1714 skb->protocol = eth_type_trans(skb, dev);
1715 }
1716
1717 void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1718 struct sk_buff *skb)
1719 {
1720 napi_gro_receive(&q_vector->napi, skb);
1721 }
1722
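/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/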
1734 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1735 union ixgbe_adv_rx_desc *rx_desc,
1736 struct sk_buff *skb)
1737 {
1738 u32 ntc = rx_ring->next_to_clean + 1;
1739
1740
1741 ntc = (ntc < rx_ring->count) ? ntc : 0;
1742 rx_ring->next_to_clean = ntc;
1743
1744 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1745
1746
1747 if (ring_is_rsc_enabled(rx_ring)) {
1748 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1749 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1750
1751 if (unlikely(rsc_enabled)) {
1752 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1753
1754 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1755 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1756
1757
1758 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1759 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1760 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1761 }
1762 }
1763
1764
1765 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1766 return false;
1767
1768
1769 rx_ring->rx_buffer_info[ntc].skb = skb;
1770 rx_ring->rx_stats.non_eop_descs++;
1771
1772 return true;
1773 }
1774
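/**
 * ixgbe_pull_tail - copy the packet header into the skb linear area
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * Pull the packet header out of the first page fragment and place it in
 * the linear portion of the skb so the stack can access it directly;
 * the fragment offset and lengths are adjusted accordingly.
 */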
1787 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1788 struct sk_buff *skb)
1789 {
1790 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1791 unsigned char *va;
1792 unsigned int pull_len;
1793
1794
1795
1796
1797
1798
1799 va = skb_frag_address(frag);
1800
1801
1802
1803
1804
1805 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1806
1807
1808 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1809
1810
1811 skb_frag_size_sub(frag, pull_len);
1812 skb_frag_off_add(frag, pull_len);
1813 skb->data_len -= pull_len;
1814 skb->tail += pull_len;
1815 }
1816
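/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * Synchronize the first receive fragment (or the build_skb headroom) for
 * CPU use, and unmap the page entirely if it has already been released
 * from the ring.
 */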
1827 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1828 struct sk_buff *skb)
1829 {
1830 if (ring_uses_build_skb(rx_ring)) {
1831 unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
1832 unsigned long offset = (unsigned long)(skb->data) & mask;
1833
1834 dma_sync_single_range_for_cpu(rx_ring->dev,
1835 IXGBE_CB(skb)->dma,
1836 offset,
1837 skb_headlen(skb),
1838 DMA_FROM_DEVICE);
1839 } else {
1840 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1841
1842 dma_sync_single_range_for_cpu(rx_ring->dev,
1843 IXGBE_CB(skb)->dma,
1844 skb_frag_off(frag),
1845 skb_frag_size(frag),
1846 DMA_FROM_DEVICE);
1847 }
1848
1849
1850 if (unlikely(IXGBE_CB(skb)->page_released)) {
1851 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1852 ixgbe_rx_pg_size(rx_ring),
1853 DMA_FROM_DEVICE,
1854 IXGBE_RX_DMA_ATTR);
1855 }
1856 }
1857
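/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid: drop frames with hardware errors (unless
 * RXALL is enabled), pull missing header data into the linear area and
 * pad runt frames.
 *
 * Returns true if an error was encountered and skb was freed.
 **/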
1880 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1881 union ixgbe_adv_rx_desc *rx_desc,
1882 struct sk_buff *skb)
1883 {
1884 struct net_device *netdev = rx_ring->netdev;
1885
1886
1887 if (IS_ERR(skb))
1888 return true;
1889
1890
1891
1892
1893 if (!netdev ||
1894 (unlikely(ixgbe_test_staterr(rx_desc,
1895 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1896 !(netdev->features & NETIF_F_RXALL)))) {
1897 dev_kfree_skb_any(skb);
1898 return true;
1899 }
1900
1901
1902 if (!skb_headlen(skb))
1903 ixgbe_pull_tail(rx_ring, skb);
1904
1905 #ifdef IXGBE_FCOE
1906
1907 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1908 return false;
1909
1910 #endif
1911
1912 if (eth_skb_pad(skb))
1913 return true;
1914
1915 return false;
1916 }
1917
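/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/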
1925 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1926 struct ixgbe_rx_buffer *old_buff)
1927 {
1928 struct ixgbe_rx_buffer *new_buff;
1929 u16 nta = rx_ring->next_to_alloc;
1930
1931 new_buff = &rx_ring->rx_buffer_info[nta];
1932
1933
1934 nta++;
1935 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1936
1937
1938
1939
1940
1941 new_buff->dma = old_buff->dma;
1942 new_buff->page = old_buff->page;
1943 new_buff->page_offset = old_buff->page_offset;
1944 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1945 }
1946
1947 static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
1948 int rx_buffer_pgcnt)
1949 {
1950 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1951 struct page *page = rx_buffer->page;
1952
1953
1954 if (!dev_page_is_reusable(page))
1955 return false;
1956
1957 #if (PAGE_SIZE < 8192)
1958
1959 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
1960 return false;
1961 #else
1962
1963
1964
1965
1966
1967 #define IXGBE_LAST_OFFSET \
1968 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1969 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1970 return false;
1971 #endif
1972
1973
1974
1975
1976
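/* If we have drained the page fragment pool we need to update
 * the pagecnt_bias and page count so that we fully restock the
 * number of references the driver holds.
 */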
1977 if (unlikely(pagecnt_bias == 1)) {
1978 page_ref_add(page, USHRT_MAX - 1);
1979 rx_buffer->pagecnt_bias = USHRT_MAX;
1980 }
1981
1982 return true;
1983 }
1984
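/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * The page is attached as a fragment and the buffer offset is advanced
 * (or flipped) so the other half of the page can be reused.
 **/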
2000 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2001 struct ixgbe_rx_buffer *rx_buffer,
2002 struct sk_buff *skb,
2003 unsigned int size)
2004 {
2005 #if (PAGE_SIZE < 8192)
2006 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2007 #else
2008 unsigned int truesize = rx_ring->rx_offset ?
2009 SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
2010 SKB_DATA_ALIGN(size);
2011 #endif
2012 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2013 rx_buffer->page_offset, size, truesize);
2014 #if (PAGE_SIZE < 8192)
2015 rx_buffer->page_offset ^= truesize;
2016 #else
2017 rx_buffer->page_offset += truesize;
2018 #endif
2019 }
2020
2021 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2022 union ixgbe_adv_rx_desc *rx_desc,
2023 struct sk_buff **skb,
2024 const unsigned int size,
2025 int *rx_buffer_pgcnt)
2026 {
2027 struct ixgbe_rx_buffer *rx_buffer;
2028
2029 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2030 *rx_buffer_pgcnt =
2031 #if (PAGE_SIZE < 8192)
2032 page_count(rx_buffer->page);
2033 #else
2034 0;
2035 #endif
2036 prefetchw(rx_buffer->page);
2037 *skb = rx_buffer->skb;
2038
2039
2040
2041
2042
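/* Delay unmapping of the first packet.  It carries the header
 * information; only sync and unmap it once the EOP descriptor
 * has been seen.
 */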
2043 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2044 if (!*skb)
2045 goto skip_sync;
2046 } else {
2047 if (*skb)
2048 ixgbe_dma_sync_frag(rx_ring, *skb);
2049 }
2050
2051
2052 dma_sync_single_range_for_cpu(rx_ring->dev,
2053 rx_buffer->dma,
2054 rx_buffer->page_offset,
2055 size,
2056 DMA_FROM_DEVICE);
2057 skip_sync:
2058 rx_buffer->pagecnt_bias--;
2059
2060 return rx_buffer;
2061 }
2062
2063 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2064 struct ixgbe_rx_buffer *rx_buffer,
2065 struct sk_buff *skb,
2066 int rx_buffer_pgcnt)
2067 {
2068 if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2069
2070 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2071 } else {
2072 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2073
2074 IXGBE_CB(skb)->page_released = true;
2075 } else {
2076
2077 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2078 ixgbe_rx_pg_size(rx_ring),
2079 DMA_FROM_DEVICE,
2080 IXGBE_RX_DMA_ATTR);
2081 }
2082 __page_frag_cache_drain(rx_buffer->page,
2083 rx_buffer->pagecnt_bias);
2084 }
2085
2086
2087 rx_buffer->page = NULL;
2088 rx_buffer->skb = NULL;
2089 }
2090
2091 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2092 struct ixgbe_rx_buffer *rx_buffer,
2093 struct xdp_buff *xdp,
2094 union ixgbe_adv_rx_desc *rx_desc)
2095 {
2096 unsigned int size = xdp->data_end - xdp->data;
2097 #if (PAGE_SIZE < 8192)
2098 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2099 #else
2100 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2101 xdp->data_hard_start);
2102 #endif
2103 struct sk_buff *skb;
2104
2105
2106 net_prefetch(xdp->data);
2107
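/* Allocate a small skb, copy the packet headers into it, and attach the
 * page as a fragment only when the frame is larger than the header
 * buffer.
 */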
2125 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2126 if (unlikely(!skb))
2127 return NULL;
2128
2129 if (size > IXGBE_RX_HDR_SIZE) {
2130 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2131 IXGBE_CB(skb)->dma = rx_buffer->dma;
2132
2133 skb_add_rx_frag(skb, 0, rx_buffer->page,
2134 xdp->data - page_address(rx_buffer->page),
2135 size, truesize);
2136 #if (PAGE_SIZE < 8192)
2137 rx_buffer->page_offset ^= truesize;
2138 #else
2139 rx_buffer->page_offset += truesize;
2140 #endif
2141 } else {
2142 memcpy(__skb_put(skb, size),
2143 xdp->data, ALIGN(size, sizeof(long)));
2144 rx_buffer->pagecnt_bias++;
2145 }
2146
2147 return skb;
2148 }
2149
2150 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2151 struct ixgbe_rx_buffer *rx_buffer,
2152 struct xdp_buff *xdp,
2153 union ixgbe_adv_rx_desc *rx_desc)
2154 {
2155 unsigned int metasize = xdp->data - xdp->data_meta;
2156 #if (PAGE_SIZE < 8192)
2157 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2158 #else
2159 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2160 SKB_DATA_ALIGN(xdp->data_end -
2161 xdp->data_hard_start);
2162 #endif
2163 struct sk_buff *skb;
2164
2165
2166
2167
2168
2169
2170 net_prefetch(xdp->data_meta);
2171
2172
2173 skb = napi_build_skb(xdp->data_hard_start, truesize);
2174 if (unlikely(!skb))
2175 return NULL;
2176
2177
2178 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2179 __skb_put(skb, xdp->data_end - xdp->data);
2180 if (metasize)
2181 skb_metadata_set(skb, metasize);
2182
2183
2184 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2185 IXGBE_CB(skb)->dma = rx_buffer->dma;
2186
2187
2188 #if (PAGE_SIZE < 8192)
2189 rx_buffer->page_offset ^= truesize;
2190 #else
2191 rx_buffer->page_offset += truesize;
2192 #endif
2193
2194 return skb;
2195 }
2196
2197 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2198 struct ixgbe_ring *rx_ring,
2199 struct xdp_buff *xdp)
2200 {
2201 int err, result = IXGBE_XDP_PASS;
2202 struct bpf_prog *xdp_prog;
2203 struct ixgbe_ring *ring;
2204 struct xdp_frame *xdpf;
2205 u32 act;
2206
2207 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2208
2209 if (!xdp_prog)
2210 goto xdp_out;
2211
2212 prefetchw(xdp->data_hard_start);
2213
2214 act = bpf_prog_run_xdp(xdp_prog, xdp);
2215 switch (act) {
2216 case XDP_PASS:
2217 break;
2218 case XDP_TX:
2219 xdpf = xdp_convert_buff_to_frame(xdp);
2220 if (unlikely(!xdpf))
2221 goto out_failure;
2222 ring = ixgbe_determine_xdp_ring(adapter);
2223 if (static_branch_unlikely(&ixgbe_xdp_locking_key))
2224 spin_lock(&ring->tx_lock);
2225 result = ixgbe_xmit_xdp_ring(ring, xdpf);
2226 if (static_branch_unlikely(&ixgbe_xdp_locking_key))
2227 spin_unlock(&ring->tx_lock);
2228 if (result == IXGBE_XDP_CONSUMED)
2229 goto out_failure;
2230 break;
2231 case XDP_REDIRECT:
2232 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2233 if (err)
2234 goto out_failure;
2235 result = IXGBE_XDP_REDIR;
2236 break;
2237 default:
2238 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
2239 fallthrough;
2240 case XDP_ABORTED:
2241 out_failure:
2242 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2243 fallthrough;
2244 case XDP_DROP:
2245 result = IXGBE_XDP_CONSUMED;
2246 break;
2247 }
2248 xdp_out:
2249 return ERR_PTR(-result);
2250 }
2251
2252 static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
2253 unsigned int size)
2254 {
2255 unsigned int truesize;
2256
2257 #if (PAGE_SIZE < 8192)
2258 truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2259 #else
2260 truesize = rx_ring->rx_offset ?
2261 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
2262 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2263 SKB_DATA_ALIGN(size);
2264 #endif
2265 return truesize;
2266 }
2267
2268 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2269 struct ixgbe_rx_buffer *rx_buffer,
2270 unsigned int size)
2271 {
2272 unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
2273 #if (PAGE_SIZE < 8192)
2274 rx_buffer->page_offset ^= truesize;
2275 #else
2276 rx_buffer->page_offset += truesize;
2277 #endif
2278 }
2279
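/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/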
2293 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2294 struct ixgbe_ring *rx_ring,
2295 const int budget)
2296 {
2297 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2298 struct ixgbe_adapter *adapter = q_vector->adapter;
2299 #ifdef IXGBE_FCOE
2300 int ddp_bytes;
2301 unsigned int mss = 0;
2302 #endif
2303 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2304 unsigned int offset = rx_ring->rx_offset;
2305 unsigned int xdp_xmit = 0;
2306 struct xdp_buff xdp;
2307
2308
2309 #if (PAGE_SIZE < 8192)
2310 frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2311 #endif
2312 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2313
2314 while (likely(total_rx_packets < budget)) {
2315 union ixgbe_adv_rx_desc *rx_desc;
2316 struct ixgbe_rx_buffer *rx_buffer;
2317 struct sk_buff *skb;
2318 int rx_buffer_pgcnt;
2319 unsigned int size;
2320
2321
2322 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2323 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2324 cleaned_count = 0;
2325 }
2326
2327 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2328 size = le16_to_cpu(rx_desc->wb.upper.length);
2329 if (!size)
2330 break;
2331
2332
2333
2334
2335
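/* This memory barrier is needed to keep us from reading
 * any other fields out of the rx_desc until we know the
 * descriptor has been written back
 */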
2336 dma_rmb();
2337
2338 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
2339
2340
2341 if (!skb) {
2342 unsigned char *hard_start;
2343
2344 hard_start = page_address(rx_buffer->page) +
2345 rx_buffer->page_offset - offset;
2346 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
2347 xdp_buff_clear_frags_flag(&xdp);
2348 #if (PAGE_SIZE > 4096)
2349
2350 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
2351 #endif
2352 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2353 }
2354
2355 if (IS_ERR(skb)) {
2356 unsigned int xdp_res = -PTR_ERR(skb);
2357
2358 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2359 xdp_xmit |= xdp_res;
2360 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2361 } else {
2362 rx_buffer->pagecnt_bias++;
2363 }
2364 total_rx_packets++;
2365 total_rx_bytes += size;
2366 } else if (skb) {
2367 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2368 } else if (ring_uses_build_skb(rx_ring)) {
2369 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2370 &xdp, rx_desc);
2371 } else {
2372 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2373 &xdp, rx_desc);
2374 }
2375
2376
2377 if (!skb) {
2378 rx_ring->rx_stats.alloc_rx_buff_failed++;
2379 rx_buffer->pagecnt_bias++;
2380 break;
2381 }
2382
2383 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
2384 cleaned_count++;
2385
2386
2387 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2388 continue;
2389
2390
2391 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2392 continue;
2393
2394
2395 total_rx_bytes += skb->len;
2396
2397
2398 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2399
2400 #ifdef IXGBE_FCOE
2401
2402 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2403 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2404
2405 if (ddp_bytes > 0) {
2406 if (!mss) {
2407 mss = rx_ring->netdev->mtu -
2408 sizeof(struct fcoe_hdr) -
2409 sizeof(struct fc_frame_header) -
2410 sizeof(struct fcoe_crc_eof);
2411 if (mss > 512)
2412 mss &= ~511;
2413 }
2414 total_rx_bytes += ddp_bytes;
2415 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2416 mss);
2417 }
2418 if (!ddp_bytes) {
2419 dev_kfree_skb_any(skb);
2420 continue;
2421 }
2422 }
2423
2424 #endif
2425 ixgbe_rx_skb(q_vector, skb);
2426
2427
2428 total_rx_packets++;
2429 }
2430
2431 if (xdp_xmit & IXGBE_XDP_REDIR)
2432 xdp_do_flush_map();
2433
2434 if (xdp_xmit & IXGBE_XDP_TX) {
2435 struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
2436
2437 ixgbe_xdp_ring_update_tail_locked(ring);
2438 }
2439
2440 u64_stats_update_begin(&rx_ring->syncp);
2441 rx_ring->stats.packets += total_rx_packets;
2442 rx_ring->stats.bytes += total_rx_bytes;
2443 u64_stats_update_end(&rx_ring->syncp);
2444 q_vector->rx.total_packets += total_rx_packets;
2445 q_vector->rx.total_bytes += total_rx_bytes;
2446
2447 return total_rx_packets;
2448 }
2449
2450
2451
2452
2453
2454
2455
2456
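/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/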
2457 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2458 {
2459 struct ixgbe_q_vector *q_vector;
2460 int v_idx;
2461 u32 mask;
2462
2463
2464 if (adapter->num_vfs > 32) {
2465 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2466 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2467 }
2468
2469
2470
2471
2472
2473 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2474 struct ixgbe_ring *ring;
2475 q_vector = adapter->q_vector[v_idx];
2476
2477 ixgbe_for_each_ring(ring, q_vector->rx)
2478 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2479
2480 ixgbe_for_each_ring(ring, q_vector->tx)
2481 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2482
2483 ixgbe_write_eitr(q_vector);
2484 }
2485
2486 switch (adapter->hw.mac.type) {
2487 case ixgbe_mac_82598EB:
2488 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2489 v_idx);
2490 break;
2491 case ixgbe_mac_82599EB:
2492 case ixgbe_mac_X540:
2493 case ixgbe_mac_X550:
2494 case ixgbe_mac_X550EM_x:
2495 case ixgbe_mac_x550em_a:
2496 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2497 break;
2498 default:
2499 break;
2500 }
2501 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2502
2503
2504 mask = IXGBE_EIMS_ENABLE_MASK;
2505 mask &= ~(IXGBE_EIMS_OTHER |
2506 IXGBE_EIMS_MAILBOX |
2507 IXGBE_EIMS_LSC);
2508
2509 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2510 }
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
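/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  The thresholds
 * below trade off latency against bulk throughput.
 **/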
2525 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2526 struct ixgbe_ring_container *ring_container)
2527 {
2528 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2529 IXGBE_ITR_ADAPTIVE_LATENCY;
2530 unsigned int avg_wire_size, packets, bytes;
2531 unsigned long next_update = jiffies;
2532
2533
2534
2535
2536 if (!ring_container->ring)
2537 return;
2538
2539
2540
2541
2542
2543
2544 if (time_after(next_update, ring_container->next_update))
2545 goto clear_counts;
2546
2547 packets = ring_container->total_packets;
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557 if (!packets) {
2558 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2559 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2560 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2561 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2562 goto clear_counts;
2563 }
2564
2565 bytes = ring_container->total_bytes;
2566
2567
2568
2569
2570
2571 if (packets < 4 && bytes < 9000) {
2572 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2573 goto adjust_by_size;
2574 }
2575
2576
2577
2578
2579
2580 if (packets < 48) {
2581 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2582 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2583 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2584 goto clear_counts;
2585 }
2586
2587
2588
2589
2590 if (packets < 96) {
2591 itr = q_vector->itr >> 2;
2592 goto clear_counts;
2593 }
2594
2595
2596
2597
2598
2599 if (packets < 256) {
2600 itr = q_vector->itr >> 3;
2601 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2602 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2603 goto clear_counts;
2604 }
2605
2606
2607
2608
2609
2610
2611
2612 itr = IXGBE_ITR_ADAPTIVE_BULK;
2613
2614 adjust_by_size:
2615
2616
2617
2618
2619
2620 avg_wire_size = bytes / packets;
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637 if (avg_wire_size <= 60) {
2638
2639 avg_wire_size = 5120;
2640 } else if (avg_wire_size <= 316) {
2641
2642 avg_wire_size *= 40;
2643 avg_wire_size += 2720;
2644 } else if (avg_wire_size <= 1084) {
2645
2646 avg_wire_size *= 15;
2647 avg_wire_size += 11452;
2648 } else if (avg_wire_size < 1968) {
2649
2650 avg_wire_size *= 5;
2651 avg_wire_size += 22420;
2652 } else {
2653
2654 avg_wire_size = 32256;
2655 }
2656
2657
2658
2659
2660 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2661 avg_wire_size >>= 1;
2662
2663
2664
2665
2666
2667
2668
2669
2670 switch (q_vector->adapter->link_speed) {
2671 case IXGBE_LINK_SPEED_10GB_FULL:
2672 case IXGBE_LINK_SPEED_100_FULL:
2673 default:
2674 itr += DIV_ROUND_UP(avg_wire_size,
2675 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2676 IXGBE_ITR_ADAPTIVE_MIN_INC;
2677 break;
2678 case IXGBE_LINK_SPEED_2_5GB_FULL:
2679 case IXGBE_LINK_SPEED_1GB_FULL:
2680 case IXGBE_LINK_SPEED_10_FULL:
2681 if (avg_wire_size > 8064)
2682 avg_wire_size = 8064;
2683 itr += DIV_ROUND_UP(avg_wire_size,
2684 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2685 IXGBE_ITR_ADAPTIVE_MIN_INC;
2686 break;
2687 }
2688
2689 clear_counts:
2690
2691 ring_container->itr = itr;
2692
2693
2694 ring_container->next_update = next_update + 1;
2695
2696 ring_container->total_bytes = 0;
2697 ring_container->total_packets = 0;
2698 }
2699
2700
2701
2702
2703
2704
2705
2706
2707
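/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */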
2708 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2709 {
2710 struct ixgbe_adapter *adapter = q_vector->adapter;
2711 struct ixgbe_hw *hw = &adapter->hw;
2712 int v_idx = q_vector->v_idx;
2713 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2714
2715 switch (adapter->hw.mac.type) {
2716 case ixgbe_mac_82598EB:
2717
2718 itr_reg |= (itr_reg << 16);
2719 break;
2720 case ixgbe_mac_82599EB:
2721 case ixgbe_mac_X540:
2722 case ixgbe_mac_X550:
2723 case ixgbe_mac_X550EM_x:
2724 case ixgbe_mac_x550em_a:
2725
2726
2727
2728
2729 itr_reg |= IXGBE_EITR_CNT_WDIS;
2730 break;
2731 default:
2732 break;
2733 }
2734 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2735 }
2736
2737 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2738 {
2739 u32 new_itr;
2740
2741 ixgbe_update_itr(q_vector, &q_vector->tx);
2742 ixgbe_update_itr(q_vector, &q_vector->rx);
2743
2744
2745 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2746
2747
2748 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2749 new_itr <<= 2;
2750
2751 if (new_itr != q_vector->itr) {
2752
2753 q_vector->itr = new_itr;
2754
2755 ixgbe_write_eitr(q_vector);
2756 }
2757 }
2758
2759
2760
2761
2762
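/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/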
2763 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2764 {
2765 struct ixgbe_hw *hw = &adapter->hw;
2766 u32 eicr = adapter->interrupt_event;
2767 s32 rc;
2768
2769 if (test_bit(__IXGBE_DOWN, &adapter->state))
2770 return;
2771
2772 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2773 return;
2774
2775 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2776
2777 switch (hw->device_id) {
2778 case IXGBE_DEV_ID_82599_T3_LOM:
2779
2780
2781
2782
2783
2784
2785
2786 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2787 !(eicr & IXGBE_EICR_LSC))
2788 return;
2789
2790 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2791 u32 speed;
2792 bool link_up = false;
2793
2794 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2795
2796 if (link_up)
2797 return;
2798 }
2799
2800
2801 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2802 return;
2803
2804 break;
2805 case IXGBE_DEV_ID_X550EM_A_1G_T:
2806 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2807 rc = hw->phy.ops.check_overtemp(hw);
2808 if (rc != IXGBE_ERR_OVERTEMP)
2809 return;
2810 break;
2811 default:
2812 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2813 return;
2814 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2815 return;
2816 break;
2817 }
2818 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2819
2820 adapter->interrupt_event = 0;
2821 }
2822
2823 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2824 {
2825 struct ixgbe_hw *hw = &adapter->hw;
2826
2827 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2828 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2829 e_crit(probe, "Fan has stopped, replace the adapter\n");
2830
2831 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2832 }
2833 }
2834
2835 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2836 {
2837 struct ixgbe_hw *hw = &adapter->hw;
2838
2839 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2840 return;
2841
2842 switch (adapter->hw.mac.type) {
2843 case ixgbe_mac_82599EB:
2844
2845
2846
2847
2848 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2849 (eicr & IXGBE_EICR_LSC)) &&
2850 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2851 adapter->interrupt_event = eicr;
2852 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2853 ixgbe_service_event_schedule(adapter);
2854 return;
2855 }
2856 return;
2857 case ixgbe_mac_x550em_a:
2858 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2859 adapter->interrupt_event = eicr;
2860 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2861 ixgbe_service_event_schedule(adapter);
2862 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2863 IXGBE_EICR_GPI_SDP0_X550EM_a);
2864 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2865 IXGBE_EICR_GPI_SDP0_X550EM_a);
2866 }
2867 return;
2868 case ixgbe_mac_X550:
2869 case ixgbe_mac_X540:
2870 if (!(eicr & IXGBE_EICR_TS))
2871 return;
2872 break;
2873 default:
2874 return;
2875 }
2876
2877 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2878 }
2879
2880 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2881 {
2882 switch (hw->mac.type) {
2883 case ixgbe_mac_82598EB:
2884 if (hw->phy.type == ixgbe_phy_nl)
2885 return true;
2886 return false;
2887 case ixgbe_mac_82599EB:
2888 case ixgbe_mac_X550EM_x:
2889 case ixgbe_mac_x550em_a:
2890 switch (hw->mac.ops.get_media_type(hw)) {
2891 case ixgbe_media_type_fiber:
2892 case ixgbe_media_type_fiber_qsfp:
2893 return true;
2894 default:
2895 return false;
2896 }
2897 default:
2898 return false;
2899 }
2900 }
2901
2902 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2903 {
2904 struct ixgbe_hw *hw = &adapter->hw;
2905 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2906
2907 if (!ixgbe_is_sfp(hw))
2908 return;
2909
2910
2911 if (hw->mac.type >= ixgbe_mac_X540)
2912 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2913
2914 if (eicr & eicr_mask) {
2915
2916 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2917 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2918 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2919 adapter->sfp_poll_time = 0;
2920 ixgbe_service_event_schedule(adapter);
2921 }
2922 }
2923
2924 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2925 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2926
2927 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2928 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2929 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2930 ixgbe_service_event_schedule(adapter);
2931 }
2932 }
2933 }
2934
2935 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2936 {
2937 struct ixgbe_hw *hw = &adapter->hw;
2938
2939 adapter->lsc_int++;
2940 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2941 adapter->link_check_timeout = jiffies;
2942 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2943 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2944 IXGBE_WRITE_FLUSH(hw);
2945 ixgbe_service_event_schedule(adapter);
2946 }
2947 }
2948
2949 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2950 u64 qmask)
2951 {
2952 u32 mask;
2953 struct ixgbe_hw *hw = &adapter->hw;
2954
2955 switch (hw->mac.type) {
2956 case ixgbe_mac_82598EB:
2957 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2958 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2959 break;
2960 case ixgbe_mac_82599EB:
2961 case ixgbe_mac_X540:
2962 case ixgbe_mac_X550:
2963 case ixgbe_mac_X550EM_x:
2964 case ixgbe_mac_x550em_a:
2965 mask = (qmask & 0xFFFFFFFF);
2966 if (mask)
2967 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2968 mask = (qmask >> 32);
2969 if (mask)
2970 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2971 break;
2972 default:
2973 break;
2974 }
2975
2976 }
2977
2978
2979
2980
2981
2982
2983
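/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/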
2984 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2985 bool flush)
2986 {
2987 struct ixgbe_hw *hw = &adapter->hw;
2988 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2989
2990
2991 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2992 mask &= ~IXGBE_EIMS_LSC;
2993
2994 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2995 switch (adapter->hw.mac.type) {
2996 case ixgbe_mac_82599EB:
2997 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2998 break;
2999 case ixgbe_mac_X540:
3000 case ixgbe_mac_X550:
3001 case ixgbe_mac_X550EM_x:
3002 case ixgbe_mac_x550em_a:
3003 mask |= IXGBE_EIMS_TS;
3004 break;
3005 default:
3006 break;
3007 }
3008 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3009 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3010 switch (adapter->hw.mac.type) {
3011 case ixgbe_mac_82599EB:
3012 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3013 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3014 fallthrough;
3015 case ixgbe_mac_X540:
3016 case ixgbe_mac_X550:
3017 case ixgbe_mac_X550EM_x:
3018 case ixgbe_mac_x550em_a:
3019 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3020 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3021 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3022 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3023 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3024 mask |= IXGBE_EICR_GPI_SDP0_X540;
3025 mask |= IXGBE_EIMS_ECC;
3026 mask |= IXGBE_EIMS_MAILBOX;
3027 break;
3028 default:
3029 break;
3030 }
3031
3032 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3033 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3034 mask |= IXGBE_EIMS_FLOW_DIR;
3035
3036 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3037 if (queues)
3038 ixgbe_irq_enable_queues(adapter, ~0);
3039 if (flush)
3040 IXGBE_WRITE_FLUSH(&adapter->hw);
3041 }
3042
3043 static irqreturn_t ixgbe_msix_other(int irq, void *data)
3044 {
3045 struct ixgbe_adapter *adapter = data;
3046 struct ixgbe_hw *hw = &adapter->hw;
3047 u32 eicr;
3048
3049
3050
3051
3052
3053
3054
3055 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3056
3057
3058
3059
3060
3061
3062
3063
3064 eicr &= 0xFFFF0000;
3065
3066 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3067
3068 if (eicr & IXGBE_EICR_LSC)
3069 ixgbe_check_lsc(adapter);
3070
3071 if (eicr & IXGBE_EICR_MAILBOX)
3072 ixgbe_msg_task(adapter);
3073
3074 switch (hw->mac.type) {
3075 case ixgbe_mac_82599EB:
3076 case ixgbe_mac_X540:
3077 case ixgbe_mac_X550:
3078 case ixgbe_mac_X550EM_x:
3079 case ixgbe_mac_x550em_a:
3080 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3081 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3082 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3083 ixgbe_service_event_schedule(adapter);
3084 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3085 IXGBE_EICR_GPI_SDP0_X540);
3086 }
3087 if (eicr & IXGBE_EICR_ECC) {
3088 e_info(link, "Received ECC Err, initiating reset\n");
3089 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3090 ixgbe_service_event_schedule(adapter);
3091 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3092 }
3093
3094 if (eicr & IXGBE_EICR_FLOW_DIR) {
3095 int reinit_count = 0;
3096 int i;
3097 for (i = 0; i < adapter->num_tx_queues; i++) {
3098 struct ixgbe_ring *ring = adapter->tx_ring[i];
3099 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3100 &ring->state))
3101 reinit_count++;
3102 }
3103 if (reinit_count) {
3104
3105 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3106 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3107 ixgbe_service_event_schedule(adapter);
3108 }
3109 }
3110 ixgbe_check_sfp_event(adapter, eicr);
3111 ixgbe_check_overtemp_event(adapter, eicr);
3112 break;
3113 default:
3114 break;
3115 }
3116
3117 ixgbe_check_fan_failure(adapter, eicr);
3118
3119 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3120 ixgbe_ptp_check_pps_event(adapter);
3121
3122
3123 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3124 ixgbe_irq_enable(adapter, false, false);
3125
3126 return IRQ_HANDLED;
3127 }
3128
3129 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3130 {
3131 struct ixgbe_q_vector *q_vector = data;
3132
3133
3134
3135 if (q_vector->rx.ring || q_vector->tx.ring)
3136 napi_schedule_irqoff(&q_vector->napi);
3137
3138 return IRQ_HANDLED;
3139 }
3140
3141
3142
3143
3144
3145
3146
3147
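/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/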
3148 int ixgbe_poll(struct napi_struct *napi, int budget)
3149 {
3150 struct ixgbe_q_vector *q_vector =
3151 container_of(napi, struct ixgbe_q_vector, napi);
3152 struct ixgbe_adapter *adapter = q_vector->adapter;
3153 struct ixgbe_ring *ring;
3154 int per_ring_budget, work_done = 0;
3155 bool clean_complete = true;
3156
3157 #ifdef CONFIG_IXGBE_DCA
3158 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3159 ixgbe_update_dca(q_vector);
3160 #endif
3161
3162 ixgbe_for_each_ring(ring, q_vector->tx) {
3163 bool wd = ring->xsk_pool ?
3164 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3165 ixgbe_clean_tx_irq(q_vector, ring, budget);
3166
3167 if (!wd)
3168 clean_complete = false;
3169 }
3170
3171
3172 if (budget <= 0)
3173 return budget;
3174
3175
3176
3177 if (q_vector->rx.count > 1)
3178 per_ring_budget = max(budget/q_vector->rx.count, 1);
3179 else
3180 per_ring_budget = budget;
3181
3182 ixgbe_for_each_ring(ring, q_vector->rx) {
3183 int cleaned = ring->xsk_pool ?
3184 ixgbe_clean_rx_irq_zc(q_vector, ring,
3185 per_ring_budget) :
3186 ixgbe_clean_rx_irq(q_vector, ring,
3187 per_ring_budget);
3188
3189 work_done += cleaned;
3190 if (cleaned >= per_ring_budget)
3191 clean_complete = false;
3192 }
3193
3194
3195 if (!clean_complete)
3196 return budget;
3197
3198
3199 if (likely(napi_complete_done(napi, work_done))) {
3200 if (adapter->rx_itr_setting & 1)
3201 ixgbe_set_itr(q_vector);
3202 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3203 ixgbe_irq_enable_queues(adapter,
3204 BIT_ULL(q_vector->v_idx));
3205 }
3206
3207 return min(work_done, budget - 1);
3208 }
3209
3210
3211
3212
3213
3214
3215
3216
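/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs requests interrupts from the kernel for the
 * previously allocated MSI-X vectors.
 **/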
3217 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3218 {
3219 struct net_device *netdev = adapter->netdev;
3220 unsigned int ri = 0, ti = 0;
3221 int vector, err;
3222
3223 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3224 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3225 struct msix_entry *entry = &adapter->msix_entries[vector];
3226
3227 if (q_vector->tx.ring && q_vector->rx.ring) {
3228 snprintf(q_vector->name, sizeof(q_vector->name),
3229 "%s-TxRx-%u", netdev->name, ri++);
3230 ti++;
3231 } else if (q_vector->rx.ring) {
3232 snprintf(q_vector->name, sizeof(q_vector->name),
3233 "%s-rx-%u", netdev->name, ri++);
3234 } else if (q_vector->tx.ring) {
3235 snprintf(q_vector->name, sizeof(q_vector->name),
3236 "%s-tx-%u", netdev->name, ti++);
3237 } else {
3238
3239 continue;
3240 }
3241 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3242 q_vector->name, q_vector);
3243 if (err) {
3244 e_err(probe, "request_irq failed for MSIX interrupt Error: %d\n",
3245 err);
3246 goto free_queue_irqs;
3247 }
3248
3249 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3250
3251 irq_update_affinity_hint(entry->vector,
3252 &q_vector->affinity_mask);
3253 }
3254 }
3255
3256 err = request_irq(adapter->msix_entries[vector].vector,
3257 ixgbe_msix_other, 0, netdev->name, adapter);
3258 if (err) {
3259 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3260 goto free_queue_irqs;
3261 }
3262
3263 return 0;
3264
3265 free_queue_irqs:
3266 while (vector) {
3267 vector--;
3268 irq_update_affinity_hint(adapter->msix_entries[vector].vector,
3269 NULL);
3270 free_irq(adapter->msix_entries[vector].vector,
3271 adapter->q_vector[vector]);
3272 }
3273 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3274 pci_disable_msix(adapter->pdev);
3275 kfree(adapter->msix_entries);
3276 adapter->msix_entries = NULL;
3277 return err;
3278 }
3279
3280
3281
3282
3283
3284
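/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/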
3285 static irqreturn_t ixgbe_intr(int irq, void *data)
3286 {
3287 struct ixgbe_adapter *adapter = data;
3288 struct ixgbe_hw *hw = &adapter->hw;
3289 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3290 u32 eicr;
3291
3292
3293
3294
3295
3296 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3297
3298
3299
3300 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3301 if (!eicr) {
3302
3303
3304
3305
3306
3307
3308
3309 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3310 ixgbe_irq_enable(adapter, true, true);
3311 return IRQ_NONE;
3312 }
3313
3314 if (eicr & IXGBE_EICR_LSC)
3315 ixgbe_check_lsc(adapter);
3316
3317 switch (hw->mac.type) {
3318 case ixgbe_mac_82599EB:
3319 ixgbe_check_sfp_event(adapter, eicr);
3320 fallthrough;
3321 case ixgbe_mac_X540:
3322 case ixgbe_mac_X550:
3323 case ixgbe_mac_X550EM_x:
3324 case ixgbe_mac_x550em_a:
3325 if (eicr & IXGBE_EICR_ECC) {
3326 e_info(link, "Received ECC Err, initiating reset\n");
3327 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3328 ixgbe_service_event_schedule(adapter);
3329 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3330 }
3331 ixgbe_check_overtemp_event(adapter, eicr);
3332 break;
3333 default:
3334 break;
3335 }
3336
3337 ixgbe_check_fan_failure(adapter, eicr);
3338 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3339 ixgbe_ptp_check_pps_event(adapter);
3340
3341
3342 napi_schedule_irqoff(&q_vector->napi);
3343
3344
3345
3346
3347
3348 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3349 ixgbe_irq_enable(adapter, false, false);
3350
3351 return IRQ_HANDLED;
3352 }
3353
3354
3355
3356
3357
3358
3359
3360
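/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/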
3361 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3362 {
3363 struct net_device *netdev = adapter->netdev;
3364 int err;
3365
3366 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3367 err = ixgbe_request_msix_irqs(adapter);
3368 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3369 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3370 netdev->name, adapter);
3371 else
3372 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3373 netdev->name, adapter);
3374
3375 if (err)
3376 e_err(probe, "request_irq failed, Error %d\n", err);
3377
3378 return err;
3379 }
3380
3381 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3382 {
3383 int vector;
3384
3385 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3386 free_irq(adapter->pdev->irq, adapter);
3387 return;
3388 }
3389
3390 if (!adapter->msix_entries)
3391 return;
3392
3393 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3394 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3395 struct msix_entry *entry = &adapter->msix_entries[vector];
3396
3397
3398 if (!q_vector->rx.ring && !q_vector->tx.ring)
3399 continue;
3400
3401
3402 irq_update_affinity_hint(entry->vector, NULL);
3403
3404 free_irq(entry->vector, q_vector);
3405 }
3406
3407 free_irq(adapter->msix_entries[vector].vector, adapter);
3408 }
3409
3410
3411
3412
3413
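/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/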
3414 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3415 {
3416 switch (adapter->hw.mac.type) {
3417 case ixgbe_mac_82598EB:
3418 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3419 break;
3420 case ixgbe_mac_82599EB:
3421 case ixgbe_mac_X540:
3422 case ixgbe_mac_X550:
3423 case ixgbe_mac_X550EM_x:
3424 case ixgbe_mac_x550em_a:
3425 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3427 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3428 break;
3429 default:
3430 break;
3431 }
3432 IXGBE_WRITE_FLUSH(&adapter->hw);
3433 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3434 int vector;
3435
3436 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3437 synchronize_irq(adapter->msix_entries[vector].vector);
3438
3439 synchronize_irq(adapter->msix_entries[vector++].vector);
3440 } else {
3441 synchronize_irq(adapter->pdev->irq);
3442 }
3443 }
3444
3445
3446
3447
3448
3449
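/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/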
3450 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3451 {
3452 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3453
3454 ixgbe_write_eitr(q_vector);
3455
3456 ixgbe_set_ivar(adapter, 0, 0, 0);
3457 ixgbe_set_ivar(adapter, 1, 0, 0);
3458
3459 e_info(hw, "Legacy interrupt IVAR setup done\n");
3460 }
3461
3462
3463
3464
3465
3466
3467
3468
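/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/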
3469 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3470 struct ixgbe_ring *ring)
3471 {
3472 struct ixgbe_hw *hw = &adapter->hw;
3473 u64 tdba = ring->dma;
3474 int wait_loop = 10;
3475 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3476 u8 reg_idx = ring->reg_idx;
3477
3478 ring->xsk_pool = NULL;
3479 if (ring_is_xdp(ring))
3480 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
3481
3482
3483 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3484 IXGBE_WRITE_FLUSH(hw);
3485
3486 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3487 (tdba & DMA_BIT_MASK(32)));
3488 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3489 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3490 ring->count * sizeof(union ixgbe_adv_tx_desc));
3491 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3492 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3493 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3506 txdctl |= 1u << 16;	/* WTHRESH = 1 */
3507 else
3508 txdctl |= 8u << 16;	/* WTHRESH = 8 */
3509
3510
3511
3512
3513
3514 txdctl |= (1u << 8) |	/* HTHRESH = 1 */
3515 32;			/* PTHRESH = 32 */
3516
3517
3518 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3519 ring->atr_sample_rate = adapter->atr_sample_rate;
3520 ring->atr_count = 0;
3521 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3522 } else {
3523 ring->atr_sample_rate = 0;
3524 }
3525
3526
3527 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3528 struct ixgbe_q_vector *q_vector = ring->q_vector;
3529
3530 if (q_vector)
3531 netif_set_xps_queue(ring->netdev,
3532 &q_vector->affinity_mask,
3533 ring->queue_index);
3534 }
3535
3536 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3537
3538
3539 memset(ring->tx_buffer_info, 0,
3540 sizeof(struct ixgbe_tx_buffer) * ring->count);
3541
3542
3543 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3544
3545
3546 if (hw->mac.type == ixgbe_mac_82598EB &&
3547 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3548 return;
3549
3550
3551 do {
3552 usleep_range(1000, 2000);
3553 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3554 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3555 if (!wait_loop)
3556 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3557 }
3558
3559 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3560 {
3561 struct ixgbe_hw *hw = &adapter->hw;
3562 u32 rttdcs, mtqc;
3563 u8 tcs = adapter->hw_tcs;
3564
3565 if (hw->mac.type == ixgbe_mac_82598EB)
3566 return;
3567
3568
3569 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3570 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3571 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3572
3573
3574 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3575 mtqc = IXGBE_MTQC_VT_ENA;
3576 if (tcs > 4)
3577 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3578 else if (tcs > 1)
3579 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3580 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3581 IXGBE_82599_VMDQ_4Q_MASK)
3582 mtqc |= IXGBE_MTQC_32VF;
3583 else
3584 mtqc |= IXGBE_MTQC_64VF;
3585 } else {
3586 if (tcs > 4) {
3587 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3588 } else if (tcs > 1) {
3589 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3590 } else {
3591 u8 max_txq = adapter->num_tx_queues +
3592 adapter->num_xdp_queues;
3593 if (max_txq > 63)
3594 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3595 else
3596 mtqc = IXGBE_MTQC_64Q_1PB;
3597 }
3598 }
3599
3600 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3601
3602
3603 if (tcs) {
3604 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3605 sectx |= IXGBE_SECTX_DCB;
3606 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3607 }
3608
3609
3610 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3611 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3612 }
3613
3614
3615
3616
3617
3618
3619
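/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/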
3620 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3621 {
3622 struct ixgbe_hw *hw = &adapter->hw;
3623 u32 dmatxctl;
3624 u32 i;
3625
3626 ixgbe_setup_mtqc(adapter);
3627
3628 if (hw->mac.type != ixgbe_mac_82598EB) {
3629
3630 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3631 dmatxctl |= IXGBE_DMATXCTL_TE;
3632 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3633 }
3634
3635
3636 for (i = 0; i < adapter->num_tx_queues; i++)
3637 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3638 for (i = 0; i < adapter->num_xdp_queues; i++)
3639 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3640 }
3641
3642 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3643 struct ixgbe_ring *ring)
3644 {
3645 struct ixgbe_hw *hw = &adapter->hw;
3646 u8 reg_idx = ring->reg_idx;
3647 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3648
3649 srrctl |= IXGBE_SRRCTL_DROP_EN;
3650
3651 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3652 }
3653
3654 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3655 struct ixgbe_ring *ring)
3656 {
3657 struct ixgbe_hw *hw = &adapter->hw;
3658 u8 reg_idx = ring->reg_idx;
3659 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3660
3661 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3662
3663 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3664 }
3665
3666 #ifdef CONFIG_IXGBE_DCB
3667 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3668 #else
3669 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3670 #endif
3671 {
3672 int i;
3673 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3674
3675 if (adapter->ixgbe_ieee_pfc)
3676 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3688 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3689 for (i = 0; i < adapter->num_rx_queues; i++)
3690 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3691 } else {
3692 for (i = 0; i < adapter->num_rx_queues; i++)
3693 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3694 }
3695 }
3696
3697 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3698
3699 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3700 struct ixgbe_ring *rx_ring)
3701 {
3702 struct ixgbe_hw *hw = &adapter->hw;
3703 u32 srrctl;
3704 u8 reg_idx = rx_ring->reg_idx;
3705
3706 if (hw->mac.type == ixgbe_mac_82598EB) {
3707 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3708
3709
3710
3711
3712
3713 reg_idx &= mask;
3714 }
3715
3716
3717 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3718
3719
3720 if (rx_ring->xsk_pool) {
3721 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731 if (hw->mac.type != ixgbe_mac_82599EB)
3732 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3733 else
3734 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3735 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3736 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3737 } else {
3738 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3739 }
3740
3741
3742 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3743
3744 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3745 }
3746
3747
3748
3749
3750
3751
3752
3753
3754
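/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 */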
3755 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3756 {
3757 if (adapter->hw.mac.type < ixgbe_mac_X550)
3758 return 128;
3759 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3760 return 64;
3761 else
3762 return 512;
3763 }
3764
3765
3766
3767
3768
3769
3770
3771 void ixgbe_store_key(struct ixgbe_adapter *adapter)
3772 {
3773 struct ixgbe_hw *hw = &adapter->hw;
3774 int i;
3775
3776 for (i = 0; i < 10; i++)
3777 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3778 }
3779
3780
3781
3782
3783
3784
3785
3786 static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3787 {
3788 u32 *rss_key;
3789
3790 if (!adapter->rss_key) {
3791 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3792 if (unlikely(!rss_key))
3793 return -ENOMEM;
3794
3795 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3796 adapter->rss_key = rss_key;
3797 }
3798
3799 return 0;
3800 }
3801
3802
3803
3804
3805
3806
3807
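/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */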
3808 void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3809 {
3810 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3811 struct ixgbe_hw *hw = &adapter->hw;
3812 u32 reta = 0;
3813 u32 indices_multi;
3814 u8 *indir_tbl = adapter->rss_indir_tbl;
3815
3816
3817
3818
3819
3820
3821
3822 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3823 indices_multi = 0x11;
3824 else
3825 indices_multi = 0x1;
3826
3827
3828 for (i = 0; i < reta_entries; i++) {
3829 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3830 if ((i & 3) == 3) {
3831 if (i < 128)
3832 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3833 else
3834 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3835 reta);
3836 reta = 0;
3837 }
3838 }
3839 }
3840
3841
3842
3843
3844
3845
3846
3847 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3848 {
3849 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3850 struct ixgbe_hw *hw = &adapter->hw;
3851 u32 vfreta = 0;
3852
3853
3854 for (i = 0; i < reta_entries; i++) {
3855 u16 pool = adapter->num_rx_pools;
3856
3857 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3858 if ((i & 3) != 3)
3859 continue;
3860
3861 while (pool--)
3862 IXGBE_WRITE_REG(hw,
3863 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3864 vfreta);
3865 vfreta = 0;
3866 }
3867 }
3868
3869 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3870 {
3871 u32 i, j;
3872 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3873 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3874
3875
3876
3877
3878
3879 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3880 rss_i = 4;
3881
3882
3883 ixgbe_store_key(adapter);
3884
3885
3886 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3887
3888 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3889 if (j == rss_i)
3890 j = 0;
3891
3892 adapter->rss_indir_tbl[i] = j;
3893 }
3894
3895 ixgbe_store_reta(adapter);
3896 }
3897
3898 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3899 {
3900 struct ixgbe_hw *hw = &adapter->hw;
3901 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3902 int i, j;
3903
3904
3905 for (i = 0; i < 10; i++) {
3906 u16 pool = adapter->num_rx_pools;
3907
3908 while (pool--)
3909 IXGBE_WRITE_REG(hw,
3910 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3911 *(adapter->rss_key + i));
3912 }
3913
3914
3915 for (i = 0, j = 0; i < 64; i++, j++) {
3916 if (j == rss_i)
3917 j = 0;
3918
3919 adapter->rss_indir_tbl[i] = j;
3920 }
3921
3922 ixgbe_store_vfreta(adapter);
3923 }
3924
3925 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3926 {
3927 struct ixgbe_hw *hw = &adapter->hw;
3928 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3929 u32 rxcsum;
3930
3931
3932 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3933 rxcsum |= IXGBE_RXCSUM_PCSD;
3934 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3935
3936 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3937 if (adapter->ring_feature[RING_F_RSS].mask)
3938 mrqc = IXGBE_MRQC_RSSEN;
3939 } else {
3940 u8 tcs = adapter->hw_tcs;
3941
3942 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3943 if (tcs > 4)
3944 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3945 else if (tcs > 1)
3946 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3947 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3948 IXGBE_82599_VMDQ_4Q_MASK)
3949 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3950 else
3951 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3952
3953
3954
3955
3956 if (hw->mac.type >= ixgbe_mac_X550)
3957 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3958 } else {
3959 if (tcs > 4)
3960 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3961 else if (tcs > 1)
3962 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3963 else
3964 mrqc = IXGBE_MRQC_RSSEN;
3965 }
3966 }
3967
3968
3969 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3970 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3971 IXGBE_MRQC_RSS_FIELD_IPV6 |
3972 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3973
3974 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3975 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3976 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3977 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3978
3979 if ((hw->mac.type >= ixgbe_mac_X550) &&
3980 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3981 u16 pool = adapter->num_rx_pools;
3982
3983
3984 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3985 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3986
3987
3988 ixgbe_setup_vfreta(adapter);
3989 vfmrqc = IXGBE_MRQC_RSSEN;
3990 vfmrqc |= rss_field;
3991
3992 while (pool--)
3993 IXGBE_WRITE_REG(hw,
3994 IXGBE_PFVFMRQC(VMDQ_P(pool)),
3995 vfmrqc);
3996 } else {
3997 ixgbe_setup_reta(adapter);
3998 mrqc |= rss_field;
3999 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4000 }
4001 }
4002
4003
4004
4005
4006
4007
4008 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4009 struct ixgbe_ring *ring)
4010 {
4011 struct ixgbe_hw *hw = &adapter->hw;
4012 u32 rscctrl;
4013 u8 reg_idx = ring->reg_idx;
4014
4015 if (!ring_is_rsc_enabled(ring))
4016 return;
4017
4018 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4019 rscctrl |= IXGBE_RSCCTL_RSCEN;
4020
4021
4022
4023
4024
4025 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4026 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4027 }
4028
4029 #define IXGBE_MAX_RX_DESC_POLL 10
4030 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4031 struct ixgbe_ring *ring)
4032 {
4033 struct ixgbe_hw *hw = &adapter->hw;
4034 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4035 u32 rxdctl;
4036 u8 reg_idx = ring->reg_idx;
4037
4038 if (ixgbe_removed(hw->hw_addr))
4039 return;
4040
4041 if (hw->mac.type == ixgbe_mac_82598EB &&
4042 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4043 return;
4044
4045 do {
4046 usleep_range(1000, 2000);
4047 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4048 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4049
4050 if (!wait_loop) {
4051 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
4052 reg_idx);
4053 }
4054 }
4055
4056 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4057 struct ixgbe_ring *ring)
4058 {
4059 struct ixgbe_hw *hw = &adapter->hw;
4060 union ixgbe_adv_rx_desc *rx_desc;
4061 u64 rdba = ring->dma;
4062 u32 rxdctl;
4063 u8 reg_idx = ring->reg_idx;
4064
4065 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4066 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
4067 if (ring->xsk_pool) {
4068 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4069 MEM_TYPE_XSK_BUFF_POOL,
4070 NULL));
4071 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
4072 } else {
4073 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4074 MEM_TYPE_PAGE_SHARED, NULL));
4075 }
4076
4077
4078 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4079 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4080
4081
4082 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4083 IXGBE_WRITE_FLUSH(hw);
4084
4085 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4086 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4087 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4088 ring->count * sizeof(union ixgbe_adv_rx_desc));
4089
4090 IXGBE_WRITE_FLUSH(hw);
4091
4092 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4093 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4094 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4095
4096 ixgbe_configure_srrctl(adapter, ring);
4097 ixgbe_configure_rscctl(adapter, ring);
4098
4099 if (hw->mac.type == ixgbe_mac_82598EB) {
4100
4101
4102
4103
4104
4105
4106
4107 rxdctl &= ~0x3FFFFF;	/* clear PTHRESH/HTHRESH/WTHRESH fields */
4108 rxdctl |= 0x080420;	/* WTHRESH = 8, HTHRESH = 4, PTHRESH = 32 */
4109 #if (PAGE_SIZE < 8192)
4110
4111 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4112 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4113 IXGBE_RXDCTL_RLPML_EN);
4114
4115
4116
4117
4118
4119 if (ring_uses_build_skb(ring) &&
4120 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4121 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4122 IXGBE_RXDCTL_RLPML_EN;
4123 #endif
4124 }
4125
4126 ring->rx_offset = ixgbe_rx_offset(ring);
4127
4128 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
4129 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
4130
4131 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4132 IXGBE_RXDCTL_RLPML_EN);
4133 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4134
4135 ring->rx_buf_len = xsk_buf_len;
4136 }
4137
4138
4139 memset(ring->rx_buffer_info, 0,
4140 sizeof(struct ixgbe_rx_buffer) * ring->count);
4141
4142
4143 rx_desc = IXGBE_RX_DESC(ring, 0);
4144 rx_desc->wb.upper.length = 0;
4145
4146
4147 rxdctl |= IXGBE_RXDCTL_ENABLE;
4148 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4149
4150 ixgbe_rx_desc_queue_enable(adapter, ring);
4151 if (ring->xsk_pool)
4152 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4153 else
4154 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4155 }
4156
4157 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4158 {
4159 struct ixgbe_hw *hw = &adapter->hw;
4160 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4161 u16 pool = adapter->num_rx_pools;
4162
4163
4164 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4165 IXGBE_PSRTYPE_UDPHDR |
4166 IXGBE_PSRTYPE_IPV4HDR |
4167 IXGBE_PSRTYPE_L2HDR |
4168 IXGBE_PSRTYPE_IPV6HDR;
4169
4170 if (hw->mac.type == ixgbe_mac_82598EB)
4171 return;
4172
4173 if (rss_i > 3)
4174 psrtype |= 2u << 29;
4175 else if (rss_i > 1)
4176 psrtype |= 1u << 29;
4177
4178 while (pool--)
4179 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4180 }
4181
4182 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4183 {
4184 struct ixgbe_hw *hw = &adapter->hw;
4185 u16 pool = adapter->num_rx_pools;
4186 u32 reg_offset, vf_shift, vmolr;
4187 u32 gcr_ext, vmdctl;
4188 int i;
4189
4190 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4191 return;
4192
4193 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4194 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4195 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4196 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4197 vmdctl |= IXGBE_VT_CTL_REPLEN;
4198 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4199
4200
4201
4202
4203 vmolr = IXGBE_VMOLR_AUPE;
4204 while (pool--)
4205 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4206
4207 vf_shift = VMDQ_P(0) % 32;
4208 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4209
4210
4211 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4212 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4213 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4214 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4215 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4216 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4217
4218
4219 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4220
4221
4222 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4223
4224
4225
4226
4227
4228 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4229 case IXGBE_82599_VMDQ_8Q_MASK:
4230 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4231 break;
4232 case IXGBE_82599_VMDQ_4Q_MASK:
4233 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4234 break;
4235 default:
4236 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4237 break;
4238 }
4239
4240 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4241
4242 for (i = 0; i < adapter->num_vfs; i++) {
4243
4244 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4245 adapter->vfinfo[i].spoofchk_enabled);
4246
4247
4248 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4249 adapter->vfinfo[i].rss_query_enabled);
4250 }
4251 }
4252
4253 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4254 {
4255 struct ixgbe_hw *hw = &adapter->hw;
4256 struct net_device *netdev = adapter->netdev;
4257 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4258 struct ixgbe_ring *rx_ring;
4259 int i;
4260 u32 mhadd, hlreg0;
4261
4262 #ifdef IXGBE_FCOE
4263
4264 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4265 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4266 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4267
4268 #endif
4269
4270
4271 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4272 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4273
4274 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4275 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4276 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4277 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4278
4279 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4280 }
4281
4282 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4283
4284 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4285 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4286
4287
4288
4289
4290
4291 for (i = 0; i < adapter->num_rx_queues; i++) {
4292 rx_ring = adapter->rx_ring[i];
4293
4294 clear_ring_rsc_enabled(rx_ring);
4295 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4296 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4297
4298 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4299 set_ring_rsc_enabled(rx_ring);
4300
4301 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4302 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4303
4304 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4305 continue;
4306
4307 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4308
4309 #if (PAGE_SIZE < 8192)
4310 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4311 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4312
4313 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4314 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4315 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4316 #endif
4317 }
4318 }
4319
4320 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4321 {
4322 struct ixgbe_hw *hw = &adapter->hw;
4323 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4324
4325 switch (hw->mac.type) {
4326 case ixgbe_mac_82598EB:
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4338 break;
4339 case ixgbe_mac_X550:
4340 case ixgbe_mac_X550EM_x:
4341 case ixgbe_mac_x550em_a:
4342 if (adapter->num_vfs)
4343 rdrxctl |= IXGBE_RDRXCTL_PSP;
4344 fallthrough;
4345 case ixgbe_mac_82599EB:
4346 case ixgbe_mac_X540:
4347
4348 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4349 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4350 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4351
4352 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4353 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4354 break;
4355 default:
4356
4357 return;
4358 }
4359
4360 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4361 }
4362
4363
4364
4365
4366
4367
4368
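/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/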
4369 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4370 {
4371 struct ixgbe_hw *hw = &adapter->hw;
4372 int i;
4373 u32 rxctrl, rfctl;
4374
4375
4376 hw->mac.ops.disable_rx(hw);
4377
4378 ixgbe_setup_psrtype(adapter);
4379 ixgbe_setup_rdrxctl(adapter);
4380
4381
4382 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4383 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4384 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4385 rfctl |= IXGBE_RFCTL_RSC_DIS;
4386
4387
4388 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4389 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4390
4391
4392 ixgbe_setup_mrqc(adapter);
4393
4394
4395 ixgbe_set_rx_buffer_len(adapter);
4396
4397
4398
4399
4400
4401 for (i = 0; i < adapter->num_rx_queues; i++)
4402 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4403
4404 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4405
4406 if (hw->mac.type == ixgbe_mac_82598EB)
4407 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4408
4409
4410 rxctrl |= IXGBE_RXCTRL_RXEN;
4411 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4412 }
4413
4414 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4415 __be16 proto, u16 vid)
4416 {
4417 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4418 struct ixgbe_hw *hw = &adapter->hw;
4419
4420
4421 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4422 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4423
4424 set_bit(vid, adapter->active_vlans);
4425
4426 return 0;
4427 }
4428
4429 static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4430 {
4431 u32 vlvf;
4432 int idx;
4433
4434
4435 if (vlan == 0)
4436 return 0;
4437
4438
4439 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4440 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4441 if ((vlvf & VLAN_VID_MASK) == vlan)
4442 break;
4443 }
4444
4445 return idx;
4446 }
4447
4448 void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4449 {
4450 struct ixgbe_hw *hw = &adapter->hw;
4451 u32 bits, word;
4452 int idx;
4453
4454 idx = ixgbe_find_vlvf_entry(hw, vid);
4455 if (!idx)
4456 return;
4457
4458
4459
4460
4461 word = idx * 2 + (VMDQ_P(0) / 32);
4462 bits = ~BIT(VMDQ_P(0) % 32);
4463 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4464
4465
4466 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4467 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4468 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4469 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4470 }
4471 }
4472
4473 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4474 __be16 proto, u16 vid)
4475 {
4476 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4477 struct ixgbe_hw *hw = &adapter->hw;
4478
4479
4480 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4481 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4482
4483 clear_bit(vid, adapter->active_vlans);
4484
4485 return 0;
4486 }
4487
4488
4489
4490
4491
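/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */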
4492 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4493 {
4494 struct ixgbe_hw *hw = &adapter->hw;
4495 u32 vlnctrl;
4496 int i, j;
4497
4498 switch (hw->mac.type) {
4499 case ixgbe_mac_82598EB:
4500 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4501 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4502 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4503 break;
4504 case ixgbe_mac_82599EB:
4505 case ixgbe_mac_X540:
4506 case ixgbe_mac_X550:
4507 case ixgbe_mac_X550EM_x:
4508 case ixgbe_mac_x550em_a:
4509 for (i = 0; i < adapter->num_rx_queues; i++) {
4510 struct ixgbe_ring *ring = adapter->rx_ring[i];
4511
4512 if (!netif_is_ixgbe(ring->netdev))
4513 continue;
4514
4515 j = ring->reg_idx;
4516 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4517 vlnctrl &= ~IXGBE_RXDCTL_VME;
4518 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4519 }
4520 break;
4521 default:
4522 break;
4523 }
4524 }
4525
4526
4527
4528
4529
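/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */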
4530 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4531 {
4532 struct ixgbe_hw *hw = &adapter->hw;
4533 u32 vlnctrl;
4534 int i, j;
4535
4536 switch (hw->mac.type) {
4537 case ixgbe_mac_82598EB:
4538 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4539 vlnctrl |= IXGBE_VLNCTRL_VME;
4540 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4541 break;
4542 case ixgbe_mac_82599EB:
4543 case ixgbe_mac_X540:
4544 case ixgbe_mac_X550:
4545 case ixgbe_mac_X550EM_x:
4546 case ixgbe_mac_x550em_a:
4547 for (i = 0; i < adapter->num_rx_queues; i++) {
4548 struct ixgbe_ring *ring = adapter->rx_ring[i];
4549
4550 if (!netif_is_ixgbe(ring->netdev))
4551 continue;
4552
4553 j = ring->reg_idx;
4554 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4555 vlnctrl |= IXGBE_RXDCTL_VME;
4556 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4557 }
4558 break;
4559 default:
4560 break;
4561 }
4562 }
4563
4564 static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4565 {
4566 struct ixgbe_hw *hw = &adapter->hw;
4567 u32 vlnctrl, i;
4568
4569 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4570
4571 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4572
4573 vlnctrl |= IXGBE_VLNCTRL_VFE;
4574 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4575 } else {
4576 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4577 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4578 return;
4579 }
4580
4581
4582 if (hw->mac.type == ixgbe_mac_82598EB)
4583 return;
4584
4585
4586 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4587 return;
4588
4589
4590 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4591
4592
4593 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4594 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4595 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4596
4597 vlvfb |= BIT(VMDQ_P(0) % 32);
4598 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4599 }
4600
4601
4602 for (i = hw->mac.vft_size; i--;)
4603 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4604 }
4605
4606 #define VFTA_BLOCK_SIZE 8
4607 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4608 {
4609 struct ixgbe_hw *hw = &adapter->hw;
4610 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4611 u32 vid_start = vfta_offset * 32;
4612 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4613 u32 i, vid, word, bits;
4614
4615 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4616 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4617
4618
4619 vid = vlvf & VLAN_VID_MASK;
4620
4621
4622 if (vid < vid_start || vid >= vid_end)
4623 continue;
4624
4625 if (vlvf) {
4626
4627 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4628
4629
4630 if (test_bit(vid, adapter->active_vlans))
4631 continue;
4632 }
4633
4634
4635 word = i * 2 + VMDQ_P(0) / 32;
4636 bits = ~BIT(VMDQ_P(0) % 32);
4637 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4638 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4639 }
4640
4641
4642 for (i = VFTA_BLOCK_SIZE; i--;) {
4643 vid = (vfta_offset + i) * 32;
4644 word = vid / BITS_PER_LONG;
4645 bits = vid % BITS_PER_LONG;
4646
4647 vfta[i] |= adapter->active_vlans[word] >> bits;
4648
4649 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4650 }
4651 }
4652
4653 static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4654 {
4655 struct ixgbe_hw *hw = &adapter->hw;
4656 u32 vlnctrl, i;
4657
4658
4659 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4660 vlnctrl |= IXGBE_VLNCTRL_VFE;
4661 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4662
4663 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4664 hw->mac.type == ixgbe_mac_82598EB)
4665 return;
4666
4667
4668 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4669 return;
4670
4671
4672 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4673
4674 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4675 ixgbe_scrub_vfta(adapter, i);
4676 }
4677
4678 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4679 {
4680 u16 vid = 1;
4681
4682 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4683
4684 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4685 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4686 }
4687
4688 /**
4689  * ixgbe_write_mc_addr_list - write multicast addresses to the MTA
4690  * @netdev: network interface device structure
4691  *
4692  * Writes the multicast address list to the MTA hash table. Returns
4693  * -ENOMEM when there is no hardware update routine, otherwise the
4694  * number of multicast addresses written (0 when the interface is
4695  * not running).
4696  **/
4697 static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4698 {
4699 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4700 struct ixgbe_hw *hw = &adapter->hw;
4701
4702 if (!netif_running(netdev))
4703 return 0;
4704
4705 if (hw->mac.ops.update_mc_addr_list)
4706 hw->mac.ops.update_mc_addr_list(hw, netdev);
4707 else
4708 return -ENOMEM;
4709
4710 #ifdef CONFIG_PCI_IOV
4711 ixgbe_restore_vf_multicasts(adapter);
4712 #endif
4713
4714 return netdev_mc_count(netdev);
4715 }
4716
4717 #ifdef CONFIG_PCI_IOV
4718 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4719 {
4720 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4721 struct ixgbe_hw *hw = &adapter->hw;
4722 int i;
4723
4724 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4725 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4726
4727 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4728 hw->mac.ops.set_rar(hw, i,
4729 mac_table->addr,
4730 mac_table->pool,
4731 IXGBE_RAH_AV);
4732 else
4733 hw->mac.ops.clear_rar(hw, i);
4734 }
4735 }
4736
4737 #endif
4738 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4739 {
4740 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4741 struct ixgbe_hw *hw = &adapter->hw;
4742 int i;
4743
4744 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4745 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4746 continue;
4747
4748 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4749
4750 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4751 hw->mac.ops.set_rar(hw, i,
4752 mac_table->addr,
4753 mac_table->pool,
4754 IXGBE_RAH_AV);
4755 else
4756 hw->mac.ops.clear_rar(hw, i);
4757 }
4758 }
4759
4760 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4761 {
4762 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4763 struct ixgbe_hw *hw = &adapter->hw;
4764 int i;
4765
4766 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4767 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4768 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4769 }
4770
4771 ixgbe_sync_mac_table(adapter);
4772 }
4773
4774 static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4775 {
4776 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4777 struct ixgbe_hw *hw = &adapter->hw;
4778 int i, count = 0;
4779
4780 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4781
4782 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4783 continue;
4784
4785
4786 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4787 if (mac_table->pool != pool)
4788 continue;
4789 }
4790
4791 count++;
4792 }
4793
4794 return count;
4795 }
4796
4797
4798 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4799 {
4800 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4801 struct ixgbe_hw *hw = &adapter->hw;
4802
4803 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4804 mac_table->pool = VMDQ_P(0);
4805
4806 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4807
4808 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4809 IXGBE_RAH_AV);
4810 }
4811
4812 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4813 const u8 *addr, u16 pool)
4814 {
4815 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4816 struct ixgbe_hw *hw = &adapter->hw;
4817 int i;
4818
4819 if (is_zero_ether_addr(addr))
4820 return -EINVAL;
4821
4822 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4823 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4824 continue;
4825
4826 ether_addr_copy(mac_table->addr, addr);
4827 mac_table->pool = pool;
4828
4829 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4830 IXGBE_MAC_STATE_IN_USE;
4831
4832 ixgbe_sync_mac_table(adapter);
4833
4834 return i;
4835 }
4836
4837 return -ENOMEM;
4838 }
4839
4840 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4841 const u8 *addr, u16 pool)
4842 {
4843 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4844 struct ixgbe_hw *hw = &adapter->hw;
4845 int i;
4846
4847 if (is_zero_ether_addr(addr))
4848 return -EINVAL;
4849
4850
4851 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4852
4853 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4854 continue;
4855
4856 if (mac_table->pool != pool)
4857 continue;
4858
4859 if (!ether_addr_equal(addr, mac_table->addr))
4860 continue;
4861
4862 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4863 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4864
4865 ixgbe_sync_mac_table(adapter);
4866
4867 return 0;
4868 }
4869
4870 return -ENOMEM;
4871 }
4872
4873 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4874 {
4875 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4876 int ret;
4877
4878 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4879
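/* ixgbe_add_mac_filter() returns the RAR index (>= 0) on success or a
 * negative errno; __dev_uc_sync() only expects 0 or a negative error,
 * so clamp positive values down to 0 here
 */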
4880 return min_t(int, ret, 0);
4881 }
4882
4883 static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4884 {
4885 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4886
4887 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4888
4889 return 0;
4890 }
4891
4892 /**
4893  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4894  * @netdev: network interface device structure
4895  *
4896  * The set_rx_mode entry point is called whenever the unicast/multicast
4897  * address list or the network interface flags are updated. This routine
4898  * is responsible for configuring the hardware for proper unicast,
4899  * multicast, promiscuous and VLAN filtering behaviour.
4900  **/
4901 void ixgbe_set_rx_mode(struct net_device *netdev)
4902 {
4903 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4904 struct ixgbe_hw *hw = &adapter->hw;
4905 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4906 netdev_features_t features = netdev->features;
4907 int count;
4908
4909
4910 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4911
4912
4913 fctrl &= ~IXGBE_FCTRL_SBP;
4914 fctrl |= IXGBE_FCTRL_BAM;
4915 fctrl |= IXGBE_FCTRL_DPF;
4916 fctrl |= IXGBE_FCTRL_PMCF;
4917
4918
4919 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4920 if (netdev->flags & IFF_PROMISC) {
4921 hw->addr_ctrl.user_set_promisc = true;
4922 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4923 vmolr |= IXGBE_VMOLR_MPE;
4924 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4925 } else {
4926 if (netdev->flags & IFF_ALLMULTI) {
4927 fctrl |= IXGBE_FCTRL_MPE;
4928 vmolr |= IXGBE_VMOLR_MPE;
4929 }
4930 hw->addr_ctrl.user_set_promisc = false;
4931 }
4932
4933 /* Write unicast addresses to the available RAR registers.
4934  * If there is not sufficient space to store all of the
4935  * addresses, fall back to unicast promiscuous mode so that
4936  * no address is silently dropped.
4937  */
4938 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4939 fctrl |= IXGBE_FCTRL_UPE;
4940 vmolr |= IXGBE_VMOLR_ROPE;
4941 }
4942
4943 /* Write multicast addresses to the MTA hash table. If the
4944  * attempt fails, enable multicast promiscuous mode so that
4945  * multicast traffic is still received.
4946  */
4947 count = ixgbe_write_mc_addr_list(netdev);
4948 if (count < 0) {
4949 fctrl |= IXGBE_FCTRL_MPE;
4950 vmolr |= IXGBE_VMOLR_MPE;
4951 } else if (count) {
4952 vmolr |= IXGBE_VMOLR_ROMPE;
4953 }
4954
4955 if (hw->mac.type != ixgbe_mac_82598EB) {
4956 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4957 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4958 IXGBE_VMOLR_ROPE);
4959 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4960 }
4961
4962
4963 if (features & NETIF_F_RXALL) {
4964
4965
4966 fctrl |= (IXGBE_FCTRL_SBP |
4967 IXGBE_FCTRL_BAM |
4968 IXGBE_FCTRL_PMCF);
4969
4970 fctrl &= ~(IXGBE_FCTRL_DPF);
4971
4972 }
4973
4974 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4975
4976 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4977 ixgbe_vlan_strip_enable(adapter);
4978 else
4979 ixgbe_vlan_strip_disable(adapter);
4980
4981 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4982 ixgbe_vlan_promisc_disable(adapter);
4983 else
4984 ixgbe_vlan_promisc_enable(adapter);
4985 }
4986
4987 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4988 {
4989 int q_idx;
4990
4991 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4992 napi_enable(&adapter->q_vector[q_idx]->napi);
4993 }
4994
4995 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4996 {
4997 int q_idx;
4998
4999 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5000 napi_disable(&adapter->q_vector[q_idx]->napi);
5001 }
5002
5003 static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
5004 {
5005 struct ixgbe_adapter *adapter = netdev_priv(dev);
5006 struct ixgbe_hw *hw = &adapter->hw;
5007 struct udp_tunnel_info ti;
5008
5009 udp_tunnel_nic_get_port(dev, table, 0, &ti);
5010 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
5011 adapter->vxlan_port = ti.port;
5012 else
5013 adapter->geneve_port = ti.port;
5014
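/* both ports live in one register: the VXLAN UDP port occupies the low
 * 16 bits of VXLANCTRL and the GENEVE port is shifted into the upper
 * bits (per the IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT name)
 */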
5015 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
5016 ntohs(adapter->vxlan_port) |
5017 ntohs(adapter->geneve_port) <<
5018 IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
5019 return 0;
5020 }
5021
5022 static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
5023 .sync_table = ixgbe_udp_tunnel_sync,
5024 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5025 .tables = {
5026 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5027 },
5028 };
5029
5030 static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
5031 .sync_table = ixgbe_udp_tunnel_sync,
5032 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5033 .tables = {
5034 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5035 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
5036 },
5037 };
5038
5039 #ifdef CONFIG_IXGBE_DCB
5040 /**
5041  * ixgbe_configure_dcb - Configure DCB hardware support
5042  * @adapter: board private structure
5043  *
5044  * Programs the DCB credits and PFC configuration (CEE or IEEE mode),
5045  * adjusts the TSO limit on 82598 parts and sets up the per-TC RSS
5046  * queue counts.
5047  */
5048 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5049 {
5050 struct ixgbe_hw *hw = &adapter->hw;
5051 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5052
5053 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5054 if (hw->mac.type == ixgbe_mac_82598EB)
5055 netif_set_tso_max_size(adapter->netdev, 65536);
5056 return;
5057 }
5058
5059 if (hw->mac.type == ixgbe_mac_82598EB)
5060 netif_set_tso_max_size(adapter->netdev, 32768);
5061
5062 #ifdef IXGBE_FCOE
5063 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5064 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5065 #endif
5066
5067
5068 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5069 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5070 DCB_TX_CONFIG);
5071 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5072 DCB_RX_CONFIG);
5073 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5074 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5075 ixgbe_dcb_hw_ets(&adapter->hw,
5076 adapter->ixgbe_ieee_ets,
5077 max_frame);
5078 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5079 adapter->ixgbe_ieee_pfc->pfc_en,
5080 adapter->ixgbe_ieee_ets->prio_tc);
5081 }
5082
5083
5084 if (hw->mac.type != ixgbe_mac_82598EB) {
5085 u32 msb = 0;
5086 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5087
5088 while (rss_i) {
5089 msb++;
5090 rss_i >>= 1;
5091 }
5092
5093
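/* msb is the number of bits needed to express rss_i - 1; writing
 * msb * 0x11111111 replicates that value into each 4-bit per-TC field
 * of RQTC, giving every traffic class the same RSS queue count
 */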
5094 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5095 }
5096 }
5097 #endif
5098
5099
5100 #define IXGBE_ETH_FRAMING 20
5101
5102 /**
5103  * ixgbe_hpbthresh - calculate the flow control high water mark
5104  * @adapter: board private structure to calculate for
5105  * @pb: packet buffer index to calculate for
5106  * Returns the high water mark in KB for the given packet buffer.
5107  */
5108 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5109 {
5110 struct ixgbe_hw *hw = &adapter->hw;
5111 struct net_device *dev = adapter->netdev;
5112 int link, tc, kb, marker;
5113 u32 dv_id, rx_pba;
5114
5115
5116 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5117
5118 #ifdef IXGBE_FCOE
5119
5120 if ((dev->features & NETIF_F_FCOE_MTU) &&
5121 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5122 (pb == ixgbe_fcoe_get_tc(adapter)))
5123 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5124 #endif
5125
5126
5127 switch (hw->mac.type) {
5128 case ixgbe_mac_X540:
5129 case ixgbe_mac_X550:
5130 case ixgbe_mac_X550EM_x:
5131 case ixgbe_mac_x550em_a:
5132 dv_id = IXGBE_DV_X540(link, tc);
5133 break;
5134 default:
5135 dv_id = IXGBE_DV(link, tc);
5136 break;
5137 }
5138
5139
5140 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5141 dv_id += IXGBE_B2BT(tc);
5142
5143
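/* convert the delay value from bit times to KB and read the packet
 * buffer size in KB so that 'marker' below is a watermark in KB
 */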
5144 kb = IXGBE_BT2KB(dv_id);
5145 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5146
5147 marker = rx_pba - kb;
5148
5149 /* It is possible that the packet buffer is not large enough to
5150  * provide the required headroom. In that case warn the user and
5151  * fall back to the best value we can use.
5152  */
5153 if (marker < 0) {
5154 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
5155 "headroom to support flow control. "
5156 "Decrease MTU or number of traffic classes\n", pb);
5157 marker = tc + 1;
5158 }
5159
5160 return marker;
5161 }
5162
5163 /**
5164  * ixgbe_lpbthresh - calculate the flow control low water mark
5165  * @adapter: board private structure to calculate for
5166  * @pb: packet buffer index to calculate for
5167  * Returns the low water mark in KB for the given packet buffer.
5168  */
5169 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5170 {
5171 struct ixgbe_hw *hw = &adapter->hw;
5172 struct net_device *dev = adapter->netdev;
5173 int tc;
5174 u32 dv_id;
5175
5176
5177 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5178
5179 #ifdef IXGBE_FCOE
5180
5181 if ((dev->features & NETIF_F_FCOE_MTU) &&
5182 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5183 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5184 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5185 #endif
5186
5187
5188 switch (hw->mac.type) {
5189 case ixgbe_mac_X540:
5190 case ixgbe_mac_X550:
5191 case ixgbe_mac_X550EM_x:
5192 case ixgbe_mac_x550em_a:
5193 dv_id = IXGBE_LOW_DV_X540(tc);
5194 break;
5195 default:
5196 dv_id = IXGBE_LOW_DV(tc);
5197 break;
5198 }
5199
5200
5201 return IXGBE_BT2KB(dv_id);
5202 }
5203
5204 /*
5205  * ixgbe_pbthresh_setup - calculate and set up the per-TC high/low water marks
5206  */
5207 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5208 {
5209 struct ixgbe_hw *hw = &adapter->hw;
5210 int num_tc = adapter->hw_tcs;
5211 int i;
5212
5213 if (!num_tc)
5214 num_tc = 1;
5215
5216 for (i = 0; i < num_tc; i++) {
5217 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5218 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5219
5220
5221 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5222 hw->fc.low_water[i] = 0;
5223 }
5224
5225 for (; i < MAX_TRAFFIC_CLASS; i++)
5226 hw->fc.high_water[i] = 0;
5227 }
5228
5229 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5230 {
5231 struct ixgbe_hw *hw = &adapter->hw;
5232 int hdrm;
5233 u8 tc = adapter->hw_tcs;
5234
5235 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5236 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5237 hdrm = 32 << adapter->fdir_pballoc;
5238 else
5239 hdrm = 0;
5240
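/* split the Rx packet buffer evenly between the traffic classes,
 * reserving 'hdrm' of headroom (scaled from the configured
 * fdir_pballoc value) for Flow Director filters when they are enabled
 */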
5241 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5242 ixgbe_pbthresh_setup(adapter);
5243 }
5244
5245 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5246 {
5247 struct ixgbe_hw *hw = &adapter->hw;
5248 struct hlist_node *node2;
5249 struct ixgbe_fdir_filter *filter;
5250 u8 queue;
5251
5252 spin_lock(&adapter->fdir_perfect_lock);
5253
5254 if (!hlist_empty(&adapter->fdir_filter_list))
5255 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5256
5257 hlist_for_each_entry_safe(filter, node2,
5258 &adapter->fdir_filter_list, fdir_node) {
5259 if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
5260 queue = IXGBE_FDIR_DROP_QUEUE;
5261 } else {
5262 u32 ring = ethtool_get_flow_spec_ring(filter->action);
5263 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5264
5265 if (!vf && (ring >= adapter->num_rx_queues)) {
5266 e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5267 ring);
5268 continue;
5269 } else if (vf &&
5270 ((vf > adapter->num_vfs) ||
5271 ring >= adapter->num_rx_queues_per_pool)) {
5272 e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5273 vf, ring);
5274 continue;
5275 }
5276
5277
5278 if (!vf)
5279 queue = adapter->rx_ring[ring]->reg_idx;
5280 else
5281 queue = ((vf - 1) *
5282 adapter->num_rx_queues_per_pool) + ring;
5283 }
5284
5285 ixgbe_fdir_write_perfect_filter_82599(hw,
5286 &filter->filter, filter->sw_idx, queue);
5287 }
5288
5289 spin_unlock(&adapter->fdir_perfect_lock);
5290 }
5291
5292 /**
5293  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5294  * @rx_ring: ring to free buffers from
5295  **/
5296 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5297 {
5298 u16 i = rx_ring->next_to_clean;
5299 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5300
5301 if (rx_ring->xsk_pool) {
5302 ixgbe_xsk_clean_rx_ring(rx_ring);
5303 goto skip_free;
5304 }
5305
5306
5307 while (i != rx_ring->next_to_alloc) {
5308 if (rx_buffer->skb) {
5309 struct sk_buff *skb = rx_buffer->skb;
5310 if (IXGBE_CB(skb)->page_released)
5311 dma_unmap_page_attrs(rx_ring->dev,
5312 IXGBE_CB(skb)->dma,
5313 ixgbe_rx_pg_size(rx_ring),
5314 DMA_FROM_DEVICE,
5315 IXGBE_RX_DMA_ATTR);
5316 dev_kfree_skb(skb);
5317 }
5318
5319 /* Invalidate cache lines that may have been written to by the
5320  * device so that we avoid corrupting memory.
5321  */
5322 dma_sync_single_range_for_cpu(rx_ring->dev,
5323 rx_buffer->dma,
5324 rx_buffer->page_offset,
5325 ixgbe_rx_bufsz(rx_ring),
5326 DMA_FROM_DEVICE);
5327
5328
5329 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5330 ixgbe_rx_pg_size(rx_ring),
5331 DMA_FROM_DEVICE,
5332 IXGBE_RX_DMA_ATTR);
5333 __page_frag_cache_drain(rx_buffer->page,
5334 rx_buffer->pagecnt_bias);
5335
5336 i++;
5337 rx_buffer++;
5338 if (i == rx_ring->count) {
5339 i = 0;
5340 rx_buffer = rx_ring->rx_buffer_info;
5341 }
5342 }
5343
5344 skip_free:
5345 rx_ring->next_to_alloc = 0;
5346 rx_ring->next_to_clean = 0;
5347 rx_ring->next_to_use = 0;
5348 }
5349
5350 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5351 struct ixgbe_fwd_adapter *accel)
5352 {
5353 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5354 int num_tc = netdev_get_num_tc(adapter->netdev);
5355 struct net_device *vdev = accel->netdev;
5356 int i, baseq, err;
5357
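/* each offloaded macvlan owns one pool, and each pool owns a
 * contiguous block of num_rx_queues_per_pool queues, so the pool's
 * first hardware queue is simply pool * queues-per-pool
 */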
5358 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5359 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5360 accel->pool, adapter->num_rx_pools,
5361 baseq, baseq + adapter->num_rx_queues_per_pool);
5362
5363 accel->rx_base_queue = baseq;
5364 accel->tx_base_queue = baseq;
5365
5366
5367 for (i = 0; i < num_tc; i++)
5368 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5369 i, rss_i, baseq + (rss_i * i));
5370
5371 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5372 adapter->rx_ring[baseq + i]->netdev = vdev;
5373
5374 /* make sure the updated ring->netdev pointers are visible to the
5375  * datapath before the MAC filter is added and traffic starts flowing
5376  */
5377 wmb();
5378
5379
5380
5381
5382 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5383 VMDQ_P(accel->pool));
5384 if (err >= 0)
5385 return 0;
5386
5387
5388 macvlan_release_l2fw_offload(vdev);
5389
5390 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5391 adapter->rx_ring[baseq + i]->netdev = NULL;
5392
5393 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5394
5395
5396 netdev_unbind_sb_channel(adapter->netdev, vdev);
5397 netdev_set_sb_channel(vdev, 0);
5398
5399 clear_bit(accel->pool, adapter->fwd_bitmask);
5400 kfree(accel);
5401
5402 return err;
5403 }
5404
5405 static int ixgbe_macvlan_up(struct net_device *vdev,
5406 struct netdev_nested_priv *priv)
5407 {
5408 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
5409 struct ixgbe_fwd_adapter *accel;
5410
5411 if (!netif_is_macvlan(vdev))
5412 return 0;
5413
5414 accel = macvlan_accel_priv(vdev);
5415 if (!accel)
5416 return 0;
5417
5418 ixgbe_fwd_ring_up(adapter, accel);
5419
5420 return 0;
5421 }
5422
5423 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5424 {
5425 struct netdev_nested_priv priv = {
5426 .data = (void *)adapter,
5427 };
5428
5429 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5430 ixgbe_macvlan_up, &priv);
5431 }
5432
5433 static void ixgbe_configure(struct ixgbe_adapter *adapter)
5434 {
5435 struct ixgbe_hw *hw = &adapter->hw;
5436
5437 ixgbe_configure_pb(adapter);
5438 #ifdef CONFIG_IXGBE_DCB
5439 ixgbe_configure_dcb(adapter);
5440 #endif
5441
5442
5443
5444
5445 ixgbe_configure_virtualization(adapter);
5446
5447 ixgbe_set_rx_mode(adapter->netdev);
5448 ixgbe_restore_vlan(adapter);
5449 ixgbe_ipsec_restore(adapter);
5450
5451 switch (hw->mac.type) {
5452 case ixgbe_mac_82599EB:
5453 case ixgbe_mac_X540:
5454 hw->mac.ops.disable_rx_buff(hw);
5455 break;
5456 default:
5457 break;
5458 }
5459
5460 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5461 ixgbe_init_fdir_signature_82599(&adapter->hw,
5462 adapter->fdir_pballoc);
5463 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5464 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5465 adapter->fdir_pballoc);
5466 ixgbe_fdir_filter_restore(adapter);
5467 }
5468
5469 switch (hw->mac.type) {
5470 case ixgbe_mac_82599EB:
5471 case ixgbe_mac_X540:
5472 hw->mac.ops.enable_rx_buff(hw);
5473 break;
5474 default:
5475 break;
5476 }
5477
5478 #ifdef CONFIG_IXGBE_DCA
5479
5480 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5481 ixgbe_setup_dca(adapter);
5482 #endif
5483
5484 #ifdef IXGBE_FCOE
5485
5486 ixgbe_configure_fcoe(adapter);
5487
5488 #endif
5489 ixgbe_configure_tx(adapter);
5490 ixgbe_configure_rx(adapter);
5491 ixgbe_configure_dfwd(adapter);
5492 }
5493
5494 /**
5495  * ixgbe_sfp_link_config - set up link for SFP based adapters
5496  * @adapter: pointer to private adapter struct
5497  **/
5498 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5499 {
5500 /*
5501  * Assume the worst case: an SFP module may have been inserted or
5502  * removed after the reset but before detection was re-enabled, so
5503  * schedule an SFP reset (and, on 82598, a module search) right away.
5504  */
5506 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5507 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5508
5509 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5510 adapter->sfp_poll_time = 0;
5511 }
5512
5513 /**
5514  * ixgbe_non_sfp_link_config - set up link for non-SFP adapters
5515  * @hw: pointer to private hardware struct
5516  *
5517  * Returns 0 on success or a hardware error code on failure.
5518  **/
5519 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5520 {
5521 u32 speed;
5522 bool autoneg, link_up = false;
5523 int ret = IXGBE_ERR_LINK_SETUP;
5524
5525 if (hw->mac.ops.check_link)
5526 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5527
5528 if (ret)
5529 return ret;
5530
5531 speed = hw->phy.autoneg_advertised;
5532 if (!speed && hw->mac.ops.get_link_capabilities) {
5533 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5534 &autoneg);
5535 /* drop the NBASE-T (2.5G/5G) rates from the default advertised set
5536  * so autonegotiation only offers speeds that every link partner is
5537  * expected to handle
5538  */
5539 speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
5540 IXGBE_LINK_SPEED_2_5GB_FULL);
5541 }
5542
5543 if (ret)
5544 return ret;
5545
5546 if (hw->mac.ops.setup_link)
5547 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5548
5549 return ret;
5550 }
5551
5552 /**
5553  * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
5554  * @adapter: board private structure
5555  *
5556  * On a reset, fold the running VF counters into the saved totals and
5557  * re-latch the hardware base values so per-VF statistics stay correct.
5558  **/
5559 static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
5560 {
5561 struct ixgbe_hw *hw = &adapter->hw;
5562 int i;
5563
5564 for (i = 0; i < adapter->num_vfs; i++) {
5565 adapter->vfinfo[i].last_vfstats.gprc =
5566 IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
5567 adapter->vfinfo[i].saved_rst_vfstats.gprc +=
5568 adapter->vfinfo[i].vfstats.gprc;
5569 adapter->vfinfo[i].vfstats.gprc = 0;
5570 adapter->vfinfo[i].last_vfstats.gptc =
5571 IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
5572 adapter->vfinfo[i].saved_rst_vfstats.gptc +=
5573 adapter->vfinfo[i].vfstats.gptc;
5574 adapter->vfinfo[i].vfstats.gptc = 0;
5575 adapter->vfinfo[i].last_vfstats.gorc =
5576 IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
5577 adapter->vfinfo[i].saved_rst_vfstats.gorc +=
5578 adapter->vfinfo[i].vfstats.gorc;
5579 adapter->vfinfo[i].vfstats.gorc = 0;
5580 adapter->vfinfo[i].last_vfstats.gotc =
5581 IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
5582 adapter->vfinfo[i].saved_rst_vfstats.gotc +=
5583 adapter->vfinfo[i].vfstats.gotc;
5584 adapter->vfinfo[i].vfstats.gotc = 0;
5585 adapter->vfinfo[i].last_vfstats.mprc =
5586 IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
5587 adapter->vfinfo[i].saved_rst_vfstats.mprc +=
5588 adapter->vfinfo[i].vfstats.mprc;
5589 adapter->vfinfo[i].vfstats.mprc = 0;
5590 }
5591 }
5592
5593 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5594 {
5595 struct ixgbe_hw *hw = &adapter->hw;
5596 u32 gpie = 0;
5597
5598 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5599 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5600 IXGBE_GPIE_OCD;
5601 gpie |= IXGBE_GPIE_EIAME;
5602 /*
5603  * Use EIAM to auto-mask interrupt causes when an MSI-X vector is
5604  * asserted, so they do not have to be masked by hand on every interrupt.
5605  */
5606 switch (hw->mac.type) {
5607 case ixgbe_mac_82598EB:
5608 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5609 break;
5610 case ixgbe_mac_82599EB:
5611 case ixgbe_mac_X540:
5612 case ixgbe_mac_X550:
5613 case ixgbe_mac_X550EM_x:
5614 case ixgbe_mac_x550em_a:
5615 default:
5616 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5617 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5618 break;
5619 }
5620 } else {
5621
5622
5623 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5624 }
5625
5626
5627
5628
5629 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5630 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5631
5632 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5633 case IXGBE_82599_VMDQ_8Q_MASK:
5634 gpie |= IXGBE_GPIE_VTMODE_16;
5635 break;
5636 case IXGBE_82599_VMDQ_4Q_MASK:
5637 gpie |= IXGBE_GPIE_VTMODE_32;
5638 break;
5639 default:
5640 gpie |= IXGBE_GPIE_VTMODE_64;
5641 break;
5642 }
5643 }
5644
5645
5646 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5647 switch (adapter->hw.mac.type) {
5648 case ixgbe_mac_82599EB:
5649 gpie |= IXGBE_SDP0_GPIEN_8259X;
5650 break;
5651 default:
5652 break;
5653 }
5654 }
5655
5656
5657 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5658 gpie |= IXGBE_SDP1_GPIEN(hw);
5659
5660 switch (hw->mac.type) {
5661 case ixgbe_mac_82599EB:
5662 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5663 break;
5664 case ixgbe_mac_X550EM_x:
5665 case ixgbe_mac_x550em_a:
5666 gpie |= IXGBE_SDP0_GPIEN_X540;
5667 break;
5668 default:
5669 break;
5670 }
5671
5672 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5673 }
5674
5675 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5676 {
5677 struct ixgbe_hw *hw = &adapter->hw;
5678 int err;
5679 u32 ctrl_ext;
5680
5681 ixgbe_get_hw_control(adapter);
5682 ixgbe_setup_gpie(adapter);
5683
5684 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5685 ixgbe_configure_msix(adapter);
5686 else
5687 ixgbe_configure_msi_and_legacy(adapter);
5688
5689
5690 if (hw->mac.ops.enable_tx_laser)
5691 hw->mac.ops.enable_tx_laser(hw);
5692
5693 if (hw->phy.ops.set_phy_power)
5694 hw->phy.ops.set_phy_power(hw, true);
5695
5696 smp_mb__before_atomic();
5697 clear_bit(__IXGBE_DOWN, &adapter->state);
5698 ixgbe_napi_enable_all(adapter);
5699
5700 if (ixgbe_is_sfp(hw)) {
5701 ixgbe_sfp_link_config(adapter);
5702 } else {
5703 err = ixgbe_non_sfp_link_config(hw);
5704 if (err)
5705 e_err(probe, "link_config FAILED %d\n", err);
5706 }
5707
5708
5709 IXGBE_READ_REG(hw, IXGBE_EICR);
5710 ixgbe_irq_enable(adapter, true, true);
5711
5712 /*
5713  * If this adapter has a fan, check to see whether it had already
5714  * failed before the interrupt was enabled.
5715  */
5716 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5717 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5718 if (esdp & IXGBE_ESDP_SDP1)
5719 e_crit(drv, "Fan has stopped, replace the adapter\n");
5720 }
5721
5722
5723
5724 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5725 adapter->link_check_timeout = jiffies;
5726 mod_timer(&adapter->service_timer, jiffies);
5727
5728 ixgbe_clear_vf_stats_counters(adapter);
5729
5730 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5731 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5732 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5733
5734
5735 ixgbe_set_all_vfs(adapter);
5736 }
5737
5738 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5739 {
5740
5741 netif_trans_update(adapter->netdev);
5742
5743 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5744 usleep_range(1000, 2000);
5745 if (adapter->hw.phy.type == ixgbe_phy_fw)
5746 ixgbe_watchdog_link_is_down(adapter);
5747 ixgbe_down(adapter);
5748
5749 /* If SR-IOV is enabled, wait a couple of seconds before bringing the
5750  * adapter back up so the VFs have time to notice the reset and stop
5751  * using their queues.
5752  */
5754 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5755 msleep(2000);
5756 ixgbe_up(adapter);
5757 clear_bit(__IXGBE_RESETTING, &adapter->state);
5758 }
5759
5760 void ixgbe_up(struct ixgbe_adapter *adapter)
5761 {
5762
5763 ixgbe_configure(adapter);
5764
5765 ixgbe_up_complete(adapter);
5766 }
5767
5768 static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5769 {
5770 u16 devctl2;
5771
5772 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5773
5774 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5775 case IXGBE_PCIDEVCTRL2_17_34s:
5776 case IXGBE_PCIDEVCTRL2_4_8s:
5777 /* cap the delay at 2 seconds even for the longer ranges; otherwise
5778  * we could end up waiting for up to 34 seconds in the worst case
5779  * before giving up on the queues
5780  */
5781 case IXGBE_PCIDEVCTRL2_1_2s:
5782 return 2000000ul;
5783 case IXGBE_PCIDEVCTRL2_260_520ms:
5784 return 520000ul;
5785 case IXGBE_PCIDEVCTRL2_65_130ms:
5786 return 130000ul;
5787 case IXGBE_PCIDEVCTRL2_16_32ms:
5788 return 32000ul;
5789 case IXGBE_PCIDEVCTRL2_1_2ms:
5790 return 2000ul;
5791 case IXGBE_PCIDEVCTRL2_50_100us:
5792 return 100ul;
5793 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5794 return 32000ul;
5795 default:
5796 break;
5797 }
5798
5799 /* we shouldn't have to hit this path, but just in case default to
5800  * the 16-32ms range
5801  */
5802 return 32000ul;
5803 }
5804
5805 void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5806 {
5807 unsigned long wait_delay, delay_interval;
5808 struct ixgbe_hw *hw = &adapter->hw;
5809 int i, wait_loop;
5810 u32 rxdctl;
5811
5812
5813 hw->mac.ops.disable_rx(hw);
5814
5815 if (ixgbe_removed(hw->hw_addr))
5816 return;
5817
5818
5819 for (i = 0; i < adapter->num_rx_queues; i++) {
5820 struct ixgbe_ring *ring = adapter->rx_ring[i];
5821 u8 reg_idx = ring->reg_idx;
5822
5823 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5824 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5825 rxdctl |= IXGBE_RXDCTL_SWFLSH;
5826
5827
5828 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5829 }
5830
5831
5832 if (hw->mac.type == ixgbe_mac_82598EB &&
5833 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5834 return;
5835
5836 /* Determine our minimum delay interval. We will increase this value
5837  * with each subsequent test. This way if the device returns quickly
5838  * we should spend as little time as possible waiting, however as
5839  * the time increases we will wait for larger periods of time.
5840  *
5841  * The trick here is that we increase the interval using the
5842  * following pattern: 1x 3x 5x 7x ... so the ten polls below total
5843  * roughly 100x whatever interval we choose. Dividing the completion
5844  * timeout by 100 therefore gives us the minimum delay interval.
5845  */
5847 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5848
5849 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5850 wait_delay = delay_interval;
5851
5852 while (wait_loop--) {
5853 usleep_range(wait_delay, wait_delay + 10);
5854 wait_delay += delay_interval * 2;
5855 rxdctl = 0;
5856
5857
5858
5859
5860
5861
5862 for (i = 0; i < adapter->num_rx_queues; i++) {
5863 struct ixgbe_ring *ring = adapter->rx_ring[i];
5864 u8 reg_idx = ring->reg_idx;
5865
5866 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5867 }
5868
5869 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5870 return;
5871 }
5872
5873 e_err(drv,
5874 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5875 }
5876
5877 void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5878 {
5879 unsigned long wait_delay, delay_interval;
5880 struct ixgbe_hw *hw = &adapter->hw;
5881 int i, wait_loop;
5882 u32 txdctl;
5883
5884 if (ixgbe_removed(hw->hw_addr))
5885 return;
5886
5887
5888 for (i = 0; i < adapter->num_tx_queues; i++) {
5889 struct ixgbe_ring *ring = adapter->tx_ring[i];
5890 u8 reg_idx = ring->reg_idx;
5891
5892 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5893 }
5894
5895
5896 for (i = 0; i < adapter->num_xdp_queues; i++) {
5897 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5898 u8 reg_idx = ring->reg_idx;
5899
5900 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5901 }
5902
5903
5904
5905
5906
5907
5908 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5909 goto dma_engine_disable;
5910
5911 /* same incremental back-off scheme as in ixgbe_disable_rx(): the
5912  * waits follow a 1x, 3x, 5x, ... pattern that sums to roughly 100x
5913  * the interval, so the interval is the completion timeout divided
5914  * by 100
5915  */
5922 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5923
5924 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5925 wait_delay = delay_interval;
5926
5927 while (wait_loop--) {
5928 usleep_range(wait_delay, wait_delay + 10);
5929 wait_delay += delay_interval * 2;
5930 txdctl = 0;
5931
5932
5933
5934
5935
5936
5937 for (i = 0; i < adapter->num_tx_queues; i++) {
5938 struct ixgbe_ring *ring = adapter->tx_ring[i];
5939 u8 reg_idx = ring->reg_idx;
5940
5941 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5942 }
5943 for (i = 0; i < adapter->num_xdp_queues; i++) {
5944 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5945 u8 reg_idx = ring->reg_idx;
5946
5947 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5948 }
5949
5950 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5951 goto dma_engine_disable;
5952 }
5953
5954 e_err(drv,
5955 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5956
5957 dma_engine_disable:
5958
5959 switch (hw->mac.type) {
5960 case ixgbe_mac_82599EB:
5961 case ixgbe_mac_X540:
5962 case ixgbe_mac_X550:
5963 case ixgbe_mac_X550EM_x:
5964 case ixgbe_mac_x550em_a:
5965 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5966 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5967 ~IXGBE_DMATXCTL_TE));
5968 fallthrough;
5969 default:
5970 break;
5971 }
5972 }
5973
5974 void ixgbe_reset(struct ixgbe_adapter *adapter)
5975 {
5976 struct ixgbe_hw *hw = &adapter->hw;
5977 struct net_device *netdev = adapter->netdev;
5978 int err;
5979
5980 if (ixgbe_removed(hw->hw_addr))
5981 return;
5982
5983 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5984 usleep_range(1000, 2000);
5985
5986
5987 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5988 IXGBE_FLAG2_SFP_NEEDS_RESET);
5989 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5990
5991 err = hw->mac.ops.init_hw(hw);
5992 switch (err) {
5993 case 0:
5994 case IXGBE_ERR_SFP_NOT_PRESENT:
5995 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5996 break;
5997 case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
5998 e_dev_err("primary disable timed out\n");
5999 break;
6000 case IXGBE_ERR_EEPROM_VERSION:
6001
6002 e_dev_warn("This device is a pre-production adapter/LOM. "
6003 "Please be aware there may be issues associated with "
6004 "your hardware. If you are experiencing problems "
6005 "please contact your Intel or hardware "
6006 "representative who provided you with this "
6007 "hardware.\n");
6008 break;
6009 default:
6010 e_dev_err("Hardware Error: %d\n", err);
6011 }
6012
6013 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6014
6015
6016 ixgbe_flush_sw_mac_table(adapter);
6017 __dev_uc_unsync(netdev, NULL);
6018
6019
6020 ixgbe_mac_set_default_filter(adapter);
6021
6022
6023 if (hw->mac.san_mac_rar_index)
6024 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
6025
6026 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6027 ixgbe_ptp_reset(adapter);
6028
6029 if (hw->phy.ops.set_phy_power) {
6030 if (!netif_running(adapter->netdev) && !adapter->wol)
6031 hw->phy.ops.set_phy_power(hw, false);
6032 else
6033 hw->phy.ops.set_phy_power(hw, true);
6034 }
6035 }
6036
6037 /**
6038  * ixgbe_clean_tx_ring - Free Tx Buffers
6039  * @tx_ring: ring to be cleaned
6040  **/
6041 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
6042 {
6043 u16 i = tx_ring->next_to_clean;
6044 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
6045
6046 if (tx_ring->xsk_pool) {
6047 ixgbe_xsk_clean_tx_ring(tx_ring);
6048 goto out;
6049 }
6050
6051 while (i != tx_ring->next_to_use) {
6052 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
6053
6054
6055 if (ring_is_xdp(tx_ring))
6056 xdp_return_frame(tx_buffer->xdpf);
6057 else
6058 dev_kfree_skb_any(tx_buffer->skb);
6059
6060
6061 dma_unmap_single(tx_ring->dev,
6062 dma_unmap_addr(tx_buffer, dma),
6063 dma_unmap_len(tx_buffer, len),
6064 DMA_TO_DEVICE);
6065
6066
6067 eop_desc = tx_buffer->next_to_watch;
6068 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6069
6070
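/* walk every descriptor between this buffer and the end-of-packet
 * descriptor it points at, unmapping any fragment pages along the way
 */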
6071 while (tx_desc != eop_desc) {
6072 tx_buffer++;
6073 tx_desc++;
6074 i++;
6075 if (unlikely(i == tx_ring->count)) {
6076 i = 0;
6077 tx_buffer = tx_ring->tx_buffer_info;
6078 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6079 }
6080
6081
6082 if (dma_unmap_len(tx_buffer, len))
6083 dma_unmap_page(tx_ring->dev,
6084 dma_unmap_addr(tx_buffer, dma),
6085 dma_unmap_len(tx_buffer, len),
6086 DMA_TO_DEVICE);
6087 }
6088
6089
6090 tx_buffer++;
6091 i++;
6092 if (unlikely(i == tx_ring->count)) {
6093 i = 0;
6094 tx_buffer = tx_ring->tx_buffer_info;
6095 }
6096 }
6097
6098
6099 if (!ring_is_xdp(tx_ring))
6100 netdev_tx_reset_queue(txring_txq(tx_ring));
6101
6102 out:
6103
6104 tx_ring->next_to_use = 0;
6105 tx_ring->next_to_clean = 0;
6106 }
6107
6108
6109
6110
6111
6112 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6113 {
6114 int i;
6115
6116 for (i = 0; i < adapter->num_rx_queues; i++)
6117 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6118 }
6119
6120
6121
6122
6123
6124 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6125 {
6126 int i;
6127
6128 for (i = 0; i < adapter->num_tx_queues; i++)
6129 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6130 for (i = 0; i < adapter->num_xdp_queues; i++)
6131 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6132 }
6133
6134 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6135 {
6136 struct hlist_node *node2;
6137 struct ixgbe_fdir_filter *filter;
6138
6139 spin_lock(&adapter->fdir_perfect_lock);
6140
6141 hlist_for_each_entry_safe(filter, node2,
6142 &adapter->fdir_filter_list, fdir_node) {
6143 hlist_del(&filter->fdir_node);
6144 kfree(filter);
6145 }
6146 adapter->fdir_filter_count = 0;
6147
6148 spin_unlock(&adapter->fdir_perfect_lock);
6149 }
6150
6151 void ixgbe_down(struct ixgbe_adapter *adapter)
6152 {
6153 struct net_device *netdev = adapter->netdev;
6154 struct ixgbe_hw *hw = &adapter->hw;
6155 int i;
6156
6157
6158 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6159 return;
6160
6161
6162 netif_tx_stop_all_queues(netdev);
6163
6164
6165 netif_carrier_off(netdev);
6166 netif_tx_disable(netdev);
6167
6168
6169 ixgbe_disable_rx(adapter);
6170
6171
6172 if (adapter->xdp_ring[0])
6173 synchronize_rcu();
6174
6175 ixgbe_irq_disable(adapter);
6176
6177 ixgbe_napi_disable_all(adapter);
6178
6179 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6180 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6181 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6182
6183 del_timer_sync(&adapter->service_timer);
6184
6185 if (adapter->num_vfs) {
6186
6187 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6188
6189
6190 for (i = 0 ; i < adapter->num_vfs; i++)
6191 adapter->vfinfo[i].clear_to_send = false;
6192
6193
6194 ixgbe_set_all_vfs(adapter);
6195 }
6196
6197
6198 ixgbe_disable_tx(adapter);
6199
6200 if (!pci_channel_offline(adapter->pdev))
6201 ixgbe_reset(adapter);
6202
6203
6204 if (hw->mac.ops.disable_tx_laser)
6205 hw->mac.ops.disable_tx_laser(hw);
6206
6207 ixgbe_clean_all_tx_rings(adapter);
6208 ixgbe_clean_all_rx_rings(adapter);
6209 }
6210
6211
6212
6213
6214
6215 static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6216 {
6217 struct ixgbe_hw *hw = &adapter->hw;
6218
6219 switch (hw->device_id) {
6220 case IXGBE_DEV_ID_X550EM_A_1G_T:
6221 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6222 if (!hw->phy.eee_speeds_supported)
6223 break;
6224 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6225 if (!hw->phy.eee_speeds_advertised)
6226 break;
6227 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6228 break;
6229 default:
6230 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6231 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6232 break;
6233 }
6234 }
6235
6236 /**
6237  * ixgbe_tx_timeout - Respond to a Tx Hang
6238  * @netdev: network interface device structure
6239  * @txqueue: index of the queue that timed out (unused here)
6240  **/
6241 static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6242 {
6243 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6244
6245
6246 ixgbe_tx_timeout_reset(adapter);
6247 }
6248
6249 #ifdef CONFIG_IXGBE_DCB
6250 static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6251 {
6252 struct ixgbe_hw *hw = &adapter->hw;
6253 struct tc_configuration *tc;
6254 int j;
6255
6256 switch (hw->mac.type) {
6257 case ixgbe_mac_82598EB:
6258 case ixgbe_mac_82599EB:
6259 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6260 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6261 break;
6262 case ixgbe_mac_X540:
6263 case ixgbe_mac_X550:
6264 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6265 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6266 break;
6267 case ixgbe_mac_X550EM_x:
6268 case ixgbe_mac_x550em_a:
6269 default:
6270 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6271 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6272 break;
6273 }
6274
6275
6276 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6277 tc = &adapter->dcb_cfg.tc_config[j];
6278 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6279 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6280 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6281 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6282 tc->dcb_pfc = pfc_disabled;
6283 }
6284
6285
6286 tc = &adapter->dcb_cfg.tc_config[0];
6287 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6288 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6289
6290 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6291 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6292 adapter->dcb_cfg.pfc_mode_enable = false;
6293 adapter->dcb_set_bitmap = 0x00;
6294 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6295 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6296 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6297 sizeof(adapter->temp_dcb_cfg));
6298 }
6299 #endif
6300
6301 /**
6302  * ixgbe_sw_init - Initialize general software structures
6303  * @adapter: board private structure to initialize
6304  * @ii: pointer to ixgbe_info for this device
6305  *
6306  * ixgbe_sw_init initializes the adapter private data structure.
6307  * Fields are initialized based on PCI device information and
6308  * OS network device settings (MTU size).
6309  **/
6310 static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6311 const struct ixgbe_info *ii)
6312 {
6313 struct ixgbe_hw *hw = &adapter->hw;
6314 struct pci_dev *pdev = adapter->pdev;
6315 unsigned int rss, fdir;
6316 u32 fwsm;
6317 int i;
6318
6319
6320
6321 hw->vendor_id = pdev->vendor;
6322 hw->device_id = pdev->device;
6323 hw->revision_id = pdev->revision;
6324 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6325 hw->subsystem_device_id = pdev->subsystem_device;
6326
6327
6328 ii->get_invariants(hw);
6329
6330
6331 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6332 adapter->ring_feature[RING_F_RSS].limit = rss;
6333 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6334 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6335 adapter->atr_sample_rate = 20;
6336 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6337 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6338 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6339 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6340 #ifdef CONFIG_IXGBE_DCA
6341 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6342 #endif
6343 #ifdef CONFIG_IXGBE_DCB
6344 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6345 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6346 #endif
6347 #ifdef IXGBE_FCOE
6348 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6349 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6350 #ifdef CONFIG_IXGBE_DCB
6351
6352 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6353 #endif
6354 #endif
6355
6356
6357 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6358 GFP_KERNEL);
6359 if (!adapter->jump_tables[0])
6360 return -ENOMEM;
6361 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6362
6363 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6364 adapter->jump_tables[i] = NULL;
6365
6366 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6367 sizeof(struct ixgbe_mac_addr),
6368 GFP_KERNEL);
6369 if (!adapter->mac_table)
6370 return -ENOMEM;
6371
6372 if (ixgbe_init_rss_key(adapter))
6373 return -ENOMEM;
6374
6375 adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
6376 if (!adapter->af_xdp_zc_qps)
6377 return -ENOMEM;
6378
6379
6380 switch (hw->mac.type) {
6381 case ixgbe_mac_82598EB:
6382 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6383
6384 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6385 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6386
6387 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6388 adapter->ring_feature[RING_F_FDIR].limit = 0;
6389 adapter->atr_sample_rate = 0;
6390 adapter->fdir_pballoc = 0;
6391 #ifdef IXGBE_FCOE
6392 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6393 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6394 #ifdef CONFIG_IXGBE_DCB
6395 adapter->fcoe.up = 0;
6396 #endif
6397 #endif
6398 break;
6399 case ixgbe_mac_82599EB:
6400 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6401 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6402 break;
6403 case ixgbe_mac_X540:
6404 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6405 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6406 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6407 break;
6408 case ixgbe_mac_x550em_a:
6409 switch (hw->device_id) {
6410 case IXGBE_DEV_ID_X550EM_A_1G_T:
6411 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6412 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6413 break;
6414 default:
6415 break;
6416 }
6417 fallthrough;
6418 case ixgbe_mac_X550EM_x:
6419 #ifdef CONFIG_IXGBE_DCB
6420 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6421 #endif
6422 #ifdef IXGBE_FCOE
6423 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6424 #ifdef CONFIG_IXGBE_DCB
6425 adapter->fcoe.up = 0;
6426 #endif
6427 #endif
6428 fallthrough;
6429 case ixgbe_mac_X550:
6430 if (hw->mac.type == ixgbe_mac_X550)
6431 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6432 #ifdef CONFIG_IXGBE_DCA
6433 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6434 #endif
6435 break;
6436 default:
6437 break;
6438 }
6439
6440 #ifdef IXGBE_FCOE
6441
6442 spin_lock_init(&adapter->fcoe.lock);
6443
6444 #endif
6445
6446 spin_lock_init(&adapter->fdir_perfect_lock);
6447
6448
6449 spin_lock_init(&adapter->vfs_lock);
6450
6451 #ifdef CONFIG_IXGBE_DCB
6452 ixgbe_init_dcb(adapter);
6453 #endif
6454 ixgbe_init_ipsec_offload(adapter);
6455
6456
6457 hw->fc.requested_mode = ixgbe_fc_full;
6458 hw->fc.current_mode = ixgbe_fc_full;
6459 ixgbe_pbthresh_setup(adapter);
6460 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6461 hw->fc.send_xon = true;
6462 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6463
6464 #ifdef CONFIG_PCI_IOV
6465 if (max_vfs > 0)
6466 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6467
6468
6469 if (hw->mac.type != ixgbe_mac_82598EB) {
6470 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6471 max_vfs = 0;
6472 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6473 }
6474 }
6475 #endif
6476
6477
6478 adapter->rx_itr_setting = 1;
6479 adapter->tx_itr_setting = 1;
6480
6481
6482 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6483 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6484
6485
6486 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6487
6488
6489 if (ixgbe_init_eeprom_params_generic(hw)) {
6490 e_dev_err("EEPROM initialization failed\n");
6491 return -EIO;
6492 }
6493
6494
6495 set_bit(0, adapter->fwd_bitmask);
6496 set_bit(__IXGBE_DOWN, &adapter->state);
6497
6498 return 0;
6499 }
6500
6501 /**
6502  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
6503  * @tx_ring: tx descriptor ring (for a specific queue) to set up
6504  *
6505  * Returns 0 on success, -ENOMEM on failure.
6506  **/
6507 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6508 {
6509 struct device *dev = tx_ring->dev;
6510 int orig_node = dev_to_node(dev);
6511 int ring_node = NUMA_NO_NODE;
6512 int size;
6513
6514 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6515
6516 if (tx_ring->q_vector)
6517 ring_node = tx_ring->q_vector->numa_node;
6518
6519 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6520 if (!tx_ring->tx_buffer_info)
6521 tx_ring->tx_buffer_info = vmalloc(size);
6522 if (!tx_ring->tx_buffer_info)
6523 goto err;
6524
6525
6526 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6527 tx_ring->size = ALIGN(tx_ring->size, 4096);
6528
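/* try to allocate the descriptor ring on the q_vector's NUMA node
 * first; if that fails, the retry below falls back to the device's
 * original node
 */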
6529 set_dev_node(dev, ring_node);
6530 tx_ring->desc = dma_alloc_coherent(dev,
6531 tx_ring->size,
6532 &tx_ring->dma,
6533 GFP_KERNEL);
6534 set_dev_node(dev, orig_node);
6535 if (!tx_ring->desc)
6536 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6537 &tx_ring->dma, GFP_KERNEL);
6538 if (!tx_ring->desc)
6539 goto err;
6540
6541 tx_ring->next_to_use = 0;
6542 tx_ring->next_to_clean = 0;
6543 return 0;
6544
6545 err:
6546 vfree(tx_ring->tx_buffer_info);
6547 tx_ring->tx_buffer_info = NULL;
6548 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6549 return -ENOMEM;
6550 }
6551
6552 /**
6553  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
6554  * @adapter: board private structure
6555  *
6556  * Allocates descriptor rings for every LAN and XDP transmit queue.
6557  * If an allocation fails, any rings that were already set up are
6558  * freed again before returning.
6559  *
6560  * Returns 0 on success, negative on failure.
6561  **/
6562 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6563 {
6564 int i, j = 0, err = 0;
6565
6566 for (i = 0; i < adapter->num_tx_queues; i++) {
6567 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6568 if (!err)
6569 continue;
6570
6571 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6572 goto err_setup_tx;
6573 }
6574 for (j = 0; j < adapter->num_xdp_queues; j++) {
6575 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6576 if (!err)
6577 continue;
6578
6579 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6580 goto err_setup_tx;
6581 }
6582
6583 return 0;
6584 err_setup_tx:
6585
6586 while (j--)
6587 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6588 while (i--)
6589 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6590 return err;
6591 }
6592
6593 static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
6594 {
6595 struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
6596
6597 return q_vector ? q_vector->napi.napi_id : 0;
6598 }
6599
6600 /**
6601  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
6602  * @adapter: pointer to ixgbe_adapter
6603  * @rx_ring: rx descriptor ring (for a specific queue) to set up
6604  *
6605  * Returns 0 on success, -ENOMEM on failure.
6606  **/
6607 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6608 struct ixgbe_ring *rx_ring)
6609 {
6610 struct device *dev = rx_ring->dev;
6611 int orig_node = dev_to_node(dev);
6612 int ring_node = NUMA_NO_NODE;
6613 int size;
6614
6615 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6616
6617 if (rx_ring->q_vector)
6618 ring_node = rx_ring->q_vector->numa_node;
6619
6620 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6621 if (!rx_ring->rx_buffer_info)
6622 rx_ring->rx_buffer_info = vmalloc(size);
6623 if (!rx_ring->rx_buffer_info)
6624 goto err;
6625
6626
6627 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6628 rx_ring->size = ALIGN(rx_ring->size, 4096);
6629
6630 set_dev_node(dev, ring_node);
6631 rx_ring->desc = dma_alloc_coherent(dev,
6632 rx_ring->size,
6633 &rx_ring->dma,
6634 GFP_KERNEL);
6635 set_dev_node(dev, orig_node);
6636 if (!rx_ring->desc)
6637 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6638 &rx_ring->dma, GFP_KERNEL);
6639 if (!rx_ring->desc)
6640 goto err;
6641
6642 rx_ring->next_to_clean = 0;
6643 rx_ring->next_to_use = 0;
6644
6645
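/* register this ring with the XDP Rx-queue info structure so XDP and
 * AF_XDP can identify the queue (the NAPI id is used for busy polling)
 */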
6646 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6647 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
6648 goto err;
6649
6650 rx_ring->xdp_prog = adapter->xdp_prog;
6651
6652 return 0;
6653 err:
6654 vfree(rx_ring->rx_buffer_info);
6655 rx_ring->rx_buffer_info = NULL;
6656 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6657 return -ENOMEM;
6658 }
6659
6660 /**
6661  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
6662  * @adapter: board private structure
6663  *
6664  * Allocates descriptor rings for every receive queue, plus the FCoE
6665  * DDP resources when FCoE support is built in. If an allocation
6666  * fails, any rings that were already set up are freed again before
6667  * returning.
6668  *
6669  * Returns 0 on success, negative on failure.
6670  **/
6670 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6671 {
6672 int i, err = 0;
6673
6674 for (i = 0; i < adapter->num_rx_queues; i++) {
6675 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6676 if (!err)
6677 continue;
6678
6679 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6680 goto err_setup_rx;
6681 }
6682
6683 #ifdef IXGBE_FCOE
6684 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6685 if (!err)
6686 #endif
6687 return 0;
6688 err_setup_rx:
6689
6690 while (i--)
6691 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6692 return err;
6693 }
6694
6695 /**
6696  * ixgbe_free_tx_resources - Free Tx Resources per Queue
6697  * @tx_ring: Tx descriptor ring for a specific queue
6698  *
6699  * Frees all transmit software resources for one ring.
6700  **/
6701 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6702 {
6703 ixgbe_clean_tx_ring(tx_ring);
6704
6705 vfree(tx_ring->tx_buffer_info);
6706 tx_ring->tx_buffer_info = NULL;
6707
6708
6709 if (!tx_ring->desc)
6710 return;
6711
6712 dma_free_coherent(tx_ring->dev, tx_ring->size,
6713 tx_ring->desc, tx_ring->dma);
6714
6715 tx_ring->desc = NULL;
6716 }
6717
6718
6719
6720
6721
6722
6723
6724 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6725 {
6726 int i;
6727
6728 for (i = 0; i < adapter->num_tx_queues; i++)
6729 if (adapter->tx_ring[i]->desc)
6730 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6731 for (i = 0; i < adapter->num_xdp_queues; i++)
6732 if (adapter->xdp_ring[i]->desc)
6733 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6734 }
6735
6736 /**
6737  * ixgbe_free_rx_resources - Free Rx Resources per Queue
6738  * @rx_ring: ring to clean the resources from
6739  *
6740  * Frees all receive software resources for one ring.
6741  **/
6742 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6743 {
6744 ixgbe_clean_rx_ring(rx_ring);
6745
6746 rx_ring->xdp_prog = NULL;
6747 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6748 vfree(rx_ring->rx_buffer_info);
6749 rx_ring->rx_buffer_info = NULL;
6750
6751
6752 if (!rx_ring->desc)
6753 return;
6754
6755 dma_free_coherent(rx_ring->dev, rx_ring->size,
6756 rx_ring->desc, rx_ring->dma);
6757
6758 rx_ring->desc = NULL;
6759 }
6760
6761
6762
6763
6764
6765
6766
6767 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6768 {
6769 int i;
6770
6771 #ifdef IXGBE_FCOE
6772 ixgbe_free_fcoe_ddp_resources(adapter);
6773
6774 #endif
6775 for (i = 0; i < adapter->num_rx_queues; i++)
6776 if (adapter->rx_ring[i]->desc)
6777 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6778 }
6779
6780 /**
6781  * ixgbe_change_mtu - Change the Maximum Transfer Unit
6782  * @netdev: network interface device structure
6783  * @new_mtu: new value for maximum frame size
6784  *
6785  * Returns 0 on success, negative on failure.
6786  **/
6787 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6788 {
6789 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6790
6791 if (adapter->xdp_prog) {
6792 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6793 VLAN_HLEN;
6794 int i;
6795
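/* the XDP path uses a single Rx buffer per frame, so the new frame
 * size (MTU plus Ethernet, FCS and VLAN overhead) must still fit in
 * every ring's Rx buffer
 */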
6796 for (i = 0; i < adapter->num_rx_queues; i++) {
6797 struct ixgbe_ring *ring = adapter->rx_ring[i];
6798
6799 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6800 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6801 return -EINVAL;
6802 }
6803 }
6804 }
6805
6806 /*
6807  * With 82599 in SR-IOV mode, frames larger than the standard
6808  * Ethernet payload cannot be delivered to legacy VFs, so warn that
6809  * setting an MTU above 1500 will effectively disable them.
6810  */
6811 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6812 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6813 (new_mtu > ETH_DATA_LEN))
6814 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6815
6816 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6817 netdev->mtu, new_mtu);
6818
6819
6820 netdev->mtu = new_mtu;
6821
6822 if (netif_running(netdev))
6823 ixgbe_reinit_locked(adapter);
6824
6825 return 0;
6826 }
6827
6828 /**
6829  * ixgbe_open - Called when a network interface is made active
6830  * @netdev: network interface device structure
6831  *
6832  * Returns 0 on success, negative value on failure.
6833  *
6834  * The open entry point is called when a network interface is made
6835  * active by the system (IFF_UP). At this point all resources needed
6836  * for transmit and receive operations are allocated, the interrupt
6837  * handler is registered with the OS, the watchdog timer is started,
6838  * and the stack is notified that the interface is ready.
6839  **/
6840 int ixgbe_open(struct net_device *netdev)
6841 {
6842 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6843 struct ixgbe_hw *hw = &adapter->hw;
6844 int err, queues;
6845
6846
6847 if (test_bit(__IXGBE_TESTING, &adapter->state))
6848 return -EBUSY;
6849
6850 netif_carrier_off(netdev);
6851
6852
6853 err = ixgbe_setup_all_tx_resources(adapter);
6854 if (err)
6855 goto err_setup_tx;
6856
6857
6858 err = ixgbe_setup_all_rx_resources(adapter);
6859 if (err)
6860 goto err_setup_rx;
6861
6862 ixgbe_configure(adapter);
6863
6864 err = ixgbe_request_irq(adapter);
6865 if (err)
6866 goto err_req_irq;
6867
6868
6869 queues = adapter->num_tx_queues;
6870 err = netif_set_real_num_tx_queues(netdev, queues);
6871 if (err)
6872 goto err_set_queues;
6873
6874 queues = adapter->num_rx_queues;
6875 err = netif_set_real_num_rx_queues(netdev, queues);
6876 if (err)
6877 goto err_set_queues;
6878
6879 ixgbe_ptp_init(adapter);
6880
6881 ixgbe_up_complete(adapter);
6882
6883 udp_tunnel_nic_reset_ntf(netdev);
6884
6885 return 0;
6886
6887 err_set_queues:
6888 ixgbe_free_irq(adapter);
6889 err_req_irq:
6890 ixgbe_free_all_rx_resources(adapter);
6891 if (hw->phy.ops.set_phy_power && !adapter->wol)
6892 hw->phy.ops.set_phy_power(&adapter->hw, false);
6893 err_setup_rx:
6894 ixgbe_free_all_tx_resources(adapter);
6895 err_setup_tx:
6896 ixgbe_reset(adapter);
6897
6898 return err;
6899 }
6900
6901 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6902 {
6903 ixgbe_ptp_suspend(adapter);
6904
6905 if (adapter->hw.phy.ops.enter_lplu) {
6906 adapter->hw.phy.reset_disable = true;
6907 ixgbe_down(adapter);
6908 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6909 adapter->hw.phy.reset_disable = false;
6910 } else {
6911 ixgbe_down(adapter);
6912 }
6913
6914 ixgbe_free_irq(adapter);
6915
6916 ixgbe_free_all_tx_resources(adapter);
6917 ixgbe_free_all_rx_resources(adapter);
6918 }
6919
6920 /**
6921  * ixgbe_close - Disables a network interface
6922  * @netdev: network interface device structure
6923  *
6924  * Returns 0, this is not allowed to fail.
6925  *
6926  * The close entry point is called when an interface is de-activated
6927  * by the OS. The hardware is still under the driver's control, but
6928  * needs to be disabled. A global MAC reset is issued to stop the
6929  * hardware, and all transmit and receive resources are freed.
6930  **/
6931 int ixgbe_close(struct net_device *netdev)
6932 {
6933 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6934
6935 ixgbe_ptp_stop(adapter);
6936
6937 if (netif_device_present(netdev))
6938 ixgbe_close_suspend(adapter);
6939
6940 ixgbe_fdir_filter_exit(adapter);
6941
6942 ixgbe_release_hw_control(adapter);
6943
6944 return 0;
6945 }
6946
6947 static int __maybe_unused ixgbe_resume(struct device *dev_d)
6948 {
6949 struct pci_dev *pdev = to_pci_dev(dev_d);
6950 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6951 struct net_device *netdev = adapter->netdev;
6952 u32 err;
6953
6954 adapter->hw.hw_addr = adapter->io_addr;
6955
6956 err = pci_enable_device_mem(pdev);
6957 if (err) {
6958 e_dev_err("Cannot enable PCI device from suspend\n");
6959 return err;
6960 }
6961 smp_mb__before_atomic();
6962 clear_bit(__IXGBE_DISABLED, &adapter->state);
6963 pci_set_master(pdev);
6964
6965 device_wakeup_disable(dev_d);
6966
6967 ixgbe_reset(adapter);
6968
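/* clear any wake-up status bits that may have been latched while the
 * device was suspended
 */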
6969 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6970
6971 rtnl_lock();
6972 err = ixgbe_init_interrupt_scheme(adapter);
6973 if (!err && netif_running(netdev))
6974 err = ixgbe_open(netdev);
6975
6976
6977 if (!err)
6978 netif_device_attach(netdev);
6979 rtnl_unlock();
6980
6981 return err;
6982 }
6983
6984 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6985 {
6986 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6987 struct net_device *netdev = adapter->netdev;
6988 struct ixgbe_hw *hw = &adapter->hw;
6989 u32 ctrl;
6990 u32 wufc = adapter->wol;
6991
6992 rtnl_lock();
6993 netif_device_detach(netdev);
6994
6995 if (netif_running(netdev))
6996 ixgbe_close_suspend(adapter);
6997
6998 ixgbe_clear_interrupt_scheme(adapter);
6999 rtnl_unlock();
7000
7001 if (hw->mac.ops.stop_link_on_d3)
7002 hw->mac.ops.stop_link_on_d3(hw);
7003
7004 if (wufc) {
7005 u32 fctrl;
7006
7007 ixgbe_set_rx_mode(netdev);
7008
7009
7010 if (hw->mac.ops.enable_tx_laser)
7011 hw->mac.ops.enable_tx_laser(hw);
7012
7013
7014 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7015 fctrl |= IXGBE_FCTRL_MPE;
7016 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
7017
7018 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
7019 ctrl |= IXGBE_CTRL_GIO_DIS;
7020 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
7021
7022 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
7023 } else {
7024 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
7025 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
7026 }
7027
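/* the driver never arms wake-from-D3 on 82598; later MACs are armed to
 * wake only when at least one wake-up filter (wufc) is configured
 */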
7028 switch (hw->mac.type) {
7029 case ixgbe_mac_82598EB:
7030 pci_wake_from_d3(pdev, false);
7031 break;
7032 case ixgbe_mac_82599EB:
7033 case ixgbe_mac_X540:
7034 case ixgbe_mac_X550:
7035 case ixgbe_mac_X550EM_x:
7036 case ixgbe_mac_x550em_a:
7037 pci_wake_from_d3(pdev, !!wufc);
7038 break;
7039 default:
7040 break;
7041 }
7042
7043 *enable_wake = !!wufc;
7044 if (hw->phy.ops.set_phy_power && !*enable_wake)
7045 hw->phy.ops.set_phy_power(hw, false);
7046
7047 ixgbe_release_hw_control(adapter);
7048
7049 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
7050 pci_disable_device(pdev);
7051
7052 return 0;
7053 }
7054
7055 static int __maybe_unused ixgbe_suspend(struct device *dev_d)
7056 {
7057 struct pci_dev *pdev = to_pci_dev(dev_d);
7058 int retval;
7059 bool wake;
7060
7061 retval = __ixgbe_shutdown(pdev, &wake);
7062
7063 device_set_wakeup_enable(dev_d, wake);
7064
7065 return retval;
7066 }
7067
7068 static void ixgbe_shutdown(struct pci_dev *pdev)
7069 {
7070 bool wake;
7071
7072 __ixgbe_shutdown(pdev, &wake);
7073
7074 if (system_state == SYSTEM_POWER_OFF) {
7075 pci_wake_from_d3(pdev, wake);
7076 pci_set_power_state(pdev, PCI_D3hot);
7077 }
7078 }
7079
7080 /**
7081  * ixgbe_update_stats - Update the board statistics counters.
7082  * @adapter: board private structure
7083  **/
7084 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7085 {
7086 struct net_device *netdev = adapter->netdev;
7087 struct ixgbe_hw *hw = &adapter->hw;
7088 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7089 u64 total_mpc = 0;
7090 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7091 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7092 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7093 u64 alloc_rx_page = 0;
7094 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7095
7096 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7097 test_bit(__IXGBE_RESETTING, &adapter->state))
7098 return;
7099
7100 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7101 u64 rsc_count = 0;
7102 u64 rsc_flush = 0;
7103 for (i = 0; i < adapter->num_rx_queues; i++) {
7104 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7105 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7106 }
7107 adapter->rsc_total_count = rsc_count;
7108 adapter->rsc_total_flush = rsc_flush;
7109 }
7110
7111 for (i = 0; i < adapter->num_rx_queues; i++) {
7112 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
7113
7114 if (!rx_ring)
7115 continue;
7116 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7117 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7118 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7119 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7120 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7121 bytes += rx_ring->stats.bytes;
7122 packets += rx_ring->stats.packets;
7123 }
7124 adapter->non_eop_descs = non_eop_descs;
7125 adapter->alloc_rx_page = alloc_rx_page;
7126 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7127 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7128 adapter->hw_csum_rx_error = hw_csum_rx_error;
7129 netdev->stats.rx_bytes = bytes;
7130 netdev->stats.rx_packets = packets;
7131
7132 bytes = 0;
7133 packets = 0;
7134
7135 for (i = 0; i < adapter->num_tx_queues; i++) {
7136 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
7137
7138 if (!tx_ring)
7139 continue;
7140 restart_queue += tx_ring->tx_stats.restart_queue;
7141 tx_busy += tx_ring->tx_stats.tx_busy;
7142 bytes += tx_ring->stats.bytes;
7143 packets += tx_ring->stats.packets;
7144 }
7145 for (i = 0; i < adapter->num_xdp_queues; i++) {
7146 struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
7147
7148 if (!xdp_ring)
7149 continue;
7150 restart_queue += xdp_ring->tx_stats.restart_queue;
7151 tx_busy += xdp_ring->tx_stats.tx_busy;
7152 bytes += xdp_ring->stats.bytes;
7153 packets += xdp_ring->stats.packets;
7154 }
7155 adapter->restart_queue = restart_queue;
7156 adapter->tx_busy = tx_busy;
7157 netdev->stats.tx_bytes = bytes;
7158 netdev->stats.tx_packets = packets;
7159
7160 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7161
7162
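/* per packet-buffer statistics: missed packets and XON/XOFF frame
 * counts for each of the 8 packet buffers (traffic classes)
 */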
7163 for (i = 0; i < 8; i++) {
7164
7165 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7166 missed_rx += mpc;
7167 hwstats->mpc[i] += mpc;
7168 total_mpc += hwstats->mpc[i];
7169 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7170 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7171 switch (hw->mac.type) {
7172 case ixgbe_mac_82598EB:
7173 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7174 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7175 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7176 hwstats->pxonrxc[i] +=
7177 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7178 break;
7179 case ixgbe_mac_82599EB:
7180 case ixgbe_mac_X540:
7181 case ixgbe_mac_X550:
7182 case ixgbe_mac_X550EM_x:
7183 case ixgbe_mac_x550em_a:
7184 hwstats->pxonrxc[i] +=
7185 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7186 break;
7187 default:
7188 break;
7189 }
7190 }
7191
7192
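/* per-queue packet counters for the first 16 queues; 82599 and later
 * MACs also keep byte counters split across low/high registers
 */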
7193 for (i = 0; i < 16; i++) {
7194 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7195 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7196 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7197 (hw->mac.type == ixgbe_mac_X540) ||
7198 (hw->mac.type == ixgbe_mac_X550) ||
7199 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7200 (hw->mac.type == ixgbe_mac_x550em_a)) {
7201 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7202 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
7203 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7204 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
7205 }
7206 }
7207
7208 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7209
7210 hwstats->gprc -= missed_rx;
7211
7212 ixgbe_update_xoff_received(adapter);
7213
7214
7215 switch (hw->mac.type) {
7216 case ixgbe_mac_82598EB:
7217 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7218 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7219 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7220 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7221 break;
7222 case ixgbe_mac_X540:
7223 case ixgbe_mac_X550:
7224 case ixgbe_mac_X550EM_x:
7225 case ixgbe_mac_x550em_a:
7226
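/* OS-to-BMC and BMC-to-OS management traffic counters (X540 and later) */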
7227 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7228 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7229 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7230 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7231 fallthrough;
7232 case ixgbe_mac_82599EB:
7233 for (i = 0; i < 16; i++)
7234 adapter->hw_rx_no_dma_resources +=
7235 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7236 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7237 IXGBE_READ_REG(hw, IXGBE_GORCH);
7238 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7239 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7240 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7241 IXGBE_READ_REG(hw, IXGBE_TORH);
7242 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7243 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7244 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7245 #ifdef IXGBE_FCOE
7246 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7247 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7248 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7249 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7250 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7251 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7252
7253 if (adapter->fcoe.ddp_pool) {
7254 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7255 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7256 unsigned int cpu;
7257 u64 noddp = 0, noddp_ext_buff = 0;
7258 for_each_possible_cpu(cpu) {
7259 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7260 noddp += ddp_pool->noddp;
7261 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7262 }
7263 hwstats->fcoe_noddp = noddp;
7264 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7265 }
7266 #endif
7267 break;
7268 default:
7269 break;
7270 }
7271 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7272 hwstats->bprc += bprc;
7273 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7274 if (hw->mac.type == ixgbe_mac_82598EB)
7275 hwstats->mprc -= bprc;
7276 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7277 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7278 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7279 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7280 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7281 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7282 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7283 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7284 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7285 hwstats->lxontxc += lxon;
7286 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7287 hwstats->lxofftxc += lxoff;
7288 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7289 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7290
7291
7292
7293 xon_off_tot = lxon + lxoff;
7294 hwstats->gptc -= xon_off_tot;
7295 hwstats->mptc -= xon_off_tot;
7296 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7297 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7298 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7299 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7300 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7301 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7302 hwstats->ptc64 -= xon_off_tot;
7303 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7304 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7305 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7306 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7307 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7308 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7309
7310
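/* Fill out the OS statistics structure */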
7311 netdev->stats.multicast = hwstats->mprc;
7312
7313
7314 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7315 netdev->stats.rx_dropped = 0;
7316 netdev->stats.rx_length_errors = hwstats->rlec;
7317 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7318 netdev->stats.rx_missed_errors = total_mpc;
7319
7320
7321
7322
7323
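/* accumulate per-VF counters; skipped while the adapter is resetting
 * so counters read mid-reset are not folded into the totals
 */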
7324 if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
7325 for (i = 0; i < adapter->num_vfs; i++) {
7326 UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),
7327 adapter->vfinfo[i].last_vfstats.gprc,
7328 adapter->vfinfo[i].vfstats.gprc);
7329 UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),
7330 adapter->vfinfo[i].last_vfstats.gptc,
7331 adapter->vfinfo[i].vfstats.gptc);
7332 UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),
7333 IXGBE_PVFGORC_MSB(i),
7334 adapter->vfinfo[i].last_vfstats.gorc,
7335 adapter->vfinfo[i].vfstats.gorc);
7336 UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),
7337 IXGBE_PVFGOTC_MSB(i),
7338 adapter->vfinfo[i].last_vfstats.gotc,
7339 adapter->vfinfo[i].vfstats.gotc);
7340 UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),
7341 adapter->vfinfo[i].last_vfstats.mprc,
7342 adapter->vfinfo[i].vfstats.mprc);
7343 }
7344 }
7345 }
7346
7347 /**
7348  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
7349  * @adapter: pointer to the device adapter structure
7350  **/
7351 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7352 {
7353 struct ixgbe_hw *hw = &adapter->hw;
7354 int i;
7355
7356 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7357 return;
7358
7359 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7360
7361
7362 if (test_bit(__IXGBE_DOWN, &adapter->state))
7363 return;
7364
7365
7366 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7367 return;
7368
7369 adapter->fdir_overflow++;
7370
7371 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7372 for (i = 0; i < adapter->num_tx_queues; i++)
7373 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7374 &(adapter->tx_ring[i]->state));
7375 for (i = 0; i < adapter->num_xdp_queues; i++)
7376 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7377 &adapter->xdp_ring[i]->state);
7378
7379 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7380 } else {
7381 e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
7383 }
7384 }
7385
7386 /**
7387  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
7388  * @adapter: pointer to the device adapter structure
7389  *
7390  * This function serves two purposes.  First it strobes the interrupt lines
7391  * in order to make certain interrupts are occurring.  Secondly it sets the
7392  * bits needed to check for TX hangs.  As a result we should immediately
7393  * determine if a hang has occurred.
7394  **/
7395 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7396 {
7397 struct ixgbe_hw *hw = &adapter->hw;
7398 u64 eics = 0;
7399 int i;
7400
7401
7402 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7403 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7404 test_bit(__IXGBE_RESETTING, &adapter->state))
7405 return;
7406
7407
7408 if (netif_carrier_ok(adapter->netdev)) {
7409 for (i = 0; i < adapter->num_tx_queues; i++)
7410 set_check_for_tx_hang(adapter->tx_ring[i]);
7411 for (i = 0; i < adapter->num_xdp_queues; i++)
7412 set_check_for_tx_hang(adapter->xdp_ring[i]);
7413 }
7414
7415 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7416 /*
7417  * for legacy and MSI interrupts don't set any bits
7418  * that are enabled for EIAM, because this operation
7419  * would set *both* EIMS and EICS for any bit in EIAM
7420  */
7421 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7422 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7423 } else {
7424
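/* get one bit set for every active Tx/Rx interrupt vector */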
7425 for (i = 0; i < adapter->num_q_vectors; i++) {
7426 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7427 if (qv->rx.ring || qv->tx.ring)
7428 eics |= BIT_ULL(i);
7429 }
7430 }
7431
7432 /* Cause software interrupt to ensure rings are cleaned */
7433 ixgbe_irq_rearm_queues(adapter, eics);
7434 }
7435
7436 /**
7437  * ixgbe_watchdog_update_link - update the link status
7438  * @adapter: pointer to the device adapter structure
7439  **/
7440 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7441 {
7442 struct ixgbe_hw *hw = &adapter->hw;
7443 u32 link_speed = adapter->link_speed;
7444 bool link_up = adapter->link_up;
7445 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7446
7447 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7448 return;
7449
7450 if (hw->mac.ops.check_link) {
7451 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7452 } else {
7453 /* always assume link is up, if no check link function */
7454 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7455 link_up = true;
7456 }
7457
7458 if (adapter->ixgbe_ieee_pfc)
7459 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7460
7461 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7462 hw->mac.ops.fc_enable(hw);
7463 ixgbe_set_rx_drop_en(adapter);
7464 }
7465
7466 if (link_up ||
7467 time_after(jiffies, (adapter->link_check_timeout +
7468 IXGBE_TRY_LINK_TIMEOUT))) {
7469 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7470 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7471 IXGBE_WRITE_FLUSH(hw);
7472 }
7473
7474 adapter->link_up = link_up;
7475 adapter->link_speed = link_speed;
7476 }
7477
7478 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7479 {
7480 #ifdef CONFIG_IXGBE_DCB
7481 struct net_device *netdev = adapter->netdev;
7482 struct dcb_app app = {
7483 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7484 .protocol = 0,
7485 };
7486 u8 up = 0;
7487
7488 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7489 up = dcb_ieee_getapp_mask(netdev, &app);
7490
7491 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7492 #endif
7493 }
7494
7495 /**
7496  * ixgbe_watchdog_link_is_up - update netif_carrier status and
7497  *                             print link up message
7498  * @adapter: pointer to the device adapter structure
7499  **/
7500 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7501 {
7502 struct net_device *netdev = adapter->netdev;
7503 struct ixgbe_hw *hw = &adapter->hw;
7504 u32 link_speed = adapter->link_speed;
7505 const char *speed_str;
7506 bool flow_rx, flow_tx;
7507
7508
7509 if (netif_carrier_ok(netdev))
7510 return;
7511
7512 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7513
7514 switch (hw->mac.type) {
7515 case ixgbe_mac_82598EB: {
7516 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7517 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7518 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7519 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7520 }
7521 break;
7522 case ixgbe_mac_X540:
7523 case ixgbe_mac_X550:
7524 case ixgbe_mac_X550EM_x:
7525 case ixgbe_mac_x550em_a:
7526 case ixgbe_mac_82599EB: {
7527 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7528 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7529 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7530 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7531 }
7532 break;
7533 default:
7534 flow_tx = false;
7535 flow_rx = false;
7536 break;
7537 }
7538
7539 adapter->last_rx_ptp_check = jiffies;
7540
7541 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7542 ixgbe_ptp_start_cyclecounter(adapter);
7543
7544 switch (link_speed) {
7545 case IXGBE_LINK_SPEED_10GB_FULL:
7546 speed_str = "10 Gbps";
7547 break;
7548 case IXGBE_LINK_SPEED_5GB_FULL:
7549 speed_str = "5 Gbps";
7550 break;
7551 case IXGBE_LINK_SPEED_2_5GB_FULL:
7552 speed_str = "2.5 Gbps";
7553 break;
7554 case IXGBE_LINK_SPEED_1GB_FULL:
7555 speed_str = "1 Gbps";
7556 break;
7557 case IXGBE_LINK_SPEED_100_FULL:
7558 speed_str = "100 Mbps";
7559 break;
7560 case IXGBE_LINK_SPEED_10_FULL:
7561 speed_str = "10 Mbps";
7562 break;
7563 default:
7564 speed_str = "unknown speed";
7565 break;
7566 }
7567 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7568 ((flow_rx && flow_tx) ? "RX/TX" :
7569 (flow_rx ? "RX" :
7570 (flow_tx ? "TX" : "None"))));
7571
7572 netif_carrier_on(netdev);
7573 ixgbe_check_vf_rate_limit(adapter);
7574
7575
7576 netif_tx_wake_all_queues(adapter->netdev);
7577
7578
7579 ixgbe_update_default_up(adapter);
7580
7581
7582 ixgbe_ping_all_vfs(adapter);
7583 }
7584
7585 /**
7586  * ixgbe_watchdog_link_is_down - update netif_carrier status and
7587  *                               print link down message
7588  * @adapter: pointer to the adapter structure
7589  **/
7590 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7591 {
7592 struct net_device *netdev = adapter->netdev;
7593 struct ixgbe_hw *hw = &adapter->hw;
7594
7595 adapter->link_up = false;
7596 adapter->link_speed = 0;
7597
7598
7599 if (!netif_carrier_ok(netdev))
7600 return;
7601
7602
7603 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7604 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7605
7606 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7607 ixgbe_ptp_start_cyclecounter(adapter);
7608
7609 e_info(drv, "NIC Link is Down\n");
7610 netif_carrier_off(netdev);
7611
7612
7613 ixgbe_ping_all_vfs(adapter);
7614 }
7615
7616 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7617 {
7618 int i;
7619
7620 for (i = 0; i < adapter->num_tx_queues; i++) {
7621 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7622
7623 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7624 return true;
7625 }
7626
7627 for (i = 0; i < adapter->num_xdp_queues; i++) {
7628 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7629
7630 if (ring->next_to_use != ring->next_to_clean)
7631 return true;
7632 }
7633
7634 return false;
7635 }
7636
7637 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7638 {
7639 struct ixgbe_hw *hw = &adapter->hw;
7640 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7641 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7642
7643 int i, j;
7644
7645 if (!adapter->num_vfs)
7646 return false;
7647
7648
7649 if (hw->mac.type >= ixgbe_mac_X550)
7650 return false;
7651
7652 for (i = 0; i < adapter->num_vfs; i++) {
7653 for (j = 0; j < q_per_pool; j++) {
7654 u32 h, t;
7655
7656 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7657 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7658
7659 if (h != t)
7660 return true;
7661 }
7662 }
7663
7664 return false;
7665 }
7666
7667 /**
7668  * ixgbe_watchdog_flush_tx - flush queues on link down
7669  * @adapter: pointer to the device adapter structure
7670  **/
7671 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7672 {
7673 if (!netif_carrier_ok(adapter->netdev)) {
7674 if (ixgbe_ring_tx_pending(adapter) ||
7675 ixgbe_vf_tx_pending(adapter)) {
7676
7677
7678
7679
7680
7681 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7682 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7683 }
7684 }
7685 }
7686
7687 #ifdef CONFIG_PCI_IOV
7688 static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
7689 {
7690 struct ixgbe_hw *hw = &adapter->hw;
7691
7692 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
7693 adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
7694 adapter->vfinfo[vf].primary_abort_count++;
7695 if (adapter->vfinfo[vf].primary_abort_count ==
7696 IXGBE_PRIMARY_ABORT_LIMIT) {
7697 ixgbe_set_vf_link_state(adapter, vf,
7698 IFLA_VF_LINK_STATE_DISABLE);
7699 adapter->vfinfo[vf].primary_abort_count = 0;
7700
7701 e_info(drv,
7702 "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
7703 hw->bus.func, vf,
7704 adapter->vfinfo[vf].vf_mac_addresses);
7705 }
7706 }
7707 }
7708
7709 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7710 {
7711 struct ixgbe_hw *hw = &adapter->hw;
7712 struct pci_dev *pdev = adapter->pdev;
7713 unsigned int vf;
7714 u32 gpc;
7715
7716 if (!(netif_carrier_ok(adapter->netdev)))
7717 return;
7718
7719 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7720 if (gpc)
7721 return;
7722
7723
7724
7725
7726
7727
7728 if (!pdev)
7729 return;
7730
7731
7732 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7733 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7734 u16 status_reg;
7735
7736 if (!vfdev)
7737 continue;
7738 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7739 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7740 status_reg & PCI_STATUS_REC_MASTER_ABORT) {
7741 ixgbe_bad_vf_abort(adapter, vf);
7742 pcie_flr(vfdev);
7743 }
7744 }
7745 }
7746
7747 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7748 {
7749 u32 ssvpc;
7750
7751
7752 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7753 adapter->num_vfs == 0)
7754 return;
7755
7756 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7757
7758
7759
7760
7761
7762 if (!ssvpc)
7763 return;
7764
7765 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7766 }
7767 #else
7768 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7769 {
7770 }
7771
7772 static void
7773 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7774 {
7775 }
7776 #endif
7777
7778 
7779 /**
7780  * ixgbe_watchdog_subtask - check and bring link up
7781  * @adapter: pointer to the device adapter structure
7782  **/
7783 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7784 {
7785
7786 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7787 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7788 test_bit(__IXGBE_RESETTING, &adapter->state))
7789 return;
7790
7791 ixgbe_watchdog_update_link(adapter);
7792
7793 if (adapter->link_up)
7794 ixgbe_watchdog_link_is_up(adapter);
7795 else
7796 ixgbe_watchdog_link_is_down(adapter);
7797
7798 ixgbe_check_for_bad_vf(adapter);
7799 ixgbe_spoof_check(adapter);
7800 ixgbe_update_stats(adapter);
7801
7802 ixgbe_watchdog_flush_tx(adapter);
7803 }
7804
7805 /**
7806  * ixgbe_sfp_detection_subtask - poll for SFP+ cable
7807  * @adapter: the ixgbe adapter structure
7808  **/
7809 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7810 {
7811 struct ixgbe_hw *hw = &adapter->hw;
7812 s32 err;
7813
7814
7815 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7816 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7817 return;
7818
7819 if (adapter->sfp_poll_time &&
7820 time_after(adapter->sfp_poll_time, jiffies))
7821 return;
7822
7823
7824 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7825 return;
7826
7827 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7828
7829 err = hw->phy.ops.identify_sfp(hw);
7830 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7831 goto sfp_out;
7832
7833 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7834
7835
7836 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7837 }
7838
7839
7840 if (err)
7841 goto sfp_out;
7842
7843
7844 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7845 goto sfp_out;
7846
7847 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7848
7849
7850
7851
7852
7853
7854 if (hw->mac.type == ixgbe_mac_82598EB)
7855 err = hw->phy.ops.reset(hw);
7856 else
7857 err = hw->mac.ops.setup_sfp(hw);
7858
7859 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7860 goto sfp_out;
7861
7862 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7863 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7864
7865 sfp_out:
7866 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7867
7868 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7869 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7870 e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
7871 e_dev_err("Reload the driver after installing a supported module.\n");
7874 unregister_netdev(adapter->netdev);
7875 }
7876 }
7877
7878 /**
7879  * ixgbe_sfp_link_config_subtask - set up link SFP if needed
7880  * @adapter: the ixgbe adapter structure
7881  **/
7882 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7883 {
7884 struct ixgbe_hw *hw = &adapter->hw;
7885 u32 cap_speed;
7886 u32 speed;
7887 bool autoneg = false;
7888
7889 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7890 return;
7891
7892
7893 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7894 return;
7895
7896 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7897
7898 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7899
7900
7901 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7902 speed = IXGBE_LINK_SPEED_10GB_FULL;
7903 else
7904 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7905 IXGBE_LINK_SPEED_1GB_FULL);
7906
7907 if (hw->mac.ops.setup_link)
7908 hw->mac.ops.setup_link(hw, speed, true);
7909
7910 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7911 adapter->link_check_timeout = jiffies;
7912 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7913 }
7914
7915 /**
7916  * ixgbe_service_timer - Timer Call-back
7917  * @t: pointer to timer_list structure
7918  **/
7919 static void ixgbe_service_timer(struct timer_list *t)
7920 {
7921 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7922 unsigned long next_event_offset;
7923
7924
7925 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7926 next_event_offset = HZ / 10;
7927 else
7928 next_event_offset = HZ * 2;
7929
7930
7931 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7932
7933 ixgbe_service_event_schedule(adapter);
7934 }
7935
7936 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7937 {
7938 struct ixgbe_hw *hw = &adapter->hw;
7939 u32 status;
7940
7941 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7942 return;
7943
7944 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7945
7946 if (!hw->phy.ops.handle_lasi)
7947 return;
7948
7949 status = hw->phy.ops.handle_lasi(&adapter->hw);
7950 if (status != IXGBE_ERR_OVERTEMP)
7951 return;
7952
7953 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7954 }
7955
7956 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7957 {
7958 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7959 return;
7960
7961 rtnl_lock();
7962
7963 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7964 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7965 test_bit(__IXGBE_RESETTING, &adapter->state)) {
7966 rtnl_unlock();
7967 return;
7968 }
7969
7970 ixgbe_dump(adapter);
7971 netdev_err(adapter->netdev, "Reset adapter\n");
7972 adapter->tx_timeout_count++;
7973
7974 ixgbe_reinit_locked(adapter);
7975 rtnl_unlock();
7976 }
7977
7978 /**
7979  * ixgbe_check_fw_error - Check firmware for errors
7980  * @adapter: the adapter private structure
7981  *
7982  * Check firmware errors in register FWSM
7983  **/
7984 static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7985 {
7986 struct ixgbe_hw *hw = &adapter->hw;
7987 u32 fwsm;
7988
7989
7990 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7991
7992 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7993 !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7994 e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7995 fwsm);
7996
7997 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7998 e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7999 return true;
8000 }
8001
8002 return false;
8003 }
8004
8005 /**
8006  * ixgbe_service_task - manages and runs subtasks
8007  * @work: pointer to work_struct containing our data
8008  **/
8009 static void ixgbe_service_task(struct work_struct *work)
8010 {
8011 struct ixgbe_adapter *adapter = container_of(work,
8012 struct ixgbe_adapter,
8013 service_task);
8014 if (ixgbe_removed(adapter->hw.hw_addr)) {
8015 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
8016 rtnl_lock();
8017 ixgbe_down(adapter);
8018 rtnl_unlock();
8019 }
8020 ixgbe_service_event_complete(adapter);
8021 return;
8022 }
8023 if (ixgbe_check_fw_error(adapter)) {
8024 if (!test_bit(__IXGBE_DOWN, &adapter->state))
8025 unregister_netdev(adapter->netdev);
8026 ixgbe_service_event_complete(adapter);
8027 return;
8028 }
8029 ixgbe_reset_subtask(adapter);
8030 ixgbe_phy_interrupt_subtask(adapter);
8031 ixgbe_sfp_detection_subtask(adapter);
8032 ixgbe_sfp_link_config_subtask(adapter);
8033 ixgbe_check_overtemp_subtask(adapter);
8034 ixgbe_watchdog_subtask(adapter);
8035 ixgbe_fdir_reinit_subtask(adapter);
8036 ixgbe_check_hang_subtask(adapter);
8037
8038 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
8039 ixgbe_ptp_overflow_check(adapter);
8040 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
8041 ixgbe_ptp_rx_hang(adapter);
8042 ixgbe_ptp_tx_hang(adapter);
8043 }
8044
8045 ixgbe_service_event_complete(adapter);
8046 }
8047
8048 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
8049 struct ixgbe_tx_buffer *first,
8050 u8 *hdr_len,
8051 struct ixgbe_ipsec_tx_data *itd)
8052 {
8053 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
8054 struct sk_buff *skb = first->skb;
8055 union {
8056 struct iphdr *v4;
8057 struct ipv6hdr *v6;
8058 unsigned char *hdr;
8059 } ip;
8060 union {
8061 struct tcphdr *tcp;
8062 struct udphdr *udp;
8063 unsigned char *hdr;
8064 } l4;
8065 u32 paylen, l4_offset;
8066 u32 fceof_saidx = 0;
8067 int err;
8068
8069 if (skb->ip_summed != CHECKSUM_PARTIAL)
8070 return 0;
8071
8072 if (!skb_is_gso(skb))
8073 return 0;
8074
8075 err = skb_cow_head(skb, 0);
8076 if (err < 0)
8077 return err;
8078
8079 if (eth_p_mpls(first->protocol))
8080 ip.hdr = skb_inner_network_header(skb);
8081 else
8082 ip.hdr = skb_network_header(skb);
8083 l4.hdr = skb_checksum_start(skb);
8084
8085
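/* choose the L4 type for the context descriptor: UDP segmentation
 * offload when SKB_GSO_UDP_L4 is set, otherwise standard TCP TSO
 */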
8086 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
8087 IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
8088
8089
8090 if (ip.v4->version == 4) {
8091 unsigned char *csum_start = skb_checksum_start(skb);
8092 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
8093 int len = csum_start - trans_start;
8094
8095
8096
8097
8098
8099 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
8100 csum_fold(csum_partial(trans_start,
8101 len, 0)) : 0;
8102 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
8103
8104 ip.v4->tot_len = 0;
8105 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8106 IXGBE_TX_FLAGS_CSUM |
8107 IXGBE_TX_FLAGS_IPV4;
8108 } else {
8109 ip.v6->payload_len = 0;
8110 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8111 IXGBE_TX_FLAGS_CSUM;
8112 }
8113
8114
8115 l4_offset = l4.hdr - skb->data;
8116
8117
8118 paylen = skb->len - l4_offset;
8119
8120 if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
8121
8122 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8123 csum_replace_by_diff(&l4.tcp->check,
8124 (__force __wsum)htonl(paylen));
8125 } else {
8126
8127 *hdr_len = sizeof(*l4.udp) + l4_offset;
8128 csum_replace_by_diff(&l4.udp->check,
8129 (__force __wsum)htonl(paylen));
8130 }
8131
8132
8133 first->gso_segs = skb_shinfo(skb)->gso_segs;
8134 first->bytecount += (first->gso_segs - 1) * *hdr_len;
8135
8136
8137 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8138 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8139
8140 fceof_saidx |= itd->sa_idx;
8141 type_tucmd |= itd->flags | itd->trailer_len;
8142
8143
8144 vlan_macip_lens = l4.hdr - ip.hdr;
8145 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8146 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8147
8148 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8149 mss_l4len_idx);
8150
8151 return 1;
8152 }
8153
8154 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8155 struct ixgbe_tx_buffer *first,
8156 struct ixgbe_ipsec_tx_data *itd)
8157 {
8158 struct sk_buff *skb = first->skb;
8159 u32 vlan_macip_lens = 0;
8160 u32 fceof_saidx = 0;
8161 u32 type_tucmd = 0;
8162
8163 if (skb->ip_summed != CHECKSUM_PARTIAL) {
8164 csum_failed:
8165 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8166 IXGBE_TX_FLAGS_CC)))
8167 return;
8168 goto no_csum;
8169 }
8170
8171 switch (skb->csum_offset) {
8172 case offsetof(struct tcphdr, check):
8173 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8174 fallthrough;
8175 case offsetof(struct udphdr, check):
8176 break;
8177 case offsetof(struct sctphdr, checksum):
8178
8179 if (skb_csum_is_sctp(skb)) {
8180 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8181 break;
8182 }
8183 fallthrough;
8184 default:
8185 skb_checksum_help(skb);
8186 goto csum_failed;
8187 }
8188
8189
8190 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8191 vlan_macip_lens = skb_checksum_start_offset(skb) -
8192 skb_network_offset(skb);
8193 no_csum:
8194
8195 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8196 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8197
8198 fceof_saidx |= itd->sa_idx;
8199 type_tucmd |= itd->flags | itd->trailer_len;
8200
8201 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8202 }
8203
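/* IXGBE_SET_FLAG moves a flag bit from _input into the bit position used
 * by _result, multiplying or dividing depending on which position is higher
 */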
8204 #define IXGBE_SET_FLAG(_input, _flag, _result) \
8205 ((_flag <= _result) ? \
8206 ((u32)(_input & _flag) * (_result / _flag)) : \
8207 ((u32)(_input & _flag) / (_flag / _result)))
8208
8209 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8210 {
8211
8212 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8213 IXGBE_ADVTXD_DCMD_DEXT |
8214 IXGBE_ADVTXD_DCMD_IFCS;
8215
8216
8217 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8218 IXGBE_ADVTXD_DCMD_VLE);
8219
8220
8221 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8222 IXGBE_ADVTXD_DCMD_TSE);
8223
8224
8225 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8226 IXGBE_ADVTXD_MAC_TSTAMP);
8227
8228
8229 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8230
8231 return cmd_type;
8232 }
8233
8234 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8235 u32 tx_flags, unsigned int paylen)
8236 {
8237 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8238
8239
8240 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8241 IXGBE_TX_FLAGS_CSUM,
8242 IXGBE_ADVTXD_POPTS_TXSM);
8243
8244
8245 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8246 IXGBE_TX_FLAGS_IPV4,
8247 IXGBE_ADVTXD_POPTS_IXSM);
8248
8249
8250 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8251 IXGBE_TX_FLAGS_IPSEC,
8252 IXGBE_ADVTXD_POPTS_IPSEC);
8253
8254
8255
8256
8257
8258 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8259 IXGBE_TX_FLAGS_CC,
8260 IXGBE_ADVTXD_CC);
8261
8262 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8263 }
8264
8265 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8266 {
8267 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8268
8269 /* Herbert's original patch had:
8270  *  smp_mb__after_netif_stop_queue();
8271  * but since that doesn't exist yet, just open code it.
8272  */
8273 smp_mb();
8274
8275 /* We need to check again in case another CPU has just
8276  * made room available.
8277  */
8278 if (likely(ixgbe_desc_unused(tx_ring) < size))
8279 return -EBUSY;
8280
8281
8282 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8283 ++tx_ring->tx_stats.restart_queue;
8284 return 0;
8285 }
8286
8287 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8288 {
8289 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8290 return 0;
8291
8292 return __ixgbe_maybe_stop_tx(tx_ring, size);
8293 }
8294
8295 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8296 struct ixgbe_tx_buffer *first,
8297 const u8 hdr_len)
8298 {
8299 struct sk_buff *skb = first->skb;
8300 struct ixgbe_tx_buffer *tx_buffer;
8301 union ixgbe_adv_tx_desc *tx_desc;
8302 skb_frag_t *frag;
8303 dma_addr_t dma;
8304 unsigned int data_len, size;
8305 u32 tx_flags = first->tx_flags;
8306 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8307 u16 i = tx_ring->next_to_use;
8308
8309 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8310
8311 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8312
8313 size = skb_headlen(skb);
8314 data_len = skb->data_len;
8315
8316 #ifdef IXGBE_FCOE
8317 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8318 if (data_len < sizeof(struct fcoe_crc_eof)) {
8319 size -= sizeof(struct fcoe_crc_eof) - data_len;
8320 data_len = 0;
8321 } else {
8322 data_len -= sizeof(struct fcoe_crc_eof);
8323 }
8324 }
8325
8326 #endif
8327 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8328
8329 tx_buffer = first;
8330
8331 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8332 if (dma_mapping_error(tx_ring->dev, dma))
8333 goto dma_error;
8334
8335
8336 dma_unmap_len_set(tx_buffer, len, size);
8337 dma_unmap_addr_set(tx_buffer, dma, dma);
8338
8339 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8340
8341 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8342 tx_desc->read.cmd_type_len =
8343 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8344
8345 i++;
8346 tx_desc++;
8347 if (i == tx_ring->count) {
8348 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8349 i = 0;
8350 }
8351 tx_desc->read.olinfo_status = 0;
8352
8353 dma += IXGBE_MAX_DATA_PER_TXD;
8354 size -= IXGBE_MAX_DATA_PER_TXD;
8355
8356 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8357 }
8358
8359 if (likely(!data_len))
8360 break;
8361
8362 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8363
8364 i++;
8365 tx_desc++;
8366 if (i == tx_ring->count) {
8367 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8368 i = 0;
8369 }
8370 tx_desc->read.olinfo_status = 0;
8371
8372 #ifdef IXGBE_FCOE
8373 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8374 #else
8375 size = skb_frag_size(frag);
8376 #endif
8377 data_len -= size;
8378
8379 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8380 DMA_TO_DEVICE);
8381
8382 tx_buffer = &tx_ring->tx_buffer_info[i];
8383 }
8384
8385
8386 cmd_type |= size | IXGBE_TXD_CMD;
8387 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8388
8389 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8390
8391
8392 first->time_stamp = jiffies;
8393
8394 skb_tx_timestamp(skb);
8395
8396 /*
8397  * Force memory writes to complete before letting h/w know there
8398  * are new descriptors to fetch.  (Only applicable for weak-ordered
8399  * memory model archs, such as IA-64).
8400  *
8401  * We also need this memory barrier to make certain all of the
8402  * status bits have been updated before next_to_watch is written.
8403  */
8404 wmb();
8405
8406
8407 first->next_to_watch = tx_desc;
8408
8409 i++;
8410 if (i == tx_ring->count)
8411 i = 0;
8412
8413 tx_ring->next_to_use = i;
8414
8415 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8416
8417 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8418 writel(i, tx_ring->tail);
8419 }
8420
8421 return 0;
8422 dma_error:
8423 dev_err(tx_ring->dev, "TX DMA map failed\n");
8424
8425
8426 for (;;) {
8427 tx_buffer = &tx_ring->tx_buffer_info[i];
8428 if (dma_unmap_len(tx_buffer, len))
8429 dma_unmap_page(tx_ring->dev,
8430 dma_unmap_addr(tx_buffer, dma),
8431 dma_unmap_len(tx_buffer, len),
8432 DMA_TO_DEVICE);
8433 dma_unmap_len_set(tx_buffer, len, 0);
8434 if (tx_buffer == first)
8435 break;
8436 if (i == 0)
8437 i += tx_ring->count;
8438 i--;
8439 }
8440
8441 dev_kfree_skb_any(first->skb);
8442 first->skb = NULL;
8443
8444 tx_ring->next_to_use = i;
8445
8446 return -1;
8447 }
8448
8449 static void ixgbe_atr(struct ixgbe_ring *ring,
8450 struct ixgbe_tx_buffer *first)
8451 {
8452 struct ixgbe_q_vector *q_vector = ring->q_vector;
8453 union ixgbe_atr_hash_dword input = { .dword = 0 };
8454 union ixgbe_atr_hash_dword common = { .dword = 0 };
8455 union {
8456 unsigned char *network;
8457 struct iphdr *ipv4;
8458 struct ipv6hdr *ipv6;
8459 } hdr;
8460 struct tcphdr *th;
8461 unsigned int hlen;
8462 struct sk_buff *skb;
8463 __be16 vlan_id;
8464 int l4_proto;
8465
8466
8467 if (!q_vector)
8468 return;
8469
8470
8471 if (!ring->atr_sample_rate)
8472 return;
8473
8474 ring->atr_count++;
8475
8476
8477 if ((first->protocol != htons(ETH_P_IP)) &&
8478 (first->protocol != htons(ETH_P_IPV6)))
8479 return;
8480
8481
8482 skb = first->skb;
8483 hdr.network = skb_network_header(skb);
8484 if (unlikely(hdr.network <= skb->data))
8485 return;
8486 if (skb->encapsulation &&
8487 first->protocol == htons(ETH_P_IP) &&
8488 hdr.ipv4->protocol == IPPROTO_UDP) {
8489 struct ixgbe_adapter *adapter = q_vector->adapter;
8490
8491 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8492 VXLAN_HEADROOM))
8493 return;
8494
8495
8496 if (adapter->vxlan_port &&
8497 udp_hdr(skb)->dest == adapter->vxlan_port)
8498 hdr.network = skb_inner_network_header(skb);
8499
8500 if (adapter->geneve_port &&
8501 udp_hdr(skb)->dest == adapter->geneve_port)
8502 hdr.network = skb_inner_network_header(skb);
8503 }
8504
8505
8506
8507
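/* make sure at least [minimum IPv4 header + TCP] or [IPv6 header]
 * bytes (40) are available before parsing further
 */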
8508 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8509 return;
8510
8511
8512 switch (hdr.ipv4->version) {
8513 case IPVERSION:
8514
8515 hlen = (hdr.network[0] & 0x0F) << 2;
8516 l4_proto = hdr.ipv4->protocol;
8517 break;
8518 case 6:
8519 hlen = hdr.network - skb->data;
8520 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8521 hlen -= hdr.network - skb->data;
8522 break;
8523 default:
8524 return;
8525 }
8526
8527 if (l4_proto != IPPROTO_TCP)
8528 return;
8529
8530 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8531 hlen + sizeof(struct tcphdr)))
8532 return;
8533
8534 th = (struct tcphdr *)(hdr.network + hlen);
8535
8536
8537 if (th->fin)
8538 return;
8539
8540
8541 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8542 return;
8543
8544
8545 ring->atr_count = 0;
8546
8547 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8548
8549 /*
8550  * src and dst are inverted, think how the receiver sees them.
8551  *
8552  * The input is broken into two sections: a non-compressed section
8553  * containing vm_pool, vlan_id and flow_type, and the remaining data,
8554  * which is hashed into the packet signature indexed by VM pool and VLAN id.
8555  */
8556 input.formatted.vlan_id = vlan_id;
8557
8558
8559
8560
8561
8562 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8563 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8564 else
8565 common.port.src ^= th->dest ^ first->protocol;
8566 common.port.dst ^= th->source;
8567
8568 switch (hdr.ipv4->version) {
8569 case IPVERSION:
8570 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8571 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8572 break;
8573 case 6:
8574 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8575 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8576 hdr.ipv6->saddr.s6_addr32[1] ^
8577 hdr.ipv6->saddr.s6_addr32[2] ^
8578 hdr.ipv6->saddr.s6_addr32[3] ^
8579 hdr.ipv6->daddr.s6_addr32[0] ^
8580 hdr.ipv6->daddr.s6_addr32[1] ^
8581 hdr.ipv6->daddr.s6_addr32[2] ^
8582 hdr.ipv6->daddr.s6_addr32[3];
8583 break;
8584 default:
8585 break;
8586 }
8587
8588 if (hdr.network != skb_network_header(skb))
8589 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8590
8591
8592 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8593 input, common, ring->queue_index);
8594 }
8595
8596 #ifdef IXGBE_FCOE
8597 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8598 struct net_device *sb_dev)
8599 {
8600 struct ixgbe_adapter *adapter;
8601 struct ixgbe_ring_feature *f;
8602 int txq;
8603
8604 if (sb_dev) {
8605 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8606 struct net_device *vdev = sb_dev;
8607
8608 txq = vdev->tc_to_txq[tc].offset;
8609 txq += reciprocal_scale(skb_get_hash(skb),
8610 vdev->tc_to_txq[tc].count);
8611
8612 return txq;
8613 }
8614
8615
8616
8617
8618
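/* only steer to the FCoE queue set for FCoE/FIP frames when FCoE is
 * enabled; everything else uses the stack's default queue selection
 */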
8619 switch (vlan_get_protocol(skb)) {
8620 case htons(ETH_P_FCOE):
8621 case htons(ETH_P_FIP):
8622 adapter = netdev_priv(dev);
8623
8624 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8625 break;
8626 fallthrough;
8627 default:
8628 return netdev_pick_tx(dev, skb, sb_dev);
8629 }
8630
8631 f = &adapter->ring_feature[RING_F_FCOE];
8632
8633 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8634 smp_processor_id();
8635
8636 while (txq >= f->indices)
8637 txq -= f->indices;
8638
8639 return txq + f->offset;
8640 }
8641
8642 #endif
8643 int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
8644 struct xdp_frame *xdpf)
8645 {
8646 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
8647 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
8648 u16 i = 0, index = ring->next_to_use;
8649 struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
8650 struct ixgbe_tx_buffer *tx_buff = tx_head;
8651 union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
8652 u32 cmd_type, len = xdpf->len;
8653 void *data = xdpf->data;
8654
8655 if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
8656 return IXGBE_XDP_CONSUMED;
8657
8658 tx_head->bytecount = xdp_get_frame_len(xdpf);
8659 tx_head->gso_segs = 1;
8660 tx_head->xdpf = xdpf;
8661
8662 tx_desc->read.olinfo_status =
8663 cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
8664
8665 for (;;) {
8666 dma_addr_t dma;
8667
8668 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
8669 if (dma_mapping_error(ring->dev, dma))
8670 goto unmap;
8671
8672 dma_unmap_len_set(tx_buff, len, len);
8673 dma_unmap_addr_set(tx_buff, dma, dma);
8674
8675 cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
8676 IXGBE_ADVTXD_DCMD_IFCS | len;
8677 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8678 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8679 tx_buff->protocol = 0;
8680
8681 if (++index == ring->count)
8682 index = 0;
8683
8684 if (i == nr_frags)
8685 break;
8686
8687 tx_buff = &ring->tx_buffer_info[index];
8688 tx_desc = IXGBE_TX_DESC(ring, index);
8689 tx_desc->read.olinfo_status = 0;
8690
8691 data = skb_frag_address(&sinfo->frags[i]);
8692 len = skb_frag_size(&sinfo->frags[i]);
8693 i++;
8694 }
8695
8696 tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
8697
8698
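/* make sure all descriptor writes are visible before next_to_watch is
 * updated and the frame is handed to hardware
 */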
8699 smp_wmb();
8700
8701 tx_head->next_to_watch = tx_desc;
8702 ring->next_to_use = index;
8703
8704 return IXGBE_XDP_TX;
8705
8706 unmap:
8707 for (;;) {
8708 tx_buff = &ring->tx_buffer_info[index];
8709 if (dma_unmap_len(tx_buff, len))
8710 dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
8711 dma_unmap_len(tx_buff, len),
8712 DMA_TO_DEVICE);
8713 dma_unmap_len_set(tx_buff, len, 0);
8714 if (tx_buff == tx_head)
8715 break;
8716
8717 if (!index)
8718 index += ring->count;
8719 index--;
8720 }
8721
8722 return IXGBE_XDP_CONSUMED;
8723 }
8724
8725 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8726 struct ixgbe_adapter *adapter,
8727 struct ixgbe_ring *tx_ring)
8728 {
8729 struct ixgbe_tx_buffer *first;
8730 int tso;
8731 u32 tx_flags = 0;
8732 unsigned short f;
8733 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8734 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8735 __be16 protocol = skb->protocol;
8736 u8 hdr_len = 0;
8737
8738 /*
8739  * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
8740  *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
8741  *       + 2 desc gap to keep tail from touching head,
8742  *       + 1 desc for context descriptor,
8743  * otherwise try next time
8744  */
8745 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8746 count += TXD_USE_COUNT(skb_frag_size(
8747 &skb_shinfo(skb)->frags[f]));
8748
8749 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8750 tx_ring->tx_stats.tx_busy++;
8751 return NETDEV_TX_BUSY;
8752 }
8753
8754
8755 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8756 first->skb = skb;
8757 first->bytecount = skb->len;
8758 first->gso_segs = 1;
8759
8760
8761 if (skb_vlan_tag_present(skb)) {
8762 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8763 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8764
8765 } else if (protocol == htons(ETH_P_8021Q)) {
8766 struct vlan_hdr *vhdr, _vhdr;
8767 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8768 if (!vhdr)
8769 goto out_drop;
8770
8771 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8772 IXGBE_TX_FLAGS_VLAN_SHIFT;
8773 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8774 }
8775 protocol = vlan_get_protocol(skb);
8776
8777 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8778 adapter->ptp_clock) {
8779 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
8780 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8781 &adapter->state)) {
8782 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8783 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8784
8785
8786 adapter->ptp_tx_skb = skb_get(skb);
8787 adapter->ptp_tx_start = jiffies;
8788 schedule_work(&adapter->ptp_tx_work);
8789 } else {
8790 adapter->tx_hwtstamp_skipped++;
8791 }
8792 }
8793
8794 #ifdef CONFIG_PCI_IOV
8795
8796
8797
8798
8799 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8800 tx_flags |= IXGBE_TX_FLAGS_CC;
8801
8802 #endif
8803
8804 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8805 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8806 (skb->priority != TC_PRIO_CONTROL))) {
8807 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8808 tx_flags |= (skb->priority & 0x7) <<
8809 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8810 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8811 struct vlan_ethhdr *vhdr;
8812
8813 if (skb_cow_head(skb, 0))
8814 goto out_drop;
8815 vhdr = (struct vlan_ethhdr *)skb->data;
8816 vhdr->h_vlan_TCI = htons(tx_flags >>
8817 IXGBE_TX_FLAGS_VLAN_SHIFT);
8818 } else {
8819 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8820 }
8821 }
8822
8823
8824 first->tx_flags = tx_flags;
8825 first->protocol = protocol;
8826
8827 #ifdef IXGBE_FCOE
8828
8829 if ((protocol == htons(ETH_P_FCOE)) &&
8830 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8831 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8832 if (tso < 0)
8833 goto out_drop;
8834
8835 goto xmit_fcoe;
8836 }
8837
8838 #endif
8839
8840 #ifdef CONFIG_IXGBE_IPSEC
8841 if (xfrm_offload(skb) &&
8842 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8843 goto out_drop;
8844 #endif
8845 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8846 if (tso < 0)
8847 goto out_drop;
8848 else if (!tso)
8849 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8850
8851
8852 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8853 ixgbe_atr(tx_ring, first);
8854
8855 #ifdef IXGBE_FCOE
8856 xmit_fcoe:
8857 #endif
8858 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8859 goto cleanup_tx_timestamp;
8860
8861 return NETDEV_TX_OK;
8862
8863 out_drop:
8864 dev_kfree_skb_any(first->skb);
8865 first->skb = NULL;
8866 cleanup_tx_timestamp:
8867 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8868 dev_kfree_skb_any(adapter->ptp_tx_skb);
8869 adapter->ptp_tx_skb = NULL;
8870 cancel_work_sync(&adapter->ptp_tx_work);
8871 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8872 }
8873
8874 return NETDEV_TX_OK;
8875 }
8876
8877 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8878 struct net_device *netdev,
8879 struct ixgbe_ring *ring)
8880 {
8881 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8882 struct ixgbe_ring *tx_ring;
8883
8884 /*
8885  * The minimum packet size for olinfo paylen is 17 so pad the skb
8886  * in order to meet this minimum size requirement.
8887  */
8888 if (skb_put_padto(skb, 17))
8889 return NETDEV_TX_OK;
8890
8891 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8892 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8893 return NETDEV_TX_BUSY;
8894
8895 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8896 }
8897
8898 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8899 struct net_device *netdev)
8900 {
8901 return __ixgbe_xmit_frame(skb, netdev, NULL);
8902 }
8903
8904 /**
8905  * ixgbe_set_mac - Change the Ethernet Address of the NIC
8906  * @netdev: network interface device structure
8907  * @p: pointer to an address structure
8908  *
8909  * Returns 0 on success, negative on failure
8910  **/
8911 static int ixgbe_set_mac(struct net_device *netdev, void *p)
8912 {
8913 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8914 struct ixgbe_hw *hw = &adapter->hw;
8915 struct sockaddr *addr = p;
8916
8917 if (!is_valid_ether_addr(addr->sa_data))
8918 return -EADDRNOTAVAIL;
8919
8920 eth_hw_addr_set(netdev, addr->sa_data);
8921 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8922
8923 ixgbe_mac_set_default_filter(adapter);
8924
8925 return 0;
8926 }
8927
8928 static int
8929 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8930 {
8931 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8932 struct ixgbe_hw *hw = &adapter->hw;
8933 u16 value;
8934 int rc;
8935
8936 if (adapter->mii_bus) {
8937 int regnum = addr;
8938
8939 if (devad != MDIO_DEVAD_NONE)
8940 regnum |= (devad << 16) | MII_ADDR_C45;
8941
8942 return mdiobus_read(adapter->mii_bus, prtad, regnum);
8943 }
8944
8945 if (prtad != hw->phy.mdio.prtad)
8946 return -EINVAL;
8947 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8948 if (!rc)
8949 rc = value;
8950 return rc;
8951 }
8952
8953 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8954 u16 addr, u16 value)
8955 {
8956 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8957 struct ixgbe_hw *hw = &adapter->hw;
8958
8959 if (adapter->mii_bus) {
8960 int regnum = addr;
8961
8962 if (devad != MDIO_DEVAD_NONE)
8963 regnum |= (devad << 16) | MII_ADDR_C45;
8964
8965 return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8966 }
8967
8968 if (prtad != hw->phy.mdio.prtad)
8969 return -EINVAL;
8970 return hw->phy.ops.write_reg(hw, addr, devad, value);
8971 }
8972
8973 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8974 {
8975 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8976
8977 switch (cmd) {
8978 case SIOCSHWTSTAMP:
8979 return ixgbe_ptp_set_ts_config(adapter, req);
8980 case SIOCGHWTSTAMP:
8981 return ixgbe_ptp_get_ts_config(adapter, req);
8982 case SIOCGMIIPHY:
8983 if (!adapter->hw.phy.ops.read_reg)
8984 return -EOPNOTSUPP;
8985 fallthrough;
8986 default:
8987 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8988 }
8989 }
8990
8991 /**
8992  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
8993  * netdev->dev_addrs
8994  * @dev: network interface device structure
8995  *
8996  * Returns non-zero on failure
8997  **/
8998 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8999 {
9000 int err = 0;
9001 struct ixgbe_adapter *adapter = netdev_priv(dev);
9002 struct ixgbe_hw *hw = &adapter->hw;
9003
9004 if (is_valid_ether_addr(hw->mac.san_addr)) {
9005 rtnl_lock();
9006 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
9007 rtnl_unlock();
9008
9009
9010 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
9011 }
9012 return err;
9013 }
9014
9015 /**
9016  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
9017  * netdev->dev_addrs
9018  * @dev: network interface device structure
9019  *
9020  * Returns non-zero on failure
9021  **/
9022 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
9023 {
9024 int err = 0;
9025 struct ixgbe_adapter *adapter = netdev_priv(dev);
9026 struct ixgbe_mac_info *mac = &adapter->hw.mac;
9027
9028 if (is_valid_ether_addr(mac->san_addr)) {
9029 rtnl_lock();
9030 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
9031 rtnl_unlock();
9032 }
9033 return err;
9034 }
9035
9036 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
9037 struct ixgbe_ring *ring)
9038 {
9039 u64 bytes, packets;
9040 unsigned int start;
9041
9042 if (ring) {
9043 do {
9044 start = u64_stats_fetch_begin_irq(&ring->syncp);
9045 packets = ring->stats.packets;
9046 bytes = ring->stats.bytes;
9047 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
9048 stats->tx_packets += packets;
9049 stats->tx_bytes += bytes;
9050 }
9051 }
9052
9053 static void ixgbe_get_stats64(struct net_device *netdev,
9054 struct rtnl_link_stats64 *stats)
9055 {
9056 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9057 int i;
9058
9059 rcu_read_lock();
9060 for (i = 0; i < adapter->num_rx_queues; i++) {
9061 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
9062 u64 bytes, packets;
9063 unsigned int start;
9064
9065 if (ring) {
9066 do {
9067 start = u64_stats_fetch_begin_irq(&ring->syncp);
9068 packets = ring->stats.packets;
9069 bytes = ring->stats.bytes;
9070 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
9071 stats->rx_packets += packets;
9072 stats->rx_bytes += bytes;
9073 }
9074 }
9075
9076 for (i = 0; i < adapter->num_tx_queues; i++) {
9077 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
9078
9079 ixgbe_get_ring_stats64(stats, ring);
9080 }
9081 for (i = 0; i < adapter->num_xdp_queues; i++) {
9082 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
9083
9084 ixgbe_get_ring_stats64(stats, ring);
9085 }
9086 rcu_read_unlock();
9087
9088
9089 stats->multicast = netdev->stats.multicast;
9090 stats->rx_errors = netdev->stats.rx_errors;
9091 stats->rx_length_errors = netdev->stats.rx_length_errors;
9092 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
9093 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
9094 }
9095
9096 static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
9097 struct ifla_vf_stats *vf_stats)
9098 {
9099 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9100
9101 if (vf < 0 || vf >= adapter->num_vfs)
9102 return -EINVAL;
9103
9104 vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc;
9105 vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc;
9106 vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc;
9107 vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc;
9108 vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc;
9109
9110 return 0;
9111 }
9112
9113 #ifdef CONFIG_IXGBE_DCB
9114 /**
9115  * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
9116  * @adapter: Pointer to adapter struct
9117  * @tc: number of traffic classes currently enabled
9118  *
9119  * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
9120  * 802.1Q priority maps to a packet buffer that exists.
9121  **/
9122 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
9123 {
9124 struct ixgbe_hw *hw = &adapter->hw;
9125 u32 reg, rsave;
9126 int i;
9127
9128
9129
9130
9131 if (hw->mac.type == ixgbe_mac_82598EB)
9132 return;
9133
9134 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
9135 rsave = reg;
9136
9137 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
9138 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
9139
9140
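/* if a user priority maps to a TC beyond the number being enabled,
 * fall back to TC 0
 */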
9141 if (up2tc > tc)
9142 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
9143 }
9144
9145 if (reg != rsave)
9146 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
9147
9148 return;
9149 }
9150
9151 /**
9152  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
9153  * @adapter: Pointer to adapter struct
9154  *
9155  * Populate the netdev user priority to tc map
9156  **/
9157 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9158 {
9159 struct net_device *dev = adapter->netdev;
9160 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9161 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9162 u8 prio;
9163
9164 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9165 u8 tc = 0;
9166
9167 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9168 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9169 else if (ets)
9170 tc = ets->prio_tc[prio];
9171
9172 netdev_set_prio_tc_map(dev, prio, tc);
9173 }
9174 }
9175
9176 #endif
9177 static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
9178 struct netdev_nested_priv *priv)
9179 {
9180 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
9181 struct ixgbe_fwd_adapter *accel;
9182 int pool;
9183
9184
9185 if (!netif_is_macvlan(vdev))
9186 return 0;
9187
9188
9189 accel = macvlan_accel_priv(vdev);
9190 if (!accel)
9191 return 0;
9192
9193
9194 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9195 if (pool < adapter->num_rx_pools) {
9196 set_bit(pool, adapter->fwd_bitmask);
9197 accel->pool = pool;
9198 return 0;
9199 }
9200
9201
9202 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9203 macvlan_release_l2fw_offload(vdev);
9204
9205
9206 netdev_unbind_sb_channel(adapter->netdev, vdev);
9207 netdev_set_sb_channel(vdev, 0);
9208
9209 kfree(accel);
9210
9211 return 0;
9212 }
9213
9214 static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9215 {
9216 struct ixgbe_adapter *adapter = netdev_priv(dev);
9217 struct netdev_nested_priv priv = {
9218 .data = (void *)adapter,
9219 };
9220
9221
9222 bitmap_clear(adapter->fwd_bitmask, 1, 63);
9223
9224
9225 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9226 &priv);
9227 }
9228
9229 /**
9230  * ixgbe_setup_tc - configure net_device for multiple traffic classes
9231  *
9232  * @dev: net device to configure
9233  * @tc: number of traffic classes to enable
9234  */
9235 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9236 {
9237 struct ixgbe_adapter *adapter = netdev_priv(dev);
9238 struct ixgbe_hw *hw = &adapter->hw;
9239
9240
9241 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9242 return -EINVAL;
9243
9244 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9245 return -EINVAL;
9246
9247 	/* Hardware has to reinitialize queues and interrupts to
9248 	 * match packet buffer alignment. Unfortunately, the
9249 	 * hardware is not flexible enough to do this dynamically.
9250 	 */
9251 if (netif_running(dev))
9252 ixgbe_close(dev);
9253 else
9254 ixgbe_reset(adapter);
9255
9256 ixgbe_clear_interrupt_scheme(adapter);
9257
9258 #ifdef CONFIG_IXGBE_DCB
9259 if (tc) {
9260 if (adapter->xdp_prog) {
9261 e_warn(probe, "DCB is not supported with XDP\n");
9262
9263 ixgbe_init_interrupt_scheme(adapter);
9264 if (netif_running(dev))
9265 ixgbe_open(dev);
9266 return -EINVAL;
9267 }
9268
9269 netdev_set_num_tc(dev, tc);
9270 ixgbe_set_prio_tc_map(adapter);
9271
9272 adapter->hw_tcs = tc;
9273 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9274
9275 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9276 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9277 adapter->hw.fc.requested_mode = ixgbe_fc_none;
9278 }
9279 } else {
9280 netdev_reset_tc(dev);
9281
9282 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9283 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9284
9285 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9286 adapter->hw_tcs = tc;
9287
9288 adapter->temp_dcb_cfg.pfc_mode_enable = false;
9289 adapter->dcb_cfg.pfc_mode_enable = false;
9290 }
9291
9292 ixgbe_validate_rtr(adapter, tc);
9293
9294 #endif
9295 ixgbe_init_interrupt_scheme(adapter);
9296
9297 ixgbe_defrag_macvlan_pools(dev);
9298
9299 if (netif_running(dev))
9300 return ixgbe_open(dev);
9301
9302 return 0;
9303 }
9304
9305 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9306 struct tc_cls_u32_offload *cls)
9307 {
9308 u32 hdl = cls->knode.handle;
9309 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
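	/* the lower 20 bits of the handle (everything below the user htid)
	 * select the filter location within the table
	 */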
9310 u32 loc = cls->knode.handle & 0xfffff;
9311 int err = 0, i, j;
9312 struct ixgbe_jump_table *jump = NULL;
9313
9314 if (loc > IXGBE_MAX_HW_ENTRIES)
9315 return -EINVAL;
9316
9317 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9318 return -EINVAL;
9319
9320
9321 if (uhtid != 0x800) {
9322 jump = adapter->jump_tables[uhtid];
9323 if (!jump)
9324 return -EINVAL;
9325 if (!test_bit(loc - 1, jump->child_loc_map))
9326 return -EINVAL;
9327 clear_bit(loc - 1, jump->child_loc_map);
9328 }
9329
9330
9331 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9332 jump = adapter->jump_tables[i];
9333 if (jump && jump->link_hdl == hdl) {
9334
9335
9336
9337 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9338 if (!test_bit(j, jump->child_loc_map))
9339 continue;
9340 spin_lock(&adapter->fdir_perfect_lock);
9341 err = ixgbe_update_ethtool_fdir_entry(adapter,
9342 NULL,
9343 j + 1);
9344 spin_unlock(&adapter->fdir_perfect_lock);
9345 clear_bit(j, jump->child_loc_map);
9346 }
9347
9348 kfree(jump->input);
9349 kfree(jump->mask);
9350 kfree(jump);
9351 adapter->jump_tables[i] = NULL;
9352 return err;
9353 }
9354 }
9355
9356 spin_lock(&adapter->fdir_perfect_lock);
9357 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9358 spin_unlock(&adapter->fdir_perfect_lock);
9359 return err;
9360 }
9361
9362 static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9363 struct tc_cls_u32_offload *cls)
9364 {
9365 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9366
9367 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9368 return -EINVAL;
9369
9370 	/* These ixgbe devices do not support hash tables at the moment,
9371 	 * so abort when given a non-zero divisor (i.e. a hash table).
9372 	 */
9373 if (cls->hnode.divisor > 0)
9374 return -EINVAL;
9375
9376 set_bit(uhtid - 1, &adapter->tables);
9377 return 0;
9378 }
9379
9380 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9381 struct tc_cls_u32_offload *cls)
9382 {
9383 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9384
9385 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9386 return -EINVAL;
9387
9388 clear_bit(uhtid - 1, &adapter->tables);
9389 return 0;
9390 }
9391
9392 #ifdef CONFIG_NET_CLS_ACT
9393 struct upper_walk_data {
9394 struct ixgbe_adapter *adapter;
9395 u64 action;
9396 int ifindex;
9397 u8 queue;
9398 };
9399
9400 static int get_macvlan_queue(struct net_device *upper,
9401 struct netdev_nested_priv *priv)
9402 {
9403 if (netif_is_macvlan(upper)) {
9404 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9405 struct ixgbe_adapter *adapter;
9406 struct upper_walk_data *data;
9407 int ifindex;
9408
9409 data = (struct upper_walk_data *)priv->data;
9410 ifindex = data->ifindex;
9411 adapter = data->adapter;
9412 if (vadapter && upper->ifindex == ifindex) {
9413 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9414 data->action = data->queue;
9415 return 1;
9416 }
9417 }
9418
9419 return 0;
9420 }
9421
9422 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9423 u8 *queue, u64 *action)
9424 {
9425 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9426 unsigned int num_vfs = adapter->num_vfs, vf;
9427 struct netdev_nested_priv priv;
9428 struct upper_walk_data data;
9429 struct net_device *upper;
9430
9431
9432 for (vf = 0; vf < num_vfs; ++vf) {
9433 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9434 if (upper->ifindex == ifindex) {
9435 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9436 *action = vf + 1;
9437 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9438 return 0;
9439 }
9440 }
9441
9442
9443 data.adapter = adapter;
9444 data.ifindex = ifindex;
9445 data.action = 0;
9446 data.queue = 0;
9447 priv.data = (void *)&data;
9448 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9449 get_macvlan_queue, &priv)) {
9450 *action = data.action;
9451 *queue = data.queue;
9452
9453 return 0;
9454 }
9455
9456 return -EINVAL;
9457 }
9458
9459 static int parse_tc_actions(struct ixgbe_adapter *adapter,
9460 struct tcf_exts *exts, u64 *action, u8 *queue)
9461 {
9462 const struct tc_action *a;
9463 int i;
9464
9465 if (!tcf_exts_has_actions(exts))
9466 return -EINVAL;
9467
9468 tcf_exts_for_each_action(i, a, exts) {
9469
9470 if (is_tcf_gact_shot(a)) {
9471 *action = IXGBE_FDIR_DROP_QUEUE;
9472 *queue = IXGBE_FDIR_DROP_QUEUE;
9473 return 0;
9474 }
9475
9476
9477 if (is_tcf_mirred_egress_redirect(a)) {
9478 struct net_device *dev = tcf_mirred_dev(a);
9479
9480 if (!dev)
9481 return -EINVAL;
9482 return handle_redirect_action(adapter, dev->ifindex,
9483 queue, action);
9484 }
9485
9486 return -EINVAL;
9487 }
9488
9489 return -EINVAL;
9490 }
9491 #else
9492 static int parse_tc_actions(struct ixgbe_adapter *adapter,
9493 struct tcf_exts *exts, u64 *action, u8 *queue)
9494 {
9495 return -EINVAL;
9496 }
9497 #endif
9498
9499 static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9500 union ixgbe_atr_input *mask,
9501 struct tc_cls_u32_offload *cls,
9502 struct ixgbe_mat_field *field_ptr,
9503 struct ixgbe_nexthdr *nexthdr)
9504 {
9505 int i, j, off;
9506 __be32 val, m;
9507 bool found_entry = false, found_jump_field = false;
9508
9509 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9510 off = cls->knode.sel->keys[i].off;
9511 val = cls->knode.sel->keys[i].val;
9512 m = cls->knode.sel->keys[i].mask;
9513
9514 for (j = 0; field_ptr[j].val; j++) {
9515 if (field_ptr[j].off == off) {
9516 field_ptr[j].val(input, mask, (__force u32)val,
9517 (__force u32)m);
9518 input->filter.formatted.flow_type |=
9519 field_ptr[j].type;
9520 found_entry = true;
9521 break;
9522 }
9523 }
9524 if (nexthdr) {
9525 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9526 nexthdr->val ==
9527 (__force u32)cls->knode.sel->keys[i].val &&
9528 nexthdr->mask ==
9529 (__force u32)cls->knode.sel->keys[i].mask)
9530 found_jump_field = true;
9531 else
9532 continue;
9533 }
9534 }
9535
9536 if (nexthdr && !found_jump_field)
9537 return -EINVAL;
9538
9539 if (!found_entry)
9540 return 0;
9541
9542 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9543 IXGBE_ATR_L4TYPE_MASK;
9544
9545 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9546 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9547
9548 return 0;
9549 }
9550
9551 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9552 struct tc_cls_u32_offload *cls)
9553 {
9554 __be16 protocol = cls->common.protocol;
9555 u32 loc = cls->knode.handle & 0xfffff;
9556 struct ixgbe_hw *hw = &adapter->hw;
9557 struct ixgbe_mat_field *field_ptr;
9558 struct ixgbe_fdir_filter *input = NULL;
9559 union ixgbe_atr_input *mask = NULL;
9560 struct ixgbe_jump_table *jump = NULL;
9561 int i, err = -EINVAL;
9562 u8 queue;
9563 u32 uhtid, link_uhtid;
9564
9565 uhtid = TC_U32_USERHTID(cls->knode.handle);
9566 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9567
9568 	/* At the moment cls_u32 jumps to the network layer and skips past
9569 	 * L2 headers. The canonical method to match L2 frames is to use
9570 	 * negative values. However, this is error prone at best but really
9571 	 * just broken because there is no way to "know" what sort of header
9572 	 * is in front of the network layer. Fix cls_u32 to support L2
9573 	 * headers when needed.
9574 	 */
9575 if (protocol != htons(ETH_P_IP))
9576 return err;
9577
9578 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9579 e_err(drv, "Location out of range\n");
9580 return err;
9581 }
9582
9583 	/* cls_u32 is a graph starting at the root node 0x800. The driver tracks
9584 	 * links and also the fields used to advance the parser across each
9585 	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
9586 	 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h.
9587 	 * To add support for new nodes, update the ixgbe_model.h parse
9588 	 * structures; this function should remain generic.
9589 	 */
9590 if (uhtid == 0x800) {
9591 field_ptr = (adapter->jump_tables[0])->mat;
9592 } else {
9593 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9594 return err;
9595 if (!adapter->jump_tables[uhtid])
9596 return err;
9597 field_ptr = (adapter->jump_tables[uhtid])->mat;
9598 }
9599
9600 if (!field_ptr)
9601 return err;
9602
9603 	/* At this point we know the field_ptr is valid and need to either
9604 	 * build a cls_u32 link or attach a filter. Adding a link to a
9605 	 * handle that does not exist is invalid, and the same is true for
9606 	 * adding rules to handles that don't exist, so links may only
9607 	 * target hash tables that were announced via an hnode first.
9608 	 */
9609 if (link_uhtid) {
9610 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9611
9612 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9613 return err;
9614
9615 if (!test_bit(link_uhtid - 1, &adapter->tables))
9616 return err;
9617
9618 		/* Multiple filters as links to the same hash table are not
9619 		 * supported. To add a new filter with the same link, the
9620 		 * old one must be removed first, so a link handle that is
9621 		 * already in use fails the request here.
9622 		 */
9623 if (adapter->jump_tables[link_uhtid] &&
9624 (adapter->jump_tables[link_uhtid])->link_hdl) {
9625 e_err(drv, "Link filter exists for link: %x\n",
9626 link_uhtid);
9627 return err;
9628 }
9629
9630 for (i = 0; nexthdr[i].jump; i++) {
9631 if (nexthdr[i].o != cls->knode.sel->offoff ||
9632 nexthdr[i].s != cls->knode.sel->offshift ||
9633 nexthdr[i].m !=
9634 (__force u32)cls->knode.sel->offmask)
9635 return err;
9636
9637 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9638 if (!jump)
9639 return -ENOMEM;
9640 input = kzalloc(sizeof(*input), GFP_KERNEL);
9641 if (!input) {
9642 err = -ENOMEM;
9643 goto free_jump;
9644 }
9645 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9646 if (!mask) {
9647 err = -ENOMEM;
9648 goto free_input;
9649 }
9650 jump->input = input;
9651 jump->mask = mask;
9652 jump->link_hdl = cls->knode.handle;
9653
9654 err = ixgbe_clsu32_build_input(input, mask, cls,
9655 field_ptr, &nexthdr[i]);
9656 if (!err) {
9657 jump->mat = nexthdr[i].jump;
9658 adapter->jump_tables[link_uhtid] = jump;
9659 break;
9660 } else {
9661 kfree(mask);
9662 kfree(input);
9663 kfree(jump);
9664 }
9665 }
9666 return 0;
9667 }
9668
9669 input = kzalloc(sizeof(*input), GFP_KERNEL);
9670 if (!input)
9671 return -ENOMEM;
9672 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9673 if (!mask) {
9674 err = -ENOMEM;
9675 goto free_input;
9676 }
9677
9678 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9679 if ((adapter->jump_tables[uhtid])->input)
9680 memcpy(input, (adapter->jump_tables[uhtid])->input,
9681 sizeof(*input));
9682 if ((adapter->jump_tables[uhtid])->mask)
9683 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9684 sizeof(*mask));
9685 		/* Look up in all child hash tables whether this location is
9686 		 * already filled with a filter; a location may only hold a
9687 		 * single filter at a time.
9688 		 */
9689 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9690 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9691
9692 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9693 e_err(drv, "Filter exists in location: %x\n",
9694 loc);
9695 err = -EINVAL;
9696 goto err_out;
9697 }
9698 }
9699 }
9700 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9701 if (err)
9702 goto err_out;
9703
9704 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9705 &queue);
9706 if (err < 0)
9707 goto err_out;
9708
9709 input->sw_idx = loc;
9710
9711 spin_lock(&adapter->fdir_perfect_lock);
9712
9713 if (hlist_empty(&adapter->fdir_filter_list)) {
9714 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9715 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9716 if (err)
9717 goto err_out_w_lock;
9718 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9719 err = -EINVAL;
9720 goto err_out_w_lock;
9721 }
9722
9723 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9724 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9725 input->sw_idx, queue);
9726 if (err)
9727 goto err_out_w_lock;
9728
9729 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9730 spin_unlock(&adapter->fdir_perfect_lock);
9731
9732 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9733 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9734
9735 kfree(mask);
9736 return err;
9737 err_out_w_lock:
9738 spin_unlock(&adapter->fdir_perfect_lock);
9739 err_out:
9740 kfree(mask);
9741 free_input:
9742 kfree(input);
9743 free_jump:
9744 kfree(jump);
9745 return err;
9746 }
9747
9748 static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9749 struct tc_cls_u32_offload *cls_u32)
9750 {
9751 switch (cls_u32->command) {
9752 case TC_CLSU32_NEW_KNODE:
9753 case TC_CLSU32_REPLACE_KNODE:
9754 return ixgbe_configure_clsu32(adapter, cls_u32);
9755 case TC_CLSU32_DELETE_KNODE:
9756 return ixgbe_delete_clsu32(adapter, cls_u32);
9757 case TC_CLSU32_NEW_HNODE:
9758 case TC_CLSU32_REPLACE_HNODE:
9759 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9760 case TC_CLSU32_DELETE_HNODE:
9761 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9762 default:
9763 return -EOPNOTSUPP;
9764 }
9765 }
9766
9767 static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9768 void *cb_priv)
9769 {
9770 struct ixgbe_adapter *adapter = cb_priv;
9771
9772 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9773 return -EOPNOTSUPP;
9774
9775 switch (type) {
9776 case TC_SETUP_CLSU32:
9777 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9778 default:
9779 return -EOPNOTSUPP;
9780 }
9781 }
9782
9783 static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9784 struct tc_mqprio_qopt *mqprio)
9785 {
9786 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9787 return ixgbe_setup_tc(dev, mqprio->num_tc);
9788 }
9789
9790 static LIST_HEAD(ixgbe_block_cb_list);
9791
9792 static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9793 void *type_data)
9794 {
9795 struct ixgbe_adapter *adapter = netdev_priv(dev);
9796
9797 switch (type) {
9798 case TC_SETUP_BLOCK:
9799 return flow_block_cb_setup_simple(type_data,
9800 &ixgbe_block_cb_list,
9801 ixgbe_setup_tc_block_cb,
9802 adapter, adapter, true);
9803 case TC_SETUP_QDISC_MQPRIO:
9804 return ixgbe_setup_tc_mqprio(dev, type_data);
9805 default:
9806 return -EOPNOTSUPP;
9807 }
9808 }
9809
9810 #ifdef CONFIG_PCI_IOV
9811 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9812 {
9813 struct net_device *netdev = adapter->netdev;
9814
9815 rtnl_lock();
9816 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9817 rtnl_unlock();
9818 }
9819
9820 #endif
9821 void ixgbe_do_reset(struct net_device *netdev)
9822 {
9823 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9824
9825 if (netif_running(netdev))
9826 ixgbe_reinit_locked(adapter);
9827 else
9828 ixgbe_reset(adapter);
9829 }
9830
9831 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9832 netdev_features_t features)
9833 {
9834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9835
9836
9837 if (!(features & NETIF_F_RXCSUM))
9838 features &= ~NETIF_F_LRO;
9839
9840
9841 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9842 features &= ~NETIF_F_LRO;
9843
9844 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9845 e_dev_err("LRO is not supported with XDP\n");
9846 features &= ~NETIF_F_LRO;
9847 }
9848
9849 return features;
9850 }
9851
9852 static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9853 {
9854 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9855 num_online_cpus());
9856
9857 	/* go back to full RSS if we're not running SR-IOV */
9858 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9859 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9860 IXGBE_FLAG_SRIOV_ENABLED);
9861
9862 adapter->ring_feature[RING_F_RSS].limit = rss;
9863 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9864
9865 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9866 }
9867
9868 static int ixgbe_set_features(struct net_device *netdev,
9869 netdev_features_t features)
9870 {
9871 struct ixgbe_adapter *adapter = netdev_priv(netdev);
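	/* note which feature bits differ from the currently active set */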
9872 netdev_features_t changed = netdev->features ^ features;
9873 bool need_reset = false;
9874
9875 	/* Make sure RSC matches LRO, reset if change */
9876 if (!(features & NETIF_F_LRO)) {
9877 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9878 need_reset = true;
9879 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9880 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9881 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9882 if (adapter->rx_itr_setting == 1 ||
9883 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9884 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9885 need_reset = true;
9886 } else if ((changed ^ features) & NETIF_F_LRO) {
9887 e_info(probe, "rx-usecs set too low, "
9888 "disabling RSC\n");
9889 }
9890 }
9891
9892 	/*
9893 	 * Check if Flow Director n-tuple support or hw_tc support was
9894 	 * enabled or disabled.  If the state changed, we need to reset.
9895 	 */
9896 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9897 		/* turn off ATR, enable perfect filters and reset */
9898 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9899 need_reset = true;
9900
9901 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9902 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9903 } else {
9904 		/* turn off perfect filters, enable ATR and reset */
9905 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9906 need_reset = true;
9907
9908 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9909
9910 		/* We cannot enable ATR if SR-IOV is enabled */
9911 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9912 		    /* We cannot enable ATR if we have 2 or more tcs */
9913 		    (adapter->hw_tcs > 1) ||
9914 		    /* We cannot enable ATR if RSS is disabled */
9915 		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9916 		    /* A sample rate of 0 indicates ATR disabled */
9917 		    (!adapter->atr_sample_rate))
9918 			; /* do nothing, ATR is not supported in this configuration */
9919 		else /* otherwise ATR is supported, set the flag */
9920 			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9921 }
9922
9923 if (changed & NETIF_F_RXALL)
9924 need_reset = true;
9925
9926 netdev->features = features;
9927
9928 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9929 ixgbe_reset_l2fw_offload(adapter);
9930 else if (need_reset)
9931 ixgbe_do_reset(netdev);
9932 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9933 NETIF_F_HW_VLAN_CTAG_FILTER))
9934 ixgbe_set_rx_mode(netdev);
9935
9936 return 1;
9937 }
9938
9939 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9940 struct net_device *dev,
9941 const unsigned char *addr, u16 vid,
9942 u16 flags,
9943 struct netlink_ext_ack *extack)
9944 {
9945 	/* guarantee we can provide a unique filter for the unicast address */
9946 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9947 struct ixgbe_adapter *adapter = netdev_priv(dev);
9948 u16 pool = VMDQ_P(0);
9949
9950 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9951 return -ENOMEM;
9952 }
9953
9954 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9955 }
9956
9957 /**
9958  * ixgbe_configure_bridge_mode - set various bridge modes
9959  * @adapter: the private structure
9960  * @mode: requested bridge mode
9961  *
9962  * Configure some settings required for the various bridge modes.
9963  */
9964 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9965 __u16 mode)
9966 {
9967 struct ixgbe_hw *hw = &adapter->hw;
9968 unsigned int p, num_pools;
9969 u32 vmdctl;
9970
9971 switch (mode) {
9972 case BRIDGE_MODE_VEPA:
9973
9974 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9975
9976 		/* must enable Rx switching replication to allow multicast
9977 		 * packet reception on all VFs, and to enable source address
9978 		 * pruning.
9979 		 */
9980 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9981 vmdctl |= IXGBE_VT_CTL_REPLEN;
9982 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9983
9984 		/* enable Rx source address pruning. Note that this requires
9985 		 * replication to be enabled or else it does nothing.
9986 		 */
9987 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9988 for (p = 0; p < num_pools; p++) {
9989 if (hw->mac.ops.set_source_address_pruning)
9990 hw->mac.ops.set_source_address_pruning(hw,
9991 true,
9992 p);
9993 }
9994 break;
9995 case BRIDGE_MODE_VEB:
9996
9997 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9998 IXGBE_PFDTXGSWC_VT_LBEN);
9999
10000 		/* disable Rx switching replication unless we have SR-IOV
10001 		 * virtual functions
10002 		 */
10003 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
10004 if (!adapter->num_vfs)
10005 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
10006 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
10007
10008 		/* disable Rx source address pruning, since we don't expect to
10009 		 * be receiving external loopback of our transmitted frames.
10010 		 */
10011 num_pools = adapter->num_vfs + adapter->num_rx_pools;
10012 for (p = 0; p < num_pools; p++) {
10013 if (hw->mac.ops.set_source_address_pruning)
10014 hw->mac.ops.set_source_address_pruning(hw,
10015 false,
10016 p);
10017 }
10018 break;
10019 default:
10020 return -EINVAL;
10021 }
10022
10023 adapter->bridge_mode = mode;
10024
10025 e_info(drv, "enabling bridge mode: %s\n",
10026 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10027
10028 return 0;
10029 }
10030
10031 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
10032 struct nlmsghdr *nlh, u16 flags,
10033 struct netlink_ext_ack *extack)
10034 {
10035 struct ixgbe_adapter *adapter = netdev_priv(dev);
10036 struct nlattr *attr, *br_spec;
10037 int rem;
10038
10039 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10040 return -EOPNOTSUPP;
10041
10042 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10043 if (!br_spec)
10044 return -EINVAL;
10045
10046 nla_for_each_nested(attr, br_spec, rem) {
10047 int status;
10048 __u16 mode;
10049
10050 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10051 continue;
10052
10053 if (nla_len(attr) < sizeof(mode))
10054 return -EINVAL;
10055
10056 mode = nla_get_u16(attr);
10057 status = ixgbe_configure_bridge_mode(adapter, mode);
10058 if (status)
10059 return status;
10060
10061 break;
10062 }
10063
10064 return 0;
10065 }
10066
10067 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10068 struct net_device *dev,
10069 u32 filter_mask, int nlflags)
10070 {
10071 struct ixgbe_adapter *adapter = netdev_priv(dev);
10072
10073 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10074 return 0;
10075
10076 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
10077 adapter->bridge_mode, 0, 0, nlflags,
10078 filter_mask, NULL);
10079 }
10080
10081 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
10082 {
10083 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10084 struct ixgbe_fwd_adapter *accel;
10085 int tcs = adapter->hw_tcs ? : 1;
10086 int pool, err;
10087
10088 if (adapter->xdp_prog) {
10089 e_warn(probe, "L2FW offload is not supported with XDP\n");
10090 return ERR_PTR(-EINVAL);
10091 }
10092
10093 	/* The hardware supported by ixgbe only filters on the destination MAC
10094 	 * address. In order to avoid issues we only support offloading modes
10095 	 * where the hardware can actually provide the functionality.
10096 	 */
10097 if (!macvlan_supports_dest_filter(vdev))
10098 return ERR_PTR(-EMEDIUMTYPE);
10099
10100 	/* We need to lock down the macvlan to be a single queue device so that
10101 	 * we can reuse the tc_to_txq field in the macvlan netdev to represent
10102 	 * the queue mapping to our netdev.
10103 	 */
10104 if (netif_is_multiqueue(vdev))
10105 return ERR_PTR(-ERANGE);
10106
10107 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
10108 if (pool == adapter->num_rx_pools) {
10109 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
10110 u16 reserved_pools;
10111
10112 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
10113 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
10114 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
10115 return ERR_PTR(-EBUSY);
10116
10117 		/* Hardware has a limited number of available pools. Each VF,
10118 		 * and the PF, requires a pool. Check to ensure we don't
10119 		 * attempt to use more than the available number of pools.
10120 		 */
10121 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
10122 return ERR_PTR(-EBUSY);
10123
10124
10125 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
10126 IXGBE_FLAG_SRIOV_ENABLED;
10127
10128 		/* Try to reserve as many queues per pool as possible;
10129 		 * we start with the configurations that support 4 queues
10130 		 * per pool, followed by 2, and then by just 1 per pool.
10131 		 */
10132 if (used_pools < 32 && adapter->num_rx_pools < 16)
10133 reserved_pools = min_t(u16,
10134 32 - used_pools,
10135 16 - adapter->num_rx_pools);
10136 else if (adapter->num_rx_pools < 32)
10137 reserved_pools = min_t(u16,
10138 64 - used_pools,
10139 32 - adapter->num_rx_pools);
10140 else
10141 reserved_pools = 64 - used_pools;
10142
10143
10144 if (!reserved_pools)
10145 return ERR_PTR(-EBUSY);
10146
10147 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10148
10149
10150 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10151 if (err)
10152 return ERR_PTR(err);
10153
10154 if (pool >= adapter->num_rx_pools)
10155 return ERR_PTR(-ENOMEM);
10156 }
10157
10158 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10159 if (!accel)
10160 return ERR_PTR(-ENOMEM);
10161
10162 set_bit(pool, adapter->fwd_bitmask);
10163 netdev_set_sb_channel(vdev, pool);
10164 accel->pool = pool;
10165 accel->netdev = vdev;
10166
10167 if (!netif_running(pdev))
10168 return accel;
10169
10170 err = ixgbe_fwd_ring_up(adapter, accel);
10171 if (err)
10172 return ERR_PTR(err);
10173
10174 return accel;
10175 }
10176
10177 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10178 {
10179 struct ixgbe_fwd_adapter *accel = priv;
10180 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10181 unsigned int rxbase = accel->rx_base_queue;
10182 unsigned int i;
10183
10184
10185 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10186 VMDQ_P(accel->pool));
10187
10188 	/* Allow remaining Rx packets to get flushed out of the
10189 	 * Rx FIFO before we drop the netdev for the ring.
10190 	 */
10191 usleep_range(10000, 20000);
10192
10193 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10194 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10195 struct ixgbe_q_vector *qv = ring->q_vector;
10196
10197 		/* Make sure we aren't processing any packets and clear
10198 		 * netdev to shut down the ring.
10199 		 */
10200 if (netif_running(adapter->netdev))
10201 napi_synchronize(&qv->napi);
10202 ring->netdev = NULL;
10203 }
10204
10205
10206 netdev_unbind_sb_channel(pdev, accel->netdev);
10207 netdev_set_sb_channel(accel->netdev, 0);
10208
10209 clear_bit(accel->pool, adapter->fwd_bitmask);
10210 kfree(accel);
10211 }
10212
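/* Largest MAC and network header lengths that ixgbe_features_check() will
 * allow before stripping checksum/TSO offloads from a frame.
 */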
10213 #define IXGBE_MAX_MAC_HDR_LEN 127
10214 #define IXGBE_MAX_NETWORK_HDR_LEN 511
10215
10216 static netdev_features_t
10217 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10218 netdev_features_t features)
10219 {
10220 unsigned int network_hdr_len, mac_hdr_len;
10221
10222 	/* Make certain the headers can be described by a context descriptor */
10223 mac_hdr_len = skb_network_header(skb) - skb->data;
10224 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10225 return features & ~(NETIF_F_HW_CSUM |
10226 NETIF_F_SCTP_CRC |
10227 NETIF_F_GSO_UDP_L4 |
10228 NETIF_F_HW_VLAN_CTAG_TX |
10229 NETIF_F_TSO |
10230 NETIF_F_TSO6);
10231
10232 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10233 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10234 return features & ~(NETIF_F_HW_CSUM |
10235 NETIF_F_SCTP_CRC |
10236 NETIF_F_GSO_UDP_L4 |
10237 NETIF_F_TSO |
10238 NETIF_F_TSO6);
10239
10240 	/* We can only support IPv4 TSO in tunnels if we can mangle the
10241 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
10242 	 * IPsec offload sets skb->encapsulation but still can handle
10243 	 * the TSO, so it's the exception.
10244 	 */
10245 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10246 #ifdef CONFIG_IXGBE_IPSEC
10247 if (!secpath_exists(skb))
10248 #endif
10249 features &= ~NETIF_F_TSO;
10250 }
10251
10252 return features;
10253 }
10254
10255 static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10256 {
10257 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10258 struct ixgbe_adapter *adapter = netdev_priv(dev);
10259 struct bpf_prog *old_prog;
10260 bool need_reset;
10261 int num_queues;
10262
10263 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10264 return -EINVAL;
10265
10266 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10267 return -EINVAL;
10268
10269 	/* verify ixgbe ring attributes are sufficient for XDP */
10270 for (i = 0; i < adapter->num_rx_queues; i++) {
10271 struct ixgbe_ring *ring = adapter->rx_ring[i];
10272
10273 if (ring_is_rsc_enabled(ring))
10274 return -EINVAL;
10275
10276 if (frame_size > ixgbe_rx_bufsz(ring))
10277 return -EINVAL;
10278 }
10279
10280 	/* if the number of cpus is much larger than the maximum number of
10281 	 * queues, we should stop it and then return with ENOMEM like before.
10282 	 */
10283 if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
10284 return -ENOMEM;
10285 else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
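		/* XDP Tx rings will be shared between CPUs, so enable
		 * per-ring tail locking
		 */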
10286 static_branch_inc(&ixgbe_xdp_locking_key);
10287
10288 old_prog = xchg(&adapter->xdp_prog, prog);
10289 need_reset = (!!prog != !!old_prog);
10290
10291
10292 if (need_reset) {
10293 int err;
10294
10295 if (!prog)
10296
10297 synchronize_rcu();
10298 err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10299
10300 if (err) {
10301 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10302 return -EINVAL;
10303 }
10304 } else {
10305 for (i = 0; i < adapter->num_rx_queues; i++)
10306 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10307 adapter->xdp_prog);
10308 }
10309
10310 if (old_prog)
10311 bpf_prog_put(old_prog);
10312
10313 	/* Kick start the NAPI context if there is an AF_XDP socket open
10314 	 * on that queue id. This is so that receiving will start.
10315 	 */
10316 if (need_reset && prog) {
10317 num_queues = min_t(int, adapter->num_rx_queues,
10318 adapter->num_xdp_queues);
10319 for (i = 0; i < num_queues; i++)
10320 if (adapter->xdp_ring[i]->xsk_pool)
10321 (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10322 XDP_WAKEUP_RX);
10323 }
10324
10325 return 0;
10326 }
10327
10328 static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10329 {
10330 struct ixgbe_adapter *adapter = netdev_priv(dev);
10331
10332 switch (xdp->command) {
10333 case XDP_SETUP_PROG:
10334 return ixgbe_xdp_setup(dev, xdp->prog);
10335 case XDP_SETUP_XSK_POOL:
10336 return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
10337 xdp->xsk.queue_id);
10338
10339 default:
10340 return -EINVAL;
10341 }
10342 }
10343
10344 void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10345 {
10346 	/* Force memory writes to complete before letting h/w
10347 	 * know there are new descriptors to fetch.
10348 	 */
10349 wmb();
10350 writel(ring->next_to_use, ring->tail);
10351 }
10352
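/* Same as ixgbe_xdp_ring_update_tail(), but takes the ring's tx_lock when
 * XDP Tx rings are shared between CPUs (see ixgbe_xdp_locking_key).
 */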
10353 void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
10354 {
10355 if (static_branch_unlikely(&ixgbe_xdp_locking_key))
10356 spin_lock(&ring->tx_lock);
10357 ixgbe_xdp_ring_update_tail(ring);
10358 if (static_branch_unlikely(&ixgbe_xdp_locking_key))
10359 spin_unlock(&ring->tx_lock);
10360 }
10361
10362 static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10363 struct xdp_frame **frames, u32 flags)
10364 {
10365 struct ixgbe_adapter *adapter = netdev_priv(dev);
10366 struct ixgbe_ring *ring;
10367 int nxmit = 0;
10368 int i;
10369
10370 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10371 return -ENETDOWN;
10372
10373 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10374 return -EINVAL;
10375
10376 	/* During program transitions it is possible that adapter->xdp_prog is
10377 	 * assigned but the ring has not been configured yet. In this case
10378 	 * simply abort xmit. */
10379 ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
10380 if (unlikely(!ring))
10381 return -ENXIO;
10382
10383 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10384 return -ENXIO;
10385
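	/* serialize access to the XDP Tx ring if it may be shared with other CPUs */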
10386 if (static_branch_unlikely(&ixgbe_xdp_locking_key))
10387 spin_lock(&ring->tx_lock);
10388
10389 for (i = 0; i < n; i++) {
10390 struct xdp_frame *xdpf = frames[i];
10391 int err;
10392
10393 err = ixgbe_xmit_xdp_ring(ring, xdpf);
10394 if (err != IXGBE_XDP_TX)
10395 break;
10396 nxmit++;
10397 }
10398
10399 if (unlikely(flags & XDP_XMIT_FLUSH))
10400 ixgbe_xdp_ring_update_tail(ring);
10401
10402 if (static_branch_unlikely(&ixgbe_xdp_locking_key))
10403 spin_unlock(&ring->tx_lock);
10404
10405 return nxmit;
10406 }
10407
10408 static const struct net_device_ops ixgbe_netdev_ops = {
10409 .ndo_open = ixgbe_open,
10410 .ndo_stop = ixgbe_close,
10411 .ndo_start_xmit = ixgbe_xmit_frame,
10412 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10413 .ndo_validate_addr = eth_validate_addr,
10414 .ndo_set_mac_address = ixgbe_set_mac,
10415 .ndo_change_mtu = ixgbe_change_mtu,
10416 .ndo_tx_timeout = ixgbe_tx_timeout,
10417 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10418 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10419 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10420 .ndo_eth_ioctl = ixgbe_ioctl,
10421 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10422 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10423 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10424 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10425 .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state,
10426 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10427 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10428 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10429 .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats,
10430 .ndo_get_stats64 = ixgbe_get_stats64,
10431 .ndo_setup_tc = __ixgbe_setup_tc,
10432 #ifdef IXGBE_FCOE
10433 .ndo_select_queue = ixgbe_select_queue,
10434 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10435 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10436 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10437 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10438 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10439 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10440 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10441 #endif
10442 .ndo_set_features = ixgbe_set_features,
10443 .ndo_fix_features = ixgbe_fix_features,
10444 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10445 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10446 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10447 .ndo_dfwd_add_station = ixgbe_fwd_add,
10448 .ndo_dfwd_del_station = ixgbe_fwd_del,
10449 .ndo_features_check = ixgbe_features_check,
10450 .ndo_bpf = ixgbe_xdp,
10451 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10452 .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
10453 };
10454
10455 static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10456 struct ixgbe_ring *tx_ring)
10457 {
10458 unsigned long wait_delay, delay_interval;
10459 struct ixgbe_hw *hw = &adapter->hw;
10460 u8 reg_idx = tx_ring->reg_idx;
10461 int wait_loop;
10462 u32 txdctl;
10463
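	/* clear TXDCTL.ENABLE and request a software flush of the Tx queue */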
10464 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10465
10466
10467 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10468
10469 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10470 wait_delay = delay_interval;
10471
10472 while (wait_loop--) {
10473 usleep_range(wait_delay, wait_delay + 10);
10474 wait_delay += delay_interval * 2;
10475 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10476
10477 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10478 return;
10479 }
10480
10481 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10482 }
10483
10484 static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10485 struct ixgbe_ring *tx_ring)
10486 {
10487 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10488 ixgbe_disable_txr_hw(adapter, tx_ring);
10489 }
10490
10491 static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10492 struct ixgbe_ring *rx_ring)
10493 {
10494 unsigned long wait_delay, delay_interval;
10495 struct ixgbe_hw *hw = &adapter->hw;
10496 u8 reg_idx = rx_ring->reg_idx;
10497 int wait_loop;
10498 u32 rxdctl;
10499
10500 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10501 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10502 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10503
10504
10505 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10506
10507 	/* RXDCTL.EN may not change on 82598 if the link is down, so skip it */
10508 if (hw->mac.type == ixgbe_mac_82598EB &&
10509 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10510 return;
10511
10512
10513 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10514
10515 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10516 wait_delay = delay_interval;
10517
10518 while (wait_loop--) {
10519 usleep_range(wait_delay, wait_delay + 10);
10520 wait_delay += delay_interval * 2;
10521 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10522
10523 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10524 return;
10525 }
10526
10527 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10528 }
10529
10530 static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10531 {
10532 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10533 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10534 }
10535
10536 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10537 {
10538 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10539 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10540 }
10541
10542 /**
10543  * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
10544  * @adapter: adapter structure
10545  * @ring: ring index
10546  *
10547  * This function disables a certain Rx/Tx/XDP Tx ring. The function
10548  * assumes that the netdev is running.
10549  **/
10550 void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10551 {
10552 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10553
10554 rx_ring = adapter->rx_ring[ring];
10555 tx_ring = adapter->tx_ring[ring];
10556 xdp_ring = adapter->xdp_ring[ring];
10557
10558 ixgbe_disable_txr(adapter, tx_ring);
10559 if (xdp_ring)
10560 ixgbe_disable_txr(adapter, xdp_ring);
10561 ixgbe_disable_rxr_hw(adapter, rx_ring);
10562
10563 if (xdp_ring)
10564 synchronize_rcu();
10565
10566 	/* Rx/Tx/XDP Tx share the same napi context. */
10567 napi_disable(&rx_ring->q_vector->napi);
10568
10569 ixgbe_clean_tx_ring(tx_ring);
10570 if (xdp_ring)
10571 ixgbe_clean_tx_ring(xdp_ring);
10572 ixgbe_clean_rx_ring(rx_ring);
10573
10574 ixgbe_reset_txr_stats(tx_ring);
10575 if (xdp_ring)
10576 ixgbe_reset_txr_stats(xdp_ring);
10577 ixgbe_reset_rxr_stats(rx_ring);
10578 }
10579
10580 /**
10581  * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
10582  * @adapter: adapter structure
10583  * @ring: ring index
10584  *
10585  * This function enables a certain Rx/Tx/XDP Tx ring. The function
10586  * assumes that the netdev is running.
10587  **/
10588 void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10589 {
10590 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10591
10592 rx_ring = adapter->rx_ring[ring];
10593 tx_ring = adapter->tx_ring[ring];
10594 xdp_ring = adapter->xdp_ring[ring];
10595
10596
10597 napi_enable(&rx_ring->q_vector->napi);
10598
10599 ixgbe_configure_tx_ring(adapter, tx_ring);
10600 if (xdp_ring)
10601 ixgbe_configure_tx_ring(adapter, xdp_ring);
10602 ixgbe_configure_rx_ring(adapter, rx_ring);
10603
10604 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10605 if (xdp_ring)
10606 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10607 }
10608
10609 /**
10610  * ixgbe_enumerate_functions - Get the number of ports this device has
10611  * @adapter: adapter structure
10612  *
10613  * This function enumerates the physical functions co-located on a single
10614  * slot, in order to determine how many ports a device has. This is most
10615  * useful in determining the required GT/s of PCIe bandwidth necessary for
10616  * optimal performance.
10617  **/
10618 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10619 {
10620 struct pci_dev *entry, *pdev = adapter->pdev;
10621 int physfns = 0;
10622
10623 	/* Some cards can not use the generic count PCIe functions method,
10624 	 * because they are behind a parent switch, so we hardcode these to
10625 	 * the correct number of functions.
10626 	 */
10627 if (ixgbe_pcie_from_parent(&adapter->hw))
10628 physfns = 4;
10629
10630 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
10631
10632 if (entry->is_virtfn)
10633 continue;
10634
10635 		/* When the devices on the bus don't all match our device ID,
10636 		 * we can't reliably determine the correct number of
10637 		 * functions. This can occur if a function has been direct
10638 		 * attached to a virtual machine using VT-d, for example. In
10639 		 * this case, simply return -1 to indicate this.
10640 		 */
10641 if ((entry->vendor != pdev->vendor) ||
10642 (entry->device != pdev->device))
10643 return -1;
10644
10645 physfns++;
10646 }
10647
10648 return physfns;
10649 }
10650
10651 /**
10652  * ixgbe_wol_supported - Check whether the device supports WoL
10653  * @adapter: the adapter private structure
10654  * @device_id: the device ID
10655  * @subdevice_id: the subsystem device ID
10656  *
10657  * This function is used by probe and ethtool to determine
10658  * which devices have WoL support
10659  *
10660  **/
10661 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10662 u16 subdevice_id)
10663 {
10664 struct ixgbe_hw *hw = &adapter->hw;
10665 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10666
10667
10668 if (hw->mac.type == ixgbe_mac_82598EB)
10669 return false;
10670
10671
10672 if (hw->mac.type >= ixgbe_mac_X540) {
10673 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10674 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10675 (hw->bus.func == 0)))
10676 return true;
10677 }
10678
10679
10680 switch (device_id) {
10681 case IXGBE_DEV_ID_82599_SFP:
10682
10683 switch (subdevice_id) {
10684 case IXGBE_SUBDEV_ID_82599_560FLR:
10685 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10686 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10687 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
10688
10689 if (hw->bus.func != 0)
10690 break;
10691 fallthrough;
10692 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10693 case IXGBE_SUBDEV_ID_82599_SFP:
10694 case IXGBE_SUBDEV_ID_82599_RNDC:
10695 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10696 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10697 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10698 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10699 return true;
10700 }
10701 break;
10702 case IXGBE_DEV_ID_82599EN_SFP:
10703
10704 switch (subdevice_id) {
10705 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10706 return true;
10707 }
10708 break;
10709 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10710
10711 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10712 return true;
10713 break;
10714 case IXGBE_DEV_ID_82599_KX4:
10715 return true;
10716 default:
10717 break;
10718 }
10719
10720 return false;
10721 }
10722
10723 /**
10724  * ixgbe_set_fw_version - Set FW version
10725  * @adapter: the adapter private structure
10726  *
10727  * This function is used by probe and ethtool to determine the FW version to
10728  * format and display. The FW version is taken from the EEPROM/NVM.
10729  */
10730 static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10731 {
10732 struct ixgbe_hw *hw = &adapter->hw;
10733 struct ixgbe_nvm_version nvm_ver;
10734
10735 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10736 if (nvm_ver.oem_valid) {
10737 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10738 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10739 nvm_ver.oem_release);
10740 return;
10741 }
10742
10743 ixgbe_get_etk_id(hw, &nvm_ver);
10744 ixgbe_get_orom_version(hw, &nvm_ver);
10745
10746 if (nvm_ver.or_valid) {
10747 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10748 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10749 nvm_ver.or_build, nvm_ver.or_patch);
10750 return;
10751 }
10752
10753
10754 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10755 "0x%08x", nvm_ver.etk_id);
10756 }
10757
10758 /**
10759  * ixgbe_probe - Device Initialization Routine
10760  * @pdev: PCI device information struct
10761  * @ent: entry in ixgbe_pci_tbl
10762  *
10763  * Returns 0 on success, negative on failure
10764  *
10765  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
10766  * The OS initialization, configuring of the adapter private structure,
10767  * and a hardware reset occur.
10768  **/
10769 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10770 {
10771 struct net_device *netdev;
10772 struct ixgbe_adapter *adapter = NULL;
10773 struct ixgbe_hw *hw;
10774 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10775 unsigned int indices = MAX_TX_QUEUES;
10776 u8 part_str[IXGBE_PBANUM_LENGTH];
10777 int i, err, expected_gts;
10778 bool disable_dev = false;
10779 #ifdef IXGBE_FCOE
10780 u16 device_caps;
10781 #endif
10782 u32 eec;
10783
10784 	/* Catch broken hardware that put the wrong VF device ID in
10785 	 * the PCIe SR-IOV capability.
10786 	 */
10787 if (pdev->is_virtfn) {
10788 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10789 pci_name(pdev), pdev->vendor, pdev->device);
10790 return -EINVAL;
10791 }
10792
10793 err = pci_enable_device_mem(pdev);
10794 if (err)
10795 return err;
10796
10797 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10798 if (err) {
10799 dev_err(&pdev->dev,
10800 "No usable DMA configuration, aborting\n");
10801 goto err_dma;
10802 }
10803
10804 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10805 if (err) {
10806 dev_err(&pdev->dev,
10807 "pci_request_selected_regions failed 0x%x\n", err);
10808 goto err_pci_reg;
10809 }
10810
10811 pci_enable_pcie_error_reporting(pdev);
10812
10813 pci_set_master(pdev);
10814 pci_save_state(pdev);
10815
10816 if (ii->mac == ixgbe_mac_82598EB) {
10817 #ifdef CONFIG_IXGBE_DCB
10818
10819 indices = 4 * MAX_TRAFFIC_CLASS;
10820 #else
10821 indices = IXGBE_MAX_RSS_INDICES;
10822 #endif
10823 }
10824
10825 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10826 if (!netdev) {
10827 err = -ENOMEM;
10828 goto err_alloc_etherdev;
10829 }
10830
10831 SET_NETDEV_DEV(netdev, &pdev->dev);
10832
10833 adapter = netdev_priv(netdev);
10834
10835 adapter->netdev = netdev;
10836 adapter->pdev = pdev;
10837 hw = &adapter->hw;
10838 hw->back = adapter;
10839 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10840
10841 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10842 pci_resource_len(pdev, 0));
10843 adapter->io_addr = hw->hw_addr;
10844 if (!hw->hw_addr) {
10845 err = -EIO;
10846 goto err_ioremap;
10847 }
10848
10849 netdev->netdev_ops = &ixgbe_netdev_ops;
10850 ixgbe_set_ethtool_ops(netdev);
10851 netdev->watchdog_timeo = 5 * HZ;
10852 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10853
10854
10855 hw->mac.ops = *ii->mac_ops;
10856 hw->mac.type = ii->mac;
10857 hw->mvals = ii->mvals;
10858 if (ii->link_ops)
10859 hw->link.ops = *ii->link_ops;
10860
10861
10862 hw->eeprom.ops = *ii->eeprom_ops;
10863 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10864 if (ixgbe_removed(hw->hw_addr)) {
10865 err = -EIO;
10866 goto err_ioremap;
10867 }
10868 	/* If EEPROM is valid (bit 8 = 1), use the default read, otherwise use bit bang */
10869 if (!(eec & BIT(8)))
10870 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10871
10872
10873 hw->phy.ops = *ii->phy_ops;
10874 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10875
10876 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10877 hw->phy.mdio.mmds = 0;
10878 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10879 hw->phy.mdio.dev = netdev;
10880 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10881 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10882
10883
10884 err = ixgbe_sw_init(adapter, ii);
10885 if (err)
10886 goto err_sw_init;
10887
10888 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
10889 adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
10890
10891 switch (adapter->hw.mac.type) {
10892 case ixgbe_mac_X550:
10893 case ixgbe_mac_X550EM_x:
10894 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
10895 break;
10896 case ixgbe_mac_x550em_a:
10897 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
10898 break;
10899 default:
10900 break;
10901 }
10902
10903
10904 if (hw->mac.ops.init_swfw_sync)
10905 hw->mac.ops.init_swfw_sync(hw);
10906
10907
10908 switch (adapter->hw.mac.type) {
10909 case ixgbe_mac_82599EB:
10910 case ixgbe_mac_X540:
10911 case ixgbe_mac_X550:
10912 case ixgbe_mac_X550EM_x:
10913 case ixgbe_mac_x550em_a:
10914 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10915 break;
10916 default:
10917 break;
10918 }
10919
10920 	/*
10921 	 * If there is a fan on this device and it has failed, log the
10922 	 * failure.
10923 	 */
10924 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10925 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10926 if (esdp & IXGBE_ESDP_SDP1)
10927 e_crit(probe, "Fan has stopped, replace the adapter\n");
10928 }
10929
10930 if (allow_unsupported_sfp)
10931 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10932
10933
10934 hw->phy.reset_if_overtemp = true;
10935 err = hw->mac.ops.reset_hw(hw);
10936 hw->phy.reset_if_overtemp = false;
10937 ixgbe_set_eee_capable(adapter);
10938 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10939 err = 0;
10940 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10941 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10942 e_dev_err("Reload the driver after installing a supported module.\n");
10943 goto err_sw_init;
10944 } else if (err) {
10945 e_dev_err("HW Init failed: %d\n", err);
10946 goto err_sw_init;
10947 }
10948
10949 #ifdef CONFIG_PCI_IOV
10950 	/* SR-IOV is not supported on the 82598 */
10951 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10952 goto skip_sriov;
10953
10954 ixgbe_init_mbx_params_pf(hw);
10955 hw->mbx.ops = ii->mbx_ops;
10956 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10957 ixgbe_enable_sriov(adapter, max_vfs);
10958 skip_sriov:
10959
10960 #endif
10961 netdev->features = NETIF_F_SG |
10962 NETIF_F_TSO |
10963 NETIF_F_TSO6 |
10964 NETIF_F_RXHASH |
10965 NETIF_F_RXCSUM |
10966 NETIF_F_HW_CSUM;
10967
10968 #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10969 NETIF_F_GSO_GRE_CSUM | \
10970 NETIF_F_GSO_IPXIP4 | \
10971 NETIF_F_GSO_IPXIP6 | \
10972 NETIF_F_GSO_UDP_TUNNEL | \
10973 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10974
10975 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10976 netdev->features |= NETIF_F_GSO_PARTIAL |
10977 IXGBE_GSO_PARTIAL_FEATURES;
10978
10979 if (hw->mac.type >= ixgbe_mac_82599EB)
10980 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
10981
10982 #ifdef CONFIG_IXGBE_IPSEC
10983 #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10984 NETIF_F_HW_ESP_TX_CSUM | \
10985 NETIF_F_GSO_ESP)
10986
10987 if (adapter->ipsec)
10988 netdev->features |= IXGBE_ESP_FEATURES;
10989 #endif
10990
10991 netdev->hw_features |= netdev->features |
10992 NETIF_F_HW_VLAN_CTAG_FILTER |
10993 NETIF_F_HW_VLAN_CTAG_RX |
10994 NETIF_F_HW_VLAN_CTAG_TX |
10995 NETIF_F_RXALL |
10996 NETIF_F_HW_L2FW_DOFFLOAD;
10997
10998 if (hw->mac.type >= ixgbe_mac_82599EB)
10999 netdev->hw_features |= NETIF_F_NTUPLE |
11000 NETIF_F_HW_TC;
11001
11002 netdev->features |= NETIF_F_HIGHDMA;
11003
11004 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
11005 netdev->hw_enc_features |= netdev->vlan_features;
11006 netdev->mpls_features |= NETIF_F_SG |
11007 NETIF_F_TSO |
11008 NETIF_F_TSO6 |
11009 NETIF_F_HW_CSUM;
11010 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
11011
11012
11013 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
11014 NETIF_F_HW_VLAN_CTAG_RX |
11015 NETIF_F_HW_VLAN_CTAG_TX;
11016
11017 netdev->priv_flags |= IFF_UNICAST_FLT;
11018 netdev->priv_flags |= IFF_SUPP_NOFCS;
11019
11020
11021 netdev->min_mtu = ETH_MIN_MTU;
11022 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
11023
11024 #ifdef CONFIG_IXGBE_DCB
11025 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
11026 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
11027 #endif
11028
11029 #ifdef IXGBE_FCOE
11030 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
11031 unsigned int fcoe_l;
11032
11033 if (hw->mac.ops.get_device_caps) {
11034 hw->mac.ops.get_device_caps(hw, &device_caps);
11035 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
11036 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
11037 }
11038
11039
11040 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
11041 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
11042
11043 netdev->features |= NETIF_F_FSO |
11044 NETIF_F_FCOE_CRC;
11045
11046 netdev->vlan_features |= NETIF_F_FSO |
11047 NETIF_F_FCOE_CRC |
11048 NETIF_F_FCOE_MTU;
11049 }
11050 #endif
11051 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
11052 netdev->hw_features |= NETIF_F_LRO;
11053 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
11054 netdev->features |= NETIF_F_LRO;
11055
11056 if (ixgbe_check_fw_error(adapter)) {
11057 err = -EIO;
11058 goto err_sw_init;
11059 }
11060
11061 	/* make sure the EEPROM is good */
11062 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
11063 e_dev_err("The EEPROM Checksum Is Not Valid\n");
11064 err = -EIO;
11065 goto err_sw_init;
11066 }
11067
11068 eth_platform_get_mac_address(&adapter->pdev->dev,
11069 adapter->hw.mac.perm_addr);
11070
11071 eth_hw_addr_set(netdev, hw->mac.perm_addr);
11072
11073 if (!is_valid_ether_addr(netdev->dev_addr)) {
11074 e_dev_err("invalid MAC address\n");
11075 err = -EIO;
11076 goto err_sw_init;
11077 }
11078
11079
11080 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
11081 ixgbe_mac_set_default_filter(adapter);
11082
11083 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
11084
11085 if (ixgbe_removed(hw->hw_addr)) {
11086 err = -EIO;
11087 goto err_sw_init;
11088 }
11089 INIT_WORK(&adapter->service_task, ixgbe_service_task);
11090 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
11091 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
11092
11093 err = ixgbe_init_interrupt_scheme(adapter);
11094 if (err)
11095 goto err_sw_init;
11096
11097 for (i = 0; i < adapter->num_rx_queues; i++)
11098 u64_stats_init(&adapter->rx_ring[i]->syncp);
11099 for (i = 0; i < adapter->num_tx_queues; i++)
11100 u64_stats_init(&adapter->tx_ring[i]->syncp);
11101 for (i = 0; i < adapter->num_xdp_queues; i++)
11102 u64_stats_init(&adapter->xdp_ring[i]->syncp);
11103
11104 	/* WOL is not supported for all devices */
11105 adapter->wol = 0;
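	/* read the EEPROM device capabilities word (offset 0x2c) used by
	 * ixgbe_wol_supported() below
	 */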
11106 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
11107 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
11108 pdev->subsystem_device);
11109 if (hw->wol_enabled)
11110 adapter->wol = IXGBE_WUFC_MAG;
11111
11112 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
11113
11114
11115 ixgbe_set_fw_version(adapter);
11116
11117
11118 if (ixgbe_pcie_from_parent(hw))
11119 ixgbe_get_parent_bus_info(adapter);
11120 else
11121 hw->mac.ops.get_bus_info(hw);
11122
11123 	/* calculate the expected PCIe bandwidth required for optimal
11124 	 * performance. Note that some older parts will never have enough
11125 	 * bandwidth due to being older generation PCIe parts. We clamp these
11126 	 * parts to ensure no warning is displayed if it can't be fixed.
11127 	 */
11128 switch (hw->mac.type) {
11129 case ixgbe_mac_82598EB:
11130 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
11131 break;
11132 default:
11133 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
11134 break;
11135 }
11136
11137 	/* don't check link if we failed to enumerate functions */
11138 if (expected_gts > 0)
11139 ixgbe_check_minimum_link(adapter, expected_gts);
11140
11141 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
11142 if (err)
11143 strlcpy(part_str, "Unknown", sizeof(part_str));
11144 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
11145 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
11146 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
11147 part_str);
11148 else
11149 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
11150 hw->mac.type, hw->phy.type, part_str);
11151
11152 e_dev_info("%pM\n", netdev->dev_addr);
11153
11154
11155 err = hw->mac.ops.start_hw(hw);
11156 if (err == IXGBE_ERR_EEPROM_VERSION) {
11157 		/* We are running on a pre-production device, log a warning */
11158 e_dev_warn("This device is a pre-production adapter/LOM. "
11159 "Please be aware there may be issues associated "
11160 "with your hardware. If you are experiencing "
11161 "problems please contact your Intel or hardware "
11162 "representative who provided you with this "
11163 "hardware.\n");
11164 }
11165 strcpy(netdev->name, "eth%d");
11166 pci_set_drvdata(pdev, adapter);
11167 err = register_netdev(netdev);
11168 if (err)
11169 goto err_register;
11170
11171
11172
11173 if (hw->mac.ops.disable_tx_laser)
11174 hw->mac.ops.disable_tx_laser(hw);
11175
11176
11177 netif_carrier_off(netdev);
11178
11179 #ifdef CONFIG_IXGBE_DCA
11180 if (dca_add_requester(&pdev->dev) == 0) {
11181 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11182 ixgbe_setup_dca(adapter);
11183 }
11184 #endif
11185 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11186 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11187 for (i = 0; i < adapter->num_vfs; i++)
11188 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11189 }
11190
11191 	/* firmware requires the driver version to be set to 0xFFFFFFFF
11192 	 * since the OS does not support the feature
11193 	 */
11194 if (hw->mac.ops.set_fw_drv_ver)
11195 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11196 sizeof(UTS_RELEASE) - 1,
11197 UTS_RELEASE);
11198
11199
11200 ixgbe_add_sanmac_netdev(netdev);
11201
11202 e_dev_info("%s\n", ixgbe_default_device_descr);
11203
11204 #ifdef CONFIG_IXGBE_HWMON
11205 if (ixgbe_sysfs_init(adapter))
11206 e_err(probe, "failed to allocate sysfs resources\n");
11207 #endif
11208
11209 ixgbe_dbg_adapter_init(adapter);
11210
11211
11212 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11213 hw->mac.ops.setup_link(hw,
11214 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11215 true);
11216
11217 err = ixgbe_mii_bus_init(hw);
11218 if (err)
11219 goto err_netdev;
11220
11221 return 0;
11222
11223 err_netdev:
11224 unregister_netdev(netdev);
11225 err_register:
11226 ixgbe_release_hw_control(adapter);
11227 ixgbe_clear_interrupt_scheme(adapter);
11228 err_sw_init:
11229 ixgbe_disable_sriov(adapter);
11230 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11231 iounmap(adapter->io_addr);
11232 kfree(adapter->jump_tables[0]);
11233 kfree(adapter->mac_table);
11234 kfree(adapter->rss_key);
11235 bitmap_free(adapter->af_xdp_zc_qps);
11236 err_ioremap:
11237 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11238 free_netdev(netdev);
11239 err_alloc_etherdev:
11240 pci_disable_pcie_error_reporting(pdev);
11241 pci_release_mem_regions(pdev);
11242 err_pci_reg:
11243 err_dma:
11244 if (!adapter || disable_dev)
11245 pci_disable_device(pdev);
11246 return err;
11247 }
11248
11249 /**
11250  * ixgbe_remove - Device Removal Routine
11251  * @pdev: PCI device information struct
11252  *
11253  * ixgbe_remove is called by the PCI subsystem to alert the driver
11254  * that it should release a PCI device.  This could be caused by a
11255  * Hot-Plug event, or because the driver is going to be removed from
11256  * memory.
11257  **/
11258 static void ixgbe_remove(struct pci_dev *pdev)
11259 {
11260 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11261 struct net_device *netdev;
11262 bool disable_dev;
11263 int i;
11264
11265 	/* if !adapter then we already cleaned up in probe */
11266 if (!adapter)
11267 return;
11268
11269 netdev = adapter->netdev;
11270 ixgbe_dbg_adapter_exit(adapter);
11271
11272 set_bit(__IXGBE_REMOVING, &adapter->state);
11273 cancel_work_sync(&adapter->service_task);
11274
11275 if (adapter->mii_bus)
11276 mdiobus_unregister(adapter->mii_bus);
11277
11278 #ifdef CONFIG_IXGBE_DCA
11279 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11280 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11281 dca_remove_requester(&pdev->dev);
11282 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11283 IXGBE_DCA_CTRL_DCA_DISABLE);
11284 }
11285
11286 #endif
11287 #ifdef CONFIG_IXGBE_HWMON
11288 ixgbe_sysfs_exit(adapter);
11289 #endif
11290
11291
11292 ixgbe_del_sanmac_netdev(netdev);
11293
11294 #ifdef CONFIG_PCI_IOV
11295 ixgbe_disable_sriov(adapter);
11296 #endif
11297 if (netdev->reg_state == NETREG_REGISTERED)
11298 unregister_netdev(netdev);
11299
11300 ixgbe_stop_ipsec_offload(adapter);
11301 ixgbe_clear_interrupt_scheme(adapter);
11302
11303 ixgbe_release_hw_control(adapter);
11304
11305 #ifdef CONFIG_DCB
11306 kfree(adapter->ixgbe_ieee_pfc);
11307 kfree(adapter->ixgbe_ieee_ets);
11308
11309 #endif
11310 iounmap(adapter->io_addr);
11311 pci_release_mem_regions(pdev);
11312
11313 e_dev_info("complete\n");
11314
11315 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11316 if (adapter->jump_tables[i]) {
11317 kfree(adapter->jump_tables[i]->input);
11318 kfree(adapter->jump_tables[i]->mask);
11319 }
11320 kfree(adapter->jump_tables[i]);
11321 }
11322
11323 kfree(adapter->mac_table);
11324 kfree(adapter->rss_key);
11325 bitmap_free(adapter->af_xdp_zc_qps);
11326 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11327 free_netdev(netdev);
11328
11329 pci_disable_pcie_error_reporting(pdev);
11330
11331 if (disable_dev)
11332 pci_disable_device(pdev);
11333 }
11334
11335 /**
11336  * ixgbe_io_error_detected - called when a PCI error is detected
11337  * @pdev: Pointer to PCI device
11338  * @state: The current pci connection state
11339  *
11340  * This function is called after a PCI bus error affecting
11341  * this device has been detected.
11342  */
11343 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11344 pci_channel_state_t state)
11345 {
11346 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11347 struct net_device *netdev = adapter->netdev;
11348
11349 #ifdef CONFIG_PCI_IOV
11350 struct ixgbe_hw *hw = &adapter->hw;
11351 struct pci_dev *bdev, *vfdev;
11352 u32 dw0, dw1, dw2, dw3;
11353 int vf, pos;
11354 u16 req_id, pf_func;
11355
11356 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11357 adapter->num_vfs == 0)
11358 goto skip_bad_vf_detection;
11359
11360 bdev = pdev->bus->self;
11361 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11362 bdev = bdev->bus->self;
11363
11364 if (!bdev)
11365 goto skip_bad_vf_detection;
11366
11367 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11368 if (!pos)
11369 goto skip_bad_vf_detection;
11370
11371 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11372 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11373 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11374 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11375 if (ixgbe_removed(hw->hw_addr))
11376 goto skip_bad_vf_detection;
11377
11378 req_id = dw1 >> 16;
11379 /* if bit 7 of the requestor ID is set then it's a VF */
11380 if (!(req_id & 0x0080))
11381 goto skip_bad_vf_detection;
11382
11383 pf_func = req_id & 0x01;
11384 if ((pf_func & 1) == (pdev->devfn & 1)) {
11385 unsigned int device_id;
11386
11387 vf = (req_id & 0x7F) >> 1;
11388 e_dev_err("VF %d has caused a PCIe error\n", vf);
11389 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11390 "%8.8x\tdw3: %8.8x\n",
11391 dw0, dw1, dw2, dw3);
11392 switch (adapter->hw.mac.type) {
11393 case ixgbe_mac_82599EB:
11394 device_id = IXGBE_82599_VF_DEVICE_ID;
11395 break;
11396 case ixgbe_mac_X540:
11397 device_id = IXGBE_X540_VF_DEVICE_ID;
11398 break;
11399 case ixgbe_mac_X550:
11400 device_id = IXGBE_DEV_ID_X550_VF;
11401 break;
11402 case ixgbe_mac_X550EM_x:
11403 device_id = IXGBE_DEV_ID_X550EM_X_VF;
11404 break;
11405 case ixgbe_mac_x550em_a:
11406 device_id = IXGBE_DEV_ID_X550EM_A_VF;
11407 break;
11408 default:
11409 device_id = 0;
11410 break;
11411 }
11412
11413 /* Find the PCI device of the offending VF */
11414 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11415 while (vfdev) {
11416 if (vfdev->devfn == (req_id & 0xFF))
11417 break;
11418 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11419 device_id, vfdev);
11420 }
11421 /*
11422 * There's a slim chance the VF could have been hot plugged,
11423 * so if it is no longer present we don't need to issue the
11424 * VFLR.  Just clean up the AER in that case.
11425 */
11426 if (vfdev) {
11427 pcie_flr(vfdev);
11428 /* Free device reference count */
11429 pci_dev_put(vfdev);
11430 }
11431 }
11432
11433 /*
11434 * Even though the error may have occurred on the other port
11435 * we still need to increment the VF error reference count for
11436 * both ports because the I/O resume function will be called
11437 * for both of them.
11438 */
11439 adapter->vferr_refcount++;
11440
11441 return PCI_ERS_RESULT_RECOVERED;
11442
11443 skip_bad_vf_detection:
11444 #endif
11445 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11446 return PCI_ERS_RESULT_DISCONNECT;
11447
11448 if (!netif_device_present(netdev))
11449 return PCI_ERS_RESULT_DISCONNECT;
11450
11451 rtnl_lock();
11452 netif_device_detach(netdev);
11453
11454 if (netif_running(netdev))
11455 ixgbe_close_suspend(adapter);
11456
11457 if (state == pci_channel_io_perm_failure) {
11458 rtnl_unlock();
11459 return PCI_ERS_RESULT_DISCONNECT;
11460 }
11461
11462 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11463 pci_disable_device(pdev);
11464 rtnl_unlock();
11465
11466 /* Request a slot reset. */
11467 return PCI_ERS_RESULT_NEED_RESET;
11468 }
11469
11470 /**
11471 * ixgbe_io_slot_reset - called after the pci bus has been reset.
11472 * @pdev: Pointer to PCI device
11473 *
11474 * Restart the card from scratch, as if from a cold-boot.
11475 */
11476 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11477 {
11478 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11479 pci_ers_result_t result;
11480
11481 if (pci_enable_device_mem(pdev)) {
11482 e_err(probe, "Cannot re-enable PCI device after reset.\n");
11483 result = PCI_ERS_RESULT_DISCONNECT;
11484 } else {
11485 smp_mb__before_atomic();
11486 clear_bit(__IXGBE_DISABLED, &adapter->state);
11487 adapter->hw.hw_addr = adapter->io_addr;
11488 pci_set_master(pdev);
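/* restore the saved config space, then re-save it for future recovery */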
11489 pci_restore_state(pdev);
11490 pci_save_state(pdev);
11491
11492 pci_wake_from_d3(pdev, false);
11493
11494 ixgbe_reset(adapter);
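/* clear any pending Wake Up Status bits */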
11495 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11496 result = PCI_ERS_RESULT_RECOVERED;
11497 }
11498
11499 return result;
11500 }
11501
11502 /**
11503 * ixgbe_io_resume - called when traffic can start flowing again.
11504 * @pdev: Pointer to PCI device
11505 *
11506 * This callback is called when the error recovery driver tells us that
11507 * it is OK to resume normal operation.
11508 */
11509 static void ixgbe_io_resume(struct pci_dev *pdev)
11510 {
11511 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11512 struct net_device *netdev = adapter->netdev;
11513
11514 #ifdef CONFIG_PCI_IOV
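/* if a VF caused the error the PF kept running, so just drop the count */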
11515 if (adapter->vferr_refcount) {
11516 e_info(drv, "Resuming after VF err\n");
11517 adapter->vferr_refcount--;
11518 return;
11519 }
11520
11521 #endif
11522 rtnl_lock();
11523 if (netif_running(netdev))
11524 ixgbe_open(netdev);
11525
11526 netif_device_attach(netdev);
11527 rtnl_unlock();
11528 }
11529
11530 static const struct pci_error_handlers ixgbe_err_handler = {
11531 .error_detected = ixgbe_io_error_detected,
11532 .slot_reset = ixgbe_io_slot_reset,
11533 .resume = ixgbe_io_resume,
11534 };
11535
11536 static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
11537
11538 static struct pci_driver ixgbe_driver = {
11539 .name = ixgbe_driver_name,
11540 .id_table = ixgbe_pci_tbl,
11541 .probe = ixgbe_probe,
11542 .remove = ixgbe_remove,
11543 .driver.pm = &ixgbe_pm_ops,
11544 .shutdown = ixgbe_shutdown,
11545 .sriov_configure = ixgbe_pci_sriov_configure,
11546 .err_handler = &ixgbe_err_handler
11547 };
11548
11549 /**
11550 * ixgbe_init_module - Driver Registration Routine
11551 *
11552 * ixgbe_init_module is the first routine called when the driver is
11553 * loaded. All it does is register with the PCI subsystem.
11554 **/
11555 static int __init ixgbe_init_module(void)
11556 {
11557 int ret;
11558 pr_info("%s\n", ixgbe_driver_string);
11559 pr_info("%s\n", ixgbe_copyright);
11560
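/* all adapters share this single ordered workqueue for their service tasks */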
11561 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11562 if (!ixgbe_wq) {
11563 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11564 return -ENOMEM;
11565 }
11566
11567 ixgbe_dbg_init();
11568
11569 ret = pci_register_driver(&ixgbe_driver);
11570 if (ret) {
11571 destroy_workqueue(ixgbe_wq);
11572 ixgbe_dbg_exit();
11573 return ret;
11574 }
11575
11576 #ifdef CONFIG_IXGBE_DCA
11577 dca_register_notify(&dca_notifier);
11578 #endif
11579
11580 return 0;
11581 }
11582
11583 module_init(ixgbe_init_module);
11584
11585 /**
11586 * ixgbe_exit_module - Driver Exit Cleanup Routine
11587 *
11588 * ixgbe_exit_module is called just before the driver is removed
11589 * from memory.
11590 **/
11591 static void __exit ixgbe_exit_module(void)
11592 {
11593 #ifdef CONFIG_IXGBE_DCA
11594 dca_unregister_notify(&dca_notifier);
11595 #endif
11596 pci_unregister_driver(&ixgbe_driver);
11597
11598 ixgbe_dbg_exit();
11599 if (ixgbe_wq) {
11600 destroy_workqueue(ixgbe_wq);
11601 ixgbe_wq = NULL;
11602 }
11603 }
11604
11605 #ifdef CONFIG_IXGBE_DCA
11606 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11607 void *p)
11608 {
11609 int ret_val;
11610
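/* forward the DCA event to every ixgbe device bound to this driver */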
11611 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11612 __ixgbe_notify_dca);
11613
11614 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11615 }
11616
11617 #endif
11618
11619 module_exit(ixgbe_exit_module);
11620
11621