0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright(c) 2007 - 2018 Intel Corporation. */
0003
0004 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0005
0006 #include <linux/module.h>
0007 #include <linux/types.h>
0008 #include <linux/init.h>
0009 #include <linux/bitops.h>
0010 #include <linux/vmalloc.h>
0011 #include <linux/pagemap.h>
0012 #include <linux/netdevice.h>
0013 #include <linux/ipv6.h>
0014 #include <linux/slab.h>
0015 #include <net/checksum.h>
0016 #include <net/ip6_checksum.h>
0017 #include <net/pkt_sched.h>
0018 #include <net/pkt_cls.h>
0019 #include <linux/net_tstamp.h>
0020 #include <linux/mii.h>
0021 #include <linux/ethtool.h>
0022 #include <linux/if.h>
0023 #include <linux/if_vlan.h>
0024 #include <linux/pci.h>
0025 #include <linux/delay.h>
0026 #include <linux/interrupt.h>
0027 #include <linux/ip.h>
0028 #include <linux/tcp.h>
0029 #include <linux/sctp.h>
0030 #include <linux/if_ether.h>
0031 #include <linux/aer.h>
0032 #include <linux/prefetch.h>
0033 #include <linux/bpf.h>
0034 #include <linux/bpf_trace.h>
0035 #include <linux/pm_runtime.h>
0036 #include <linux/etherdevice.h>
0037 #ifdef CONFIG_IGB_DCA
0038 #include <linux/dca.h>
0039 #endif
0040 #include <linux/i2c.h>
0041 #include "igb.h"
0042
0043 enum queue_mode {
0044 QUEUE_MODE_STRICT_PRIORITY,
0045 QUEUE_MODE_STREAM_RESERVATION,
0046 };
0047
0048 enum tx_queue_prio {
0049 TX_QUEUE_PRIO_HIGH,
0050 TX_QUEUE_PRIO_LOW,
0051 };
0052
0053 char igb_driver_name[] = "igb";
0054 static const char igb_driver_string[] =
0055 "Intel(R) Gigabit Ethernet Network Driver";
0056 static const char igb_copyright[] =
0057 "Copyright (c) 2007-2014 Intel Corporation.";
0058
0059 static const struct e1000_info *igb_info_tbl[] = {
0060 [board_82575] = &e1000_82575_info,
0061 };
0062
0063 static const struct pci_device_id igb_pci_tbl[] = {
0064 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
0065 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
0066 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
0067 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
0068 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
0069 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
0070 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
0071 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
0072 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
0073 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
0074 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
0075 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
0076 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
0077 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
0078 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
0079 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
0080 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
0081 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
0082 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
0083 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
0084 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
0085 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
0086 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
0087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
0088 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
0089 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
0090 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
0091 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
0092 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
0093 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
0094 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
0095 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
0096 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
0097 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
0098 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
0099
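/* required last entry */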
0100 {0, }
0101 };
0102
0103 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
0104
0105 static int igb_setup_all_tx_resources(struct igb_adapter *);
0106 static int igb_setup_all_rx_resources(struct igb_adapter *);
0107 static void igb_free_all_tx_resources(struct igb_adapter *);
0108 static void igb_free_all_rx_resources(struct igb_adapter *);
0109 static void igb_setup_mrqc(struct igb_adapter *);
0110 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
0111 static void igb_remove(struct pci_dev *pdev);
0112 static int igb_sw_init(struct igb_adapter *);
0113 int igb_open(struct net_device *);
0114 int igb_close(struct net_device *);
0115 static void igb_configure(struct igb_adapter *);
0116 static void igb_configure_tx(struct igb_adapter *);
0117 static void igb_configure_rx(struct igb_adapter *);
0118 static void igb_clean_all_tx_rings(struct igb_adapter *);
0119 static void igb_clean_all_rx_rings(struct igb_adapter *);
0120 static void igb_clean_tx_ring(struct igb_ring *);
0121 static void igb_clean_rx_ring(struct igb_ring *);
0122 static void igb_set_rx_mode(struct net_device *);
0123 static void igb_update_phy_info(struct timer_list *);
0124 static void igb_watchdog(struct timer_list *);
0125 static void igb_watchdog_task(struct work_struct *);
0126 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
0127 static void igb_get_stats64(struct net_device *dev,
0128 struct rtnl_link_stats64 *stats);
0129 static int igb_change_mtu(struct net_device *, int);
0130 static int igb_set_mac(struct net_device *, void *);
0131 static void igb_set_uta(struct igb_adapter *adapter, bool set);
0132 static irqreturn_t igb_intr(int irq, void *);
0133 static irqreturn_t igb_intr_msi(int irq, void *);
0134 static irqreturn_t igb_msix_other(int irq, void *);
0135 static irqreturn_t igb_msix_ring(int irq, void *);
0136 #ifdef CONFIG_IGB_DCA
0137 static void igb_update_dca(struct igb_q_vector *);
0138 static void igb_setup_dca(struct igb_adapter *);
0139 #endif
0140 static int igb_poll(struct napi_struct *, int);
0141 static bool igb_clean_tx_irq(struct igb_q_vector *, int);
0142 static int igb_clean_rx_irq(struct igb_q_vector *, int);
0143 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
0144 static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
0145 static void igb_reset_task(struct work_struct *);
0146 static void igb_vlan_mode(struct net_device *netdev,
0147 netdev_features_t features);
0148 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
0149 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
0150 static void igb_restore_vlan(struct igb_adapter *);
0151 static void igb_rar_set_index(struct igb_adapter *, u32);
0152 static void igb_ping_all_vfs(struct igb_adapter *);
0153 static void igb_msg_task(struct igb_adapter *);
0154 static void igb_vmm_control(struct igb_adapter *);
0155 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
0156 static void igb_flush_mac_table(struct igb_adapter *);
0157 static int igb_available_rars(struct igb_adapter *, u8);
0158 static void igb_set_default_mac_filter(struct igb_adapter *);
0159 static int igb_uc_sync(struct net_device *, const unsigned char *);
0160 static int igb_uc_unsync(struct net_device *, const unsigned char *);
0161 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
0162 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
0163 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
0164 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
0165 static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
0166 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
0167 bool setting);
0168 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
0169 bool setting);
0170 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
0171 struct ifla_vf_info *ivi);
0172 static void igb_check_vf_rate_limit(struct igb_adapter *);
0173 static void igb_nfc_filter_exit(struct igb_adapter *adapter);
0174 static void igb_nfc_filter_restore(struct igb_adapter *adapter);
0175
0176 #ifdef CONFIG_PCI_IOV
0177 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
0178 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
0179 static int igb_disable_sriov(struct pci_dev *dev);
0180 static int igb_pci_disable_sriov(struct pci_dev *dev);
0181 #endif
0182
0183 static int igb_suspend(struct device *);
0184 static int igb_resume(struct device *);
0185 static int igb_runtime_suspend(struct device *dev);
0186 static int igb_runtime_resume(struct device *dev);
0187 static int igb_runtime_idle(struct device *dev);
0188 static const struct dev_pm_ops igb_pm_ops = {
0189 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
0190 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
0191 igb_runtime_idle)
0192 };
0193 static void igb_shutdown(struct pci_dev *);
0194 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
0195 #ifdef CONFIG_IGB_DCA
0196 static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
0197 static struct notifier_block dca_notifier = {
0198 .notifier_call = igb_notify_dca,
0199 .next = NULL,
0200 .priority = 0
0201 };
0202 #endif
0203 #ifdef CONFIG_PCI_IOV
0204 static unsigned int max_vfs;
0205 module_param(max_vfs, uint, 0);
0206 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
0207 #endif
0208
0209 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
0210 pci_channel_state_t);
0211 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
0212 static void igb_io_resume(struct pci_dev *);
0213
0214 static const struct pci_error_handlers igb_err_handler = {
0215 .error_detected = igb_io_error_detected,
0216 .slot_reset = igb_io_slot_reset,
0217 .resume = igb_io_resume,
0218 };
0219
0220 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
0221
0222 static struct pci_driver igb_driver = {
0223 .name = igb_driver_name,
0224 .id_table = igb_pci_tbl,
0225 .probe = igb_probe,
0226 .remove = igb_remove,
0227 #ifdef CONFIG_PM
0228 .driver.pm = &igb_pm_ops,
0229 #endif
0230 .shutdown = igb_shutdown,
0231 .sriov_configure = igb_pci_sriov_configure,
0232 .err_handler = &igb_err_handler
0233 };
0234
0235 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
0236 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
0237 MODULE_LICENSE("GPL v2");
0238
0239 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
0240 static int debug = -1;
0241 module_param(debug, int, 0);
0242 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
0243
0244 struct igb_reg_info {
0245 u32 ofs;
0246 char *name;
0247 };
0248
0249 static const struct igb_reg_info igb_reg_info_tbl[] = {
0250
0251
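/* General Registers */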
0252 {E1000_CTRL, "CTRL"},
0253 {E1000_STATUS, "STATUS"},
0254 {E1000_CTRL_EXT, "CTRL_EXT"},
0255
0256
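/* Interrupt Registers */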
0257 {E1000_ICR, "ICR"},
0258
0259
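/* Rx Registers */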
0260 {E1000_RCTL, "RCTL"},
0261 {E1000_RDLEN(0), "RDLEN"},
0262 {E1000_RDH(0), "RDH"},
0263 {E1000_RDT(0), "RDT"},
0264 {E1000_RXDCTL(0), "RXDCTL"},
0265 {E1000_RDBAL(0), "RDBAL"},
0266 {E1000_RDBAH(0), "RDBAH"},
0267
0268
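/* Tx Registers */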
0269 {E1000_TCTL, "TCTL"},
0270 {E1000_TDBAL(0), "TDBAL"},
0271 {E1000_TDBAH(0), "TDBAH"},
0272 {E1000_TDLEN(0), "TDLEN"},
0273 {E1000_TDH(0), "TDH"},
0274 {E1000_TDT(0), "TDT"},
0275 {E1000_TXDCTL(0), "TXDCTL"},
0276 {E1000_TDFH, "TDFH"},
0277 {E1000_TDFT, "TDFT"},
0278 {E1000_TDFHS, "TDFHS"},
0279 {E1000_TDFPC, "TDFPC"},
0280
0281
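/* List Terminator */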
0282 {}
0283 };
0284
0285
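/* igb_regdump - register printout routine: prints a single register, or
 * all four instances of a per-queue register, by name.
 */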
0286 static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
0287 {
0288 int n = 0;
0289 char rname[16];
0290 u32 regs[8];
0291
0292 switch (reginfo->ofs) {
0293 case E1000_RDLEN(0):
0294 for (n = 0; n < 4; n++)
0295 regs[n] = rd32(E1000_RDLEN(n));
0296 break;
0297 case E1000_RDH(0):
0298 for (n = 0; n < 4; n++)
0299 regs[n] = rd32(E1000_RDH(n));
0300 break;
0301 case E1000_RDT(0):
0302 for (n = 0; n < 4; n++)
0303 regs[n] = rd32(E1000_RDT(n));
0304 break;
0305 case E1000_RXDCTL(0):
0306 for (n = 0; n < 4; n++)
0307 regs[n] = rd32(E1000_RXDCTL(n));
0308 break;
0309 case E1000_RDBAL(0):
0310 for (n = 0; n < 4; n++)
0311 regs[n] = rd32(E1000_RDBAL(n));
0312 break;
0313 case E1000_RDBAH(0):
0314 for (n = 0; n < 4; n++)
0315 regs[n] = rd32(E1000_RDBAH(n));
0316 break;
0317 case E1000_TDBAL(0):
0318 for (n = 0; n < 4; n++)
0319 regs[n] = rd32(E1000_TDBAL(n));
0320 break;
0321 case E1000_TDBAH(0):
0322 for (n = 0; n < 4; n++)
0323 regs[n] = rd32(E1000_TDBAH(n));
0324 break;
0325 case E1000_TDLEN(0):
0326 for (n = 0; n < 4; n++)
0327 regs[n] = rd32(E1000_TDLEN(n));
0328 break;
0329 case E1000_TDH(0):
0330 for (n = 0; n < 4; n++)
0331 regs[n] = rd32(E1000_TDH(n));
0332 break;
0333 case E1000_TDT(0):
0334 for (n = 0; n < 4; n++)
0335 regs[n] = rd32(E1000_TDT(n));
0336 break;
0337 case E1000_TXDCTL(0):
0338 for (n = 0; n < 4; n++)
0339 regs[n] = rd32(E1000_TXDCTL(n));
0340 break;
0341 default:
0342 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
0343 return;
0344 }
0345
0346 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
0347 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
0348 regs[2], regs[3]);
0349 }
0350
0351
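/* igb_dump - print the netdev state, a register dump, and the Tx/Rx ring
 * contents; output is gated on the adapter's msg_enable (netif_msg_*) level.
 */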
0352 static void igb_dump(struct igb_adapter *adapter)
0353 {
0354 struct net_device *netdev = adapter->netdev;
0355 struct e1000_hw *hw = &adapter->hw;
0356 struct igb_reg_info *reginfo;
0357 struct igb_ring *tx_ring;
0358 union e1000_adv_tx_desc *tx_desc;
0359 struct my_u0 { __le64 a; __le64 b; } *u0;
0360 struct igb_ring *rx_ring;
0361 union e1000_adv_rx_desc *rx_desc;
0362 u32 staterr;
0363 u16 i, n;
0364
0365 if (!netif_msg_hw(adapter))
0366 return;
0367
0368
0369 if (netdev) {
0370 dev_info(&adapter->pdev->dev, "Net device Info\n");
0371 pr_info("Device Name state trans_start\n");
0372 pr_info("%-15s %016lX %016lX\n", netdev->name,
0373 netdev->state, dev_trans_start(netdev));
0374 }
0375
0376
0377 dev_info(&adapter->pdev->dev, "Register Dump\n");
0378 pr_info(" Register Name Value\n");
0379 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
0380 reginfo->name; reginfo++) {
0381 igb_regdump(hw, reginfo);
0382 }
0383
0384
0385 if (!netdev || !netif_running(netdev))
0386 goto exit;
0387
0388 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
0389 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
0390 for (n = 0; n < adapter->num_tx_queues; n++) {
0391 struct igb_tx_buffer *buffer_info;
0392 tx_ring = adapter->tx_ring[n];
0393 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
0394 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
0395 n, tx_ring->next_to_use, tx_ring->next_to_clean,
0396 (u64)dma_unmap_addr(buffer_info, dma),
0397 dma_unmap_len(buffer_info, len),
0398 buffer_info->next_to_watch,
0399 (u64)buffer_info->time_stamp);
0400 }
0401
0402
0403 if (!netif_msg_tx_done(adapter))
0404 goto rx_ring_summary;
0405
0406 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
0407
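/* Transmit descriptor dump legend: each advanced Tx descriptor is printed
 * as its two raw 64-bit words (buffer address and cmd/type/status/length
 * fields), followed by the driver's per-buffer bookkeeping (DMA address,
 * length, next_to_watch, timestamp, skb) and an NTU/NTC marker for the
 * ring indices.
 */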
0419 for (n = 0; n < adapter->num_tx_queues; n++) {
0420 tx_ring = adapter->tx_ring[n];
0421 pr_info("------------------------------------\n");
0422 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
0423 pr_info("------------------------------------\n");
0424 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
0425
0426 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
0427 const char *next_desc;
0428 struct igb_tx_buffer *buffer_info;
0429 tx_desc = IGB_TX_DESC(tx_ring, i);
0430 buffer_info = &tx_ring->tx_buffer_info[i];
0431 u0 = (struct my_u0 *)tx_desc;
0432 if (i == tx_ring->next_to_use &&
0433 i == tx_ring->next_to_clean)
0434 next_desc = " NTC/U";
0435 else if (i == tx_ring->next_to_use)
0436 next_desc = " NTU";
0437 else if (i == tx_ring->next_to_clean)
0438 next_desc = " NTC";
0439 else
0440 next_desc = "";
0441
0442 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
0443 i, le64_to_cpu(u0->a),
0444 le64_to_cpu(u0->b),
0445 (u64)dma_unmap_addr(buffer_info, dma),
0446 dma_unmap_len(buffer_info, len),
0447 buffer_info->next_to_watch,
0448 (u64)buffer_info->time_stamp,
0449 buffer_info->skb, next_desc);
0450
0451 if (netif_msg_pktdata(adapter) && buffer_info->skb)
0452 print_hex_dump(KERN_INFO, "",
0453 DUMP_PREFIX_ADDRESS,
0454 16, 1, buffer_info->skb->data,
0455 dma_unmap_len(buffer_info, len),
0456 true);
0457 }
0458 }
0459
0460
0461 rx_ring_summary:
0462 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
0463 pr_info("Queue [NTU] [NTC]\n");
0464 for (n = 0; n < adapter->num_rx_queues; n++) {
0465 rx_ring = adapter->rx_ring[n];
0466 pr_info(" %5d %5X %5X\n",
0467 n, rx_ring->next_to_use, rx_ring->next_to_clean);
0468 }
0469
0470
0471 if (!netif_msg_rx_status(adapter))
0472 goto exit;
0473
0474 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
0475
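/* Receive descriptor dump legend: descriptors not yet written back are
 * printed in "read" format (packet buffer and header buffer addresses),
 * while completed descriptors are printed in the hardware write-back
 * format (status, checksum, length, VLAN, errors), selected by the DD
 * status bit below.
 */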
0497 for (n = 0; n < adapter->num_rx_queues; n++) {
0498 rx_ring = adapter->rx_ring[n];
0499 pr_info("------------------------------------\n");
0500 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
0501 pr_info("------------------------------------\n");
0502 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
0503 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
0504
0505 for (i = 0; i < rx_ring->count; i++) {
0506 const char *next_desc;
0507 struct igb_rx_buffer *buffer_info;
0508 buffer_info = &rx_ring->rx_buffer_info[i];
0509 rx_desc = IGB_RX_DESC(rx_ring, i);
0510 u0 = (struct my_u0 *)rx_desc;
0511 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
0512
0513 if (i == rx_ring->next_to_use)
0514 next_desc = " NTU";
0515 else if (i == rx_ring->next_to_clean)
0516 next_desc = " NTC";
0517 else
0518 next_desc = "";
0519
0520 if (staterr & E1000_RXD_STAT_DD) {
0521
0522 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
0523 "RWB", i,
0524 le64_to_cpu(u0->a),
0525 le64_to_cpu(u0->b),
0526 next_desc);
0527 } else {
0528 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
0529 "R ", i,
0530 le64_to_cpu(u0->a),
0531 le64_to_cpu(u0->b),
0532 (u64)buffer_info->dma,
0533 next_desc);
0534
0535 if (netif_msg_pktdata(adapter) &&
0536 buffer_info->dma && buffer_info->page) {
0537 print_hex_dump(KERN_INFO, "",
0538 DUMP_PREFIX_ADDRESS,
0539 16, 1,
0540 page_address(buffer_info->page) +
0541 buffer_info->page_offset,
0542 igb_rx_bufsz(rx_ring), true);
0543 }
0544 }
0545 }
0546 }
0547
0548 exit:
0549 return;
0550 }
0551
0552
0553
0554
0555
0556
0557
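/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: opaque pointer to adapter struct
 *
 *  Returns the I2C data bit value
 **/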
0558 static int igb_get_i2c_data(void *data)
0559 {
0560 struct igb_adapter *adapter = (struct igb_adapter *)data;
0561 struct e1000_hw *hw = &adapter->hw;
0562 s32 i2cctl = rd32(E1000_I2CPARAMS);
0563
0564 return !!(i2cctl & E1000_I2C_DATA_IN);
0565 }
0566
0567
0568
0569
0570
0571
0572
0573
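/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: opaque pointer to adapter struct
 *  @state: I2C data value (0 or 1) to set
 **/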
0574 static void igb_set_i2c_data(void *data, int state)
0575 {
0576 struct igb_adapter *adapter = (struct igb_adapter *)data;
0577 struct e1000_hw *hw = &adapter->hw;
0578 s32 i2cctl = rd32(E1000_I2CPARAMS);
0579
0580 if (state) {
0581 i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
0582 } else {
0583 i2cctl &= ~E1000_I2C_DATA_OE_N;
0584 i2cctl &= ~E1000_I2C_DATA_OUT;
0585 }
0586
0587 wr32(E1000_I2CPARAMS, i2cctl);
0588 wrfl();
0589 }
0590
0591
0592
0593
0594
0595
0596
0597
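/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: opaque pointer to adapter struct
 *  @state: state to set the I2C clock line to
 **/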
0598 static void igb_set_i2c_clk(void *data, int state)
0599 {
0600 struct igb_adapter *adapter = (struct igb_adapter *)data;
0601 struct e1000_hw *hw = &adapter->hw;
0602 s32 i2cctl = rd32(E1000_I2CPARAMS);
0603
0604 if (state) {
0605 i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N;
0606 } else {
0607 i2cctl &= ~E1000_I2C_CLK_OUT;
0608 i2cctl &= ~E1000_I2C_CLK_OE_N;
0609 }
0610 wr32(E1000_I2CPARAMS, i2cctl);
0611 wrfl();
0612 }
0613
0614
0615
0616
0617
0618
0619
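/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: opaque pointer to adapter struct
 *
 *  Returns the I2C clock bit value
 **/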
0620 static int igb_get_i2c_clk(void *data)
0621 {
0622 struct igb_adapter *adapter = (struct igb_adapter *)data;
0623 struct e1000_hw *hw = &adapter->hw;
0624 s32 i2cctl = rd32(E1000_I2CPARAMS);
0625
0626 return !!(i2cctl & E1000_I2C_CLK_IN);
0627 }
0628
0629 static const struct i2c_algo_bit_data igb_i2c_algo = {
0630 .setsda = igb_set_i2c_data,
0631 .setscl = igb_set_i2c_clk,
0632 .getsda = igb_get_i2c_data,
0633 .getscl = igb_get_i2c_clk,
0634 .udelay = 5,
0635 .timeout = 20,
0636 };
0637
0638
0639
0640
0641
0642
0643
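/**
 *  igb_get_hw_dev - return the net_device associated with the hardware
 *  @hw: pointer to hardware structure
 *
 *  Used by the hardware layer to print debugging information.
 **/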
0644 struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
0645 {
0646 struct igb_adapter *adapter = hw->back;
0647 return adapter->netdev;
0648 }
0649
0650
0651
0652
0653
0654
0655
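/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is loaded.
 *  All it does is register with the PCI subsystem (and the DCA notifier
 *  when CONFIG_IGB_DCA is enabled).
 **/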
0656 static int __init igb_init_module(void)
0657 {
0658 int ret;
0659
0660 pr_info("%s\n", igb_driver_string);
0661 pr_info("%s\n", igb_copyright);
0662
0663 #ifdef CONFIG_IGB_DCA
0664 dca_register_notify(&dca_notifier);
0665 #endif
0666 ret = pci_register_driver(&igb_driver);
0667 return ret;
0668 }
0669
0670 module_init(igb_init_module);
0671
0672
0673
0674
0675
0676
0677
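/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed from
 *  memory.
 **/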
0678 static void __exit igb_exit_module(void)
0679 {
0680 #ifdef CONFIG_IGB_DCA
0681 dca_unregister_notify(&dca_notifier);
0682 #endif
0683 pci_unregister_driver(&igb_driver);
0684 }
0685
0686 module_exit(igb_exit_module);
0687
0688 #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
0689
0690
0691
0692
0693
0694
0695
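/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure
 *
 *  Once the feature set of the device is known, cache the hardware
 *  register index (reg_idx) each descriptor ring is assigned to.  On
 *  82576 with VFs enabled the PF queues are interleaved with the VF
 *  queue space, hence the Q_IDX_82576 remapping.
 **/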
0696 static void igb_cache_ring_register(struct igb_adapter *adapter)
0697 {
0698 int i = 0, j = 0;
0699 u32 rbase_offset = adapter->vfs_allocated_count;
0700
0701 switch (adapter->hw.mac.type) {
0702 case e1000_82576:
0703
0704
0705
0706
0707
0708 if (adapter->vfs_allocated_count) {
0709 for (; i < adapter->rss_queues; i++)
0710 adapter->rx_ring[i]->reg_idx = rbase_offset +
0711 Q_IDX_82576(i);
0712 }
0713 fallthrough;
0714 case e1000_82575:
0715 case e1000_82580:
0716 case e1000_i350:
0717 case e1000_i354:
0718 case e1000_i210:
0719 case e1000_i211:
0720 default:
0721 for (; i < adapter->num_rx_queues; i++)
0722 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
0723 for (; j < adapter->num_tx_queues; j++)
0724 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
0725 break;
0726 }
0727 }
0728
0729 u32 igb_rd32(struct e1000_hw *hw, u32 reg)
0730 {
0731 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
0732 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
0733 u32 value = 0;
0734
0735 if (E1000_REMOVED(hw_addr))
0736 return ~value;
0737
0738 value = readl(&hw_addr[reg]);
0739
0740
0741 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
0742 struct net_device *netdev = igb->netdev;
0743 hw->hw_addr = NULL;
0744 netdev_err(netdev, "PCIe link lost\n");
0745 WARN(pci_device_is_present(igb->pdev),
0746 "igb: Failed to read reg 0x%x!\n", reg);
0747 }
0748
0749 return value;
0750 }
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761
0762
0763
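/**
 *  igb_write_ivar - configure one field of an IVAR table entry
 *  @hw: pointer to the HW structure
 *  @msix_vector: MSI-X vector number to assign
 *  @index: IVAR register index (selects the 32-bit entry)
 *  @offset: bit offset of the 8-bit field within that entry
 *
 *  Each IVAR entry holds four 8-bit queue-to-vector assignments; this
 *  clears the selected field and writes the vector number with the
 *  valid bit set.
 **/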
0764 static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
0765 int index, int offset)
0766 {
0767 u32 ivar = array_rd32(E1000_IVAR0, index);
0768
0769
0770 ivar &= ~((u32)0xFF << offset);
0771
0772
0773 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
0774
0775 array_wr32(E1000_IVAR0, index, ivar);
0776 }
0777
0778 #define IGB_N0_QUEUE -1
0779 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
0780 {
0781 struct igb_adapter *adapter = q_vector->adapter;
0782 struct e1000_hw *hw = &adapter->hw;
0783 int rx_queue = IGB_N0_QUEUE;
0784 int tx_queue = IGB_N0_QUEUE;
0785 u32 msixbm = 0;
0786
0787 if (q_vector->rx.ring)
0788 rx_queue = q_vector->rx.ring->reg_idx;
0789 if (q_vector->tx.ring)
0790 tx_queue = q_vector->tx.ring->reg_idx;
0791
0792 switch (hw->mac.type) {
0793 case e1000_82575:
0794
0795
0796
0797
0798
0799 if (rx_queue > IGB_N0_QUEUE)
0800 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
0801 if (tx_queue > IGB_N0_QUEUE)
0802 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
0803 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
0804 msixbm |= E1000_EIMS_OTHER;
0805 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
0806 q_vector->eims_value = msixbm;
0807 break;
0808 case e1000_82576:
0809
0810
0811
0812
0813
0814 if (rx_queue > IGB_N0_QUEUE)
0815 igb_write_ivar(hw, msix_vector,
0816 rx_queue & 0x7,
0817 (rx_queue & 0x8) << 1);
0818 if (tx_queue > IGB_N0_QUEUE)
0819 igb_write_ivar(hw, msix_vector,
0820 tx_queue & 0x7,
0821 ((tx_queue & 0x8) << 1) + 8);
0822 q_vector->eims_value = BIT(msix_vector);
0823 break;
0824 case e1000_82580:
0825 case e1000_i350:
0826 case e1000_i354:
0827 case e1000_i210:
0828 case e1000_i211:
0829
0830
0831
0832
0833
0834
0835 if (rx_queue > IGB_N0_QUEUE)
0836 igb_write_ivar(hw, msix_vector,
0837 rx_queue >> 1,
0838 (rx_queue & 0x1) << 4);
0839 if (tx_queue > IGB_N0_QUEUE)
0840 igb_write_ivar(hw, msix_vector,
0841 tx_queue >> 1,
0842 ((tx_queue & 0x1) << 4) + 8);
0843 q_vector->eims_value = BIT(msix_vector);
0844 break;
0845 default:
0846 BUG();
0847 break;
0848 }
0849
0850
0851 adapter->eims_enable_mask |= q_vector->eims_value;
0852
0853
0854 q_vector->set_itr = 1;
0855 }
0856
0857
0858
0859
0860
0861
0862
0863
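/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure
 *
 *  Sets up the hardware to properly generate MSI-X interrupts: maps the
 *  "other causes" vector and each queue vector through the MSIXBM/IVAR
 *  registers, building eims_enable_mask along the way.
 **/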
0864 static void igb_configure_msix(struct igb_adapter *adapter)
0865 {
0866 u32 tmp;
0867 int i, vector = 0;
0868 struct e1000_hw *hw = &adapter->hw;
0869
0870 adapter->eims_enable_mask = 0;
0871
0872
0873 switch (hw->mac.type) {
0874 case e1000_82575:
0875 tmp = rd32(E1000_CTRL_EXT);
0876
0877 tmp |= E1000_CTRL_EXT_PBA_CLR;
0878
0879
0880 tmp |= E1000_CTRL_EXT_EIAME;
0881 tmp |= E1000_CTRL_EXT_IRCA;
0882
0883 wr32(E1000_CTRL_EXT, tmp);
0884
0885
0886 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
0887 adapter->eims_other = E1000_EIMS_OTHER;
0888
0889 break;
0890
0891 case e1000_82576:
0892 case e1000_82580:
0893 case e1000_i350:
0894 case e1000_i354:
0895 case e1000_i210:
0896 case e1000_i211:
0897
0898
0899
0900 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
0901 E1000_GPIE_PBA | E1000_GPIE_EIAME |
0902 E1000_GPIE_NSICR);
0903
0904
0905 adapter->eims_other = BIT(vector);
0906 tmp = (vector++ | E1000_IVAR_VALID) << 8;
0907
0908 wr32(E1000_IVAR_MISC, tmp);
0909 break;
0910 default:
0911
0912 break;
0913 }
0914
0915 adapter->eims_enable_mask |= adapter->eims_other;
0916
0917 for (i = 0; i < adapter->num_q_vectors; i++)
0918 igb_assign_vector(adapter->q_vector[i], vector++);
0919
0920 wrfl();
0921 }
0922
0923
0924
0925
0926
0927
0928
0929
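/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure
 *
 *  Requests one IRQ for "other" causes (link changes, mailbox) and one
 *  IRQ per queue vector, then programs the MSI-X hardware.
 **/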
0930 static int igb_request_msix(struct igb_adapter *adapter)
0931 {
0932 unsigned int num_q_vectors = adapter->num_q_vectors;
0933 struct net_device *netdev = adapter->netdev;
0934 int i, err = 0, vector = 0, free_vector = 0;
0935
0936 err = request_irq(adapter->msix_entries[vector].vector,
0937 igb_msix_other, 0, netdev->name, adapter);
0938 if (err)
0939 goto err_out;
0940
0941 if (num_q_vectors > MAX_Q_VECTORS) {
0942 num_q_vectors = MAX_Q_VECTORS;
0943 dev_warn(&adapter->pdev->dev,
0944 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
0945 adapter->num_q_vectors, MAX_Q_VECTORS);
0946 }
0947 for (i = 0; i < num_q_vectors; i++) {
0948 struct igb_q_vector *q_vector = adapter->q_vector[i];
0949
0950 vector++;
0951
0952 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
0953
0954 if (q_vector->rx.ring && q_vector->tx.ring)
0955 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
0956 q_vector->rx.ring->queue_index);
0957 else if (q_vector->tx.ring)
0958 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
0959 q_vector->tx.ring->queue_index);
0960 else if (q_vector->rx.ring)
0961 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
0962 q_vector->rx.ring->queue_index);
0963 else
0964 sprintf(q_vector->name, "%s-unused", netdev->name);
0965
0966 err = request_irq(adapter->msix_entries[vector].vector,
0967 igb_msix_ring, 0, q_vector->name,
0968 q_vector);
0969 if (err)
0970 goto err_free;
0971 }
0972
0973 igb_configure_msix(adapter);
0974 return 0;
0975
0976 err_free:
0977
0978 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
0979
0980 vector--;
0981 for (i = 0; i < vector; i++) {
0982 free_irq(adapter->msix_entries[free_vector++].vector,
0983 adapter->q_vector[i]);
0984 }
0985 err_out:
0986 return err;
0987 }
0988
0989
0990
0991
0992
0993
0994
0995
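/**
 *  igb_free_q_vector - Free memory allocated for a specific interrupt vector
 *  @adapter: board private structure
 *  @v_idx: index of the vector to be freed
 *
 *  Frees the q_vector (and the rings embedded in it) via RCU, so that a
 *  concurrent reader such as the stats path can finish before the memory
 *  goes away.
 **/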
0996 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
0997 {
0998 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
0999
1000 adapter->q_vector[v_idx] = NULL;
1001
1002
1003
1004
1005 if (q_vector)
1006 kfree_rcu(q_vector, rcu);
1007 }
1008
1009
1010
1011
1012
1013
1014
1015
1016
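/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure
 *  @v_idx: index of the vector to be reset
 *
 *  Detaches the rings owned by this q_vector from the adapter and removes
 *  its NAPI instance, but leaves the q_vector allocation in place so it
 *  can be reused.
 **/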
1017 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1018 {
1019 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1020
1021
1022
1023
1024 if (!q_vector)
1025 return;
1026
1027 if (q_vector->tx.ring)
1028 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1029
1030 if (q_vector->rx.ring)
1031 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1032
1033 netif_napi_del(&q_vector->napi);
1034
1035 }
1036
1037 static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1038 {
1039 int v_idx = adapter->num_q_vectors;
1040
1041 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1042 pci_disable_msix(adapter->pdev);
1043 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1044 pci_disable_msi(adapter->pdev);
1045
1046 while (v_idx--)
1047 igb_reset_q_vector(adapter, v_idx);
1048 }
1049
1050
1051
1052
1053
1054
1055
1056
1057
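/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure
 *
 *  Frees all q_vectors and resets the queue and vector counts to zero.
 **/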
1058 static void igb_free_q_vectors(struct igb_adapter *adapter)
1059 {
1060 int v_idx = adapter->num_q_vectors;
1061
1062 adapter->num_tx_queues = 0;
1063 adapter->num_rx_queues = 0;
1064 adapter->num_q_vectors = 0;
1065
1066 while (v_idx--) {
1067 igb_reset_q_vector(adapter, v_idx);
1068 igb_free_q_vector(adapter, v_idx);
1069 }
1070 }
1071
1072
1073
1074
1075
1076
1077
1078
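/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure
 *
 *  Frees the q_vectors and then releases the MSI-X/MSI resources so that
 *  the interrupt scheme can be reinitialized from scratch.
 **/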
1079 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1080 {
1081 igb_free_q_vectors(adapter);
1082 igb_reset_interrupt_capability(adapter);
1083 }
1084
1085
1086
1087
1088
1089
1090
1091
1092
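/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure
 *  @msix: true if MSI-X should be attempted
 *
 *  Attempts MSI-X with one vector per queue (pair) plus one for link and
 *  other causes; if that fails, falls back to a single-queue MSI/legacy
 *  configuration and disables SR-IOV.
 **/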
1093 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1094 {
1095 int err;
1096 int numvecs, i;
1097
1098 if (!msix)
1099 goto msi_only;
1100 adapter->flags |= IGB_FLAG_HAS_MSIX;
1101
1102
1103 adapter->num_rx_queues = adapter->rss_queues;
1104 if (adapter->vfs_allocated_count)
1105 adapter->num_tx_queues = 1;
1106 else
1107 adapter->num_tx_queues = adapter->rss_queues;
1108
1109
1110 numvecs = adapter->num_rx_queues;
1111
1112
1113 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1114 numvecs += adapter->num_tx_queues;
1115
1116
1117 adapter->num_q_vectors = numvecs;
1118
1119
1120 numvecs++;
1121 for (i = 0; i < numvecs; i++)
1122 adapter->msix_entries[i].entry = i;
1123
1124 err = pci_enable_msix_range(adapter->pdev,
1125 adapter->msix_entries,
1126 numvecs,
1127 numvecs);
1128 if (err > 0)
1129 return;
1130
1131 igb_reset_interrupt_capability(adapter);
1132
1133
1134 msi_only:
1135 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1136 #ifdef CONFIG_PCI_IOV
1137
1138 if (adapter->vf_data) {
1139 struct e1000_hw *hw = &adapter->hw;
1140
1141 pci_disable_sriov(adapter->pdev);
1142 msleep(500);
1143
1144 kfree(adapter->vf_mac_list);
1145 adapter->vf_mac_list = NULL;
1146 kfree(adapter->vf_data);
1147 adapter->vf_data = NULL;
1148 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1149 wrfl();
1150 msleep(100);
1151 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1152 }
1153 #endif
1154 adapter->vfs_allocated_count = 0;
1155 adapter->rss_queues = 1;
1156 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1157 adapter->num_rx_queues = 1;
1158 adapter->num_tx_queues = 1;
1159 adapter->num_q_vectors = 1;
1160 if (!pci_enable_msi(adapter->pdev))
1161 adapter->flags |= IGB_FLAG_HAS_MSI;
1162 }
1163
1164 static void igb_add_ring(struct igb_ring *ring,
1165 struct igb_ring_container *head)
1166 {
1167 head->ring = ring;
1168 head->count++;
1169 }
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
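/**
 *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 *  @adapter: board private structure
 *  @v_count: total number of q_vectors allocated on the adapter
 *  @v_idx: index of this vector in the adapter struct
 *  @txr_count: number of Tx rings for this vector (0 or 1)
 *  @txr_idx: index of the Tx ring to allocate
 *  @rxr_count: number of Rx rings for this vector (0 or 1)
 *  @rxr_idx: index of the Rx ring to allocate
 *
 *  Allocates (or reuses) one q_vector together with the ring(s) it owns,
 *  initializes NAPI and the default ITR, and links the rings into the
 *  adapter's ring tables.
 **/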
1183 static int igb_alloc_q_vector(struct igb_adapter *adapter,
1184 int v_count, int v_idx,
1185 int txr_count, int txr_idx,
1186 int rxr_count, int rxr_idx)
1187 {
1188 struct igb_q_vector *q_vector;
1189 struct igb_ring *ring;
1190 int ring_count;
1191 size_t size;
1192
1193
1194 if (txr_count > 1 || rxr_count > 1)
1195 return -ENOMEM;
1196
1197 ring_count = txr_count + rxr_count;
1198 size = struct_size(q_vector, ring, ring_count);
1199
1200
1201 q_vector = adapter->q_vector[v_idx];
1202 if (!q_vector) {
1203 q_vector = kzalloc(size, GFP_KERNEL);
1204 } else if (size > ksize(q_vector)) {
1205 kfree_rcu(q_vector, rcu);
1206 q_vector = kzalloc(size, GFP_KERNEL);
1207 } else {
1208 memset(q_vector, 0, size);
1209 }
1210 if (!q_vector)
1211 return -ENOMEM;
1212
1213
1214 netif_napi_add(adapter->netdev, &q_vector->napi,
1215 igb_poll, 64);
1216
1217
1218 adapter->q_vector[v_idx] = q_vector;
1219 q_vector->adapter = adapter;
1220
1221
1222 q_vector->tx.work_limit = adapter->tx_work_limit;
1223
1224
1225 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1226 q_vector->itr_val = IGB_START_ITR;
1227
1228
1229 ring = q_vector->ring;
1230
1231
1232 if (rxr_count) {
1233
1234 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1235 q_vector->itr_val = adapter->rx_itr_setting;
1236 } else {
1237
1238 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1239 q_vector->itr_val = adapter->tx_itr_setting;
1240 }
1241
1242 if (txr_count) {
1243
1244 ring->dev = &adapter->pdev->dev;
1245 ring->netdev = adapter->netdev;
1246
1247
1248 ring->q_vector = q_vector;
1249
1250
1251 igb_add_ring(ring, &q_vector->tx);
1252
1253
1254 if (adapter->hw.mac.type == e1000_82575)
1255 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1256
1257
1258 ring->count = adapter->tx_ring_count;
1259 ring->queue_index = txr_idx;
1260
1261 ring->cbs_enable = false;
1262 ring->idleslope = 0;
1263 ring->sendslope = 0;
1264 ring->hicredit = 0;
1265 ring->locredit = 0;
1266
1267 u64_stats_init(&ring->tx_syncp);
1268 u64_stats_init(&ring->tx_syncp2);
1269
1270
1271 adapter->tx_ring[txr_idx] = ring;
1272
1273
1274 ring++;
1275 }
1276
1277 if (rxr_count) {
1278
1279 ring->dev = &adapter->pdev->dev;
1280 ring->netdev = adapter->netdev;
1281
1282
1283 ring->q_vector = q_vector;
1284
1285
1286 igb_add_ring(ring, &q_vector->rx);
1287
1288
1289 if (adapter->hw.mac.type >= e1000_82576)
1290 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1291
1292
1293
1294
1295 if (adapter->hw.mac.type >= e1000_i350)
1296 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1297
1298
1299 ring->count = adapter->rx_ring_count;
1300 ring->queue_index = rxr_idx;
1301
1302 u64_stats_init(&ring->rx_syncp);
1303
1304
1305 adapter->rx_ring[rxr_idx] = ring;
1306 }
1307
1308 return 0;
1309 }
1310
1311
1312
1313
1314
1315
1316
1317
1318
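/**
 *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
 *  @adapter: board private structure
 *
 *  Allocates one q_vector per interrupt vector and spreads the Tx and Rx
 *  queues across them; when there are enough vectors, Rx queues get
 *  dedicated vectors first.
 **/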
1319 static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1320 {
1321 int q_vectors = adapter->num_q_vectors;
1322 int rxr_remaining = adapter->num_rx_queues;
1323 int txr_remaining = adapter->num_tx_queues;
1324 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1325 int err;
1326
1327 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1328 for (; rxr_remaining; v_idx++) {
1329 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1330 0, 0, 1, rxr_idx);
1331
1332 if (err)
1333 goto err_out;
1334
1335
1336 rxr_remaining--;
1337 rxr_idx++;
1338 }
1339 }
1340
1341 for (; v_idx < q_vectors; v_idx++) {
1342 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1343 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1344
1345 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1346 tqpv, txr_idx, rqpv, rxr_idx);
1347
1348 if (err)
1349 goto err_out;
1350
1351
1352 rxr_remaining -= rqpv;
1353 txr_remaining -= tqpv;
1354 rxr_idx++;
1355 txr_idx++;
1356 }
1357
1358 return 0;
1359
1360 err_out:
1361 adapter->num_tx_queues = 0;
1362 adapter->num_rx_queues = 0;
1363 adapter->num_q_vectors = 0;
1364
1365 while (v_idx--)
1366 igb_free_q_vector(adapter, v_idx);
1367
1368 return -ENOMEM;
1369 }
1370
1371
1372
1373
1374
1375
1376
1377
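/**
 *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *  @adapter: board private structure
 *  @msix: true if MSI-X should be attempted
 *
 *  Determines the interrupt scheme to use, allocates the q_vectors and
 *  rings, and caches the ring register indices.
 **/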
1378 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1379 {
1380 struct pci_dev *pdev = adapter->pdev;
1381 int err;
1382
1383 igb_set_interrupt_capability(adapter, msix);
1384
1385 err = igb_alloc_q_vectors(adapter);
1386 if (err) {
1387 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1388 goto err_alloc_q_vectors;
1389 }
1390
1391 igb_cache_ring_register(adapter);
1392
1393 return 0;
1394
1395 err_alloc_q_vectors:
1396 igb_reset_interrupt_capability(adapter);
1397 return err;
1398 }
1399
1400
1401
1402
1403
1404
1405
1406
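/**
 *  igb_request_irq - initialize interrupts
 *  @adapter: board private structure
 *
 *  Attempts to request the MSI-X vectors; on failure the queue resources
 *  are rebuilt for a single vector and the driver falls back to MSI and
 *  finally to a legacy shared interrupt.
 **/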
1407 static int igb_request_irq(struct igb_adapter *adapter)
1408 {
1409 struct net_device *netdev = adapter->netdev;
1410 struct pci_dev *pdev = adapter->pdev;
1411 int err = 0;
1412
1413 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1414 err = igb_request_msix(adapter);
1415 if (!err)
1416 goto request_done;
1417
1418 igb_free_all_tx_resources(adapter);
1419 igb_free_all_rx_resources(adapter);
1420
1421 igb_clear_interrupt_scheme(adapter);
1422 err = igb_init_interrupt_scheme(adapter, false);
1423 if (err)
1424 goto request_done;
1425
1426 igb_setup_all_tx_resources(adapter);
1427 igb_setup_all_rx_resources(adapter);
1428 igb_configure(adapter);
1429 }
1430
1431 igb_assign_vector(adapter->q_vector[0], 0);
1432
1433 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1434 err = request_irq(pdev->irq, igb_intr_msi, 0,
1435 netdev->name, adapter);
1436 if (!err)
1437 goto request_done;
1438
1439
1440 igb_reset_interrupt_capability(adapter);
1441 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1442 }
1443
1444 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1445 netdev->name, adapter);
1446
1447 if (err)
1448 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1449 err);
1450
1451 request_done:
1452 return err;
1453 }
1454
1455 static void igb_free_irq(struct igb_adapter *adapter)
1456 {
1457 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1458 int vector = 0, i;
1459
1460 free_irq(adapter->msix_entries[vector++].vector, adapter);
1461
1462 for (i = 0; i < adapter->num_q_vectors; i++)
1463 free_irq(adapter->msix_entries[vector++].vector,
1464 adapter->q_vector[i]);
1465 } else {
1466 free_irq(adapter->pdev->irq, adapter);
1467 }
1468 }
1469
1470
1471
1472
1473
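/**
 *  igb_irq_disable - Mask off interrupt generation on the NIC
 *  @adapter: board private structure
 **/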
1474 static void igb_irq_disable(struct igb_adapter *adapter)
1475 {
1476 struct e1000_hw *hw = &adapter->hw;
1477
1478
1479
1480
1481
1482 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1483 u32 regval = rd32(E1000_EIAM);
1484
1485 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1486 wr32(E1000_EIMC, adapter->eims_enable_mask);
1487 regval = rd32(E1000_EIAC);
1488 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1489 }
1490
1491 wr32(E1000_IAM, 0);
1492 wr32(E1000_IMC, ~0);
1493 wrfl();
1494 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1495 int i;
1496
1497 for (i = 0; i < adapter->num_q_vectors; i++)
1498 synchronize_irq(adapter->msix_entries[i].vector);
1499 } else {
1500 synchronize_irq(adapter->pdev->irq);
1501 }
1502 }
1503
1504
1505
1506
1507
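/**
 *  igb_irq_enable - Enable default interrupt generation settings
 *  @adapter: board private structure
 **/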
1508 static void igb_irq_enable(struct igb_adapter *adapter)
1509 {
1510 struct e1000_hw *hw = &adapter->hw;
1511
1512 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1513 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1514 u32 regval = rd32(E1000_EIAC);
1515
1516 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1517 regval = rd32(E1000_EIAM);
1518 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1519 wr32(E1000_EIMS, adapter->eims_enable_mask);
1520 if (adapter->vfs_allocated_count) {
1521 wr32(E1000_MBVFIMR, 0xFF);
1522 ims |= E1000_IMS_VMMB;
1523 }
1524 wr32(E1000_IMS, ims);
1525 } else {
1526 wr32(E1000_IMS, IMS_ENABLE_MASK |
1527 E1000_IMS_DRSTA);
1528 wr32(E1000_IAM, IMS_ENABLE_MASK |
1529 E1000_IMS_DRSTA);
1530 }
1531 }
1532
1533 static void igb_update_mng_vlan(struct igb_adapter *adapter)
1534 {
1535 struct e1000_hw *hw = &adapter->hw;
1536 u16 pf_id = adapter->vfs_allocated_count;
1537 u16 vid = adapter->hw.mng_cookie.vlan_id;
1538 u16 old_vid = adapter->mng_vlan_id;
1539
1540 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1541
1542 igb_vfta_set(hw, vid, pf_id, true, true);
1543 adapter->mng_vlan_id = vid;
1544 } else {
1545 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1546 }
1547
1548 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1549 (vid != old_vid) &&
1550 !test_bit(old_vid, adapter->active_vlans)) {
1551
1552 igb_vfta_set(hw, vid, pf_id, false, true);
1553 }
1554 }
1555
1556
1557
1558
1559
1560
1561
1562
1563
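/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: board private structure
 *
 *  Clears CTRL_EXT:DRV_LOAD, letting the firmware know that the driver
 *  has released control of the hardware (e.g. on remove or suspend).
 **/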
1564 static void igb_release_hw_control(struct igb_adapter *adapter)
1565 {
1566 struct e1000_hw *hw = &adapter->hw;
1567 u32 ctrl_ext;
1568
1569
1570 ctrl_ext = rd32(E1000_CTRL_EXT);
1571 wr32(E1000_CTRL_EXT,
1572 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1573 }
1574
1575
1576
1577
1578
1579
1580
1581
1582
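/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: board private structure
 *
 *  Sets CTRL_EXT:DRV_LOAD, letting the firmware know that the driver has
 *  taken control of the hardware.
 **/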
1583 static void igb_get_hw_control(struct igb_adapter *adapter)
1584 {
1585 struct e1000_hw *hw = &adapter->hw;
1586 u32 ctrl_ext;
1587
1588
1589 ctrl_ext = rd32(E1000_CTRL_EXT);
1590 wr32(E1000_CTRL_EXT,
1591 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1592 }
1593
1594 static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1595 {
1596 struct net_device *netdev = adapter->netdev;
1597 struct e1000_hw *hw = &adapter->hw;
1598
1599 WARN_ON(hw->mac.type != e1000_i210);
1600
1601 if (enable)
1602 adapter->flags |= IGB_FLAG_FQTSS;
1603 else
1604 adapter->flags &= ~IGB_FLAG_FQTSS;
1605
1606 if (netif_running(netdev))
1607 schedule_work(&adapter->reset_task);
1608 }
1609
1610 static bool is_fqtss_enabled(struct igb_adapter *adapter)
1611 {
1612 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1613 }
1614
1615 static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1616 enum tx_queue_prio prio)
1617 {
1618 u32 val;
1619
1620 WARN_ON(hw->mac.type != e1000_i210);
1621 WARN_ON(queue < 0 || queue > 4);
1622
1623 val = rd32(E1000_I210_TXDCTL(queue));
1624
1625 if (prio == TX_QUEUE_PRIO_HIGH)
1626 val |= E1000_TXDCTL_PRIORITY;
1627 else
1628 val &= ~E1000_TXDCTL_PRIORITY;
1629
1630 wr32(E1000_I210_TXDCTL(queue), val);
1631 }
1632
1633 static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1634 {
1635 u32 val;
1636
1637 WARN_ON(hw->mac.type != e1000_i210);
1638 WARN_ON(queue < 0 || queue > 1);
1639
1640 val = rd32(E1000_I210_TQAVCC(queue));
1641
1642 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1643 val |= E1000_TQAVCC_QUEUEMODE;
1644 else
1645 val &= ~E1000_TQAVCC_QUEUEMODE;
1646
1647 wr32(E1000_I210_TQAVCC(queue), val);
1648 }
1649
1650 static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1651 {
1652 int i;
1653
1654 for (i = 0; i < adapter->num_tx_queues; i++) {
1655 if (adapter->tx_ring[i]->cbs_enable)
1656 return true;
1657 }
1658
1659 return false;
1660 }
1661
1662 static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1663 {
1664 int i;
1665
1666 for (i = 0; i < adapter->num_tx_queues; i++) {
1667 if (adapter->tx_ring[i]->launchtime_enable)
1668 return true;
1669 }
1670
1671 return false;
1672 }
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
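/**
 *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *
 *  Configures CBS (credit-based shaping) and launchtime for the given
 *  hardware queue; the parameters are expected to have been validated and
 *  saved on the ring beforehand (see igb_save_cbs_params() and
 *  igb_save_txtime_params()).
 **/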
1684 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1685 {
1686 struct net_device *netdev = adapter->netdev;
1687 struct e1000_hw *hw = &adapter->hw;
1688 struct igb_ring *ring;
1689 u32 tqavcc, tqavctrl;
1690 u16 value;
1691
1692 WARN_ON(hw->mac.type != e1000_i210);
1693 WARN_ON(queue < 0 || queue > 1);
1694 ring = adapter->tx_ring[queue];
1695
1696
1697
1698
1699
1700 if (ring->cbs_enable || ring->launchtime_enable) {
1701 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1702 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1703 } else {
1704 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1705 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1706 }
1707
1708
1709 if (ring->cbs_enable || queue == 0) {
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719 if (queue == 0 && !ring->cbs_enable) {
1720
1721 ring->idleslope = 1000000;
1722 ring->hicredit = ETH_FRAME_LEN;
1723 }
1724
1725
1726
1727
1728
1729 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1730 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1731 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1732
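/* Program the credit-based shaper for this queue.  idleslope comes from
 * user space in kilobits per second (via the CBS qdisc offload) and is
 * converted here into the credit units of the TQAVCC idleSlope field;
 * with this scaling, full 1 Gb/s line rate corresponds to roughly
 * 61034 (0xEE6A).  hicredit is given in bytes, scaled by 0x7735 and
 * offset by 0x80000000 before being written to TQAVHC.  The scaling
 * constants follow from the i210's credit increment granularity; see
 * the i210 datasheet for the derivation.
 */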
1790 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1791
1792 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1793 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1794 tqavcc |= value;
1795 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1796
1797 wr32(E1000_I210_TQAVHC(queue),
1798 0x80000000 + ring->hicredit * 0x7735);
1799 } else {
1800
1801
1802 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1803 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1804 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1805
1806
1807 wr32(E1000_I210_TQAVHC(queue), 0);
1808
1809
1810
1811
1812
1813 if (!is_any_cbs_enabled(adapter)) {
1814 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1815 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1816 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1817 }
1818 }
1819
1820
1821 if (ring->launchtime_enable) {
1822
1823
1824
1825
1826
1827
1828
1829
1830 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1831 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1832 E1000_TQAVCTRL_FETCHTIME_DELTA;
1833 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1834 } else {
1835
1836
1837
1838
1839 if (!is_any_txtime_enabled(adapter)) {
1840 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1841 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1842 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1843 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1844 }
1845 }
1846
1847
1848
1849
1850
1851
1852 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1853 ring->cbs_enable ? "enabled" : "disabled",
1854 ring->launchtime_enable ? "enabled" : "disabled",
1855 queue,
1856 ring->idleslope, ring->sendslope,
1857 ring->hicredit, ring->locredit);
1858 }
1859
1860 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1861 bool enable)
1862 {
1863 struct igb_ring *ring;
1864
1865 if (queue < 0 || queue > adapter->num_tx_queues)
1866 return -EINVAL;
1867
1868 ring = adapter->tx_ring[queue];
1869 ring->launchtime_enable = enable;
1870
1871 return 0;
1872 }
1873
1874 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1875 bool enable, int idleslope, int sendslope,
1876 int hicredit, int locredit)
1877 {
1878 struct igb_ring *ring;
1879
1880 if (queue < 0 || queue > adapter->num_tx_queues)
1881 return -EINVAL;
1882
1883 ring = adapter->tx_ring[queue];
1884
1885 ring->cbs_enable = enable;
1886 ring->idleslope = idleslope;
1887 ring->sendslope = sendslope;
1888 ring->hicredit = hicredit;
1889 ring->locredit = locredit;
1890
1891 return 0;
1892 }
1893
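/* Both save helpers above are filled in from the tc offload path
 * (TC_SETUP_QDISC_CBS for CBS, TC_SETUP_QDISC_ETF for launchtime).
 * Illustrative user-space example for enabling CBS offload on a Tx
 * queue (interface name, parent handle and values are made up):
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs \
 *       idleslope 20000 sendslope -980000 hicredit 30 locredit -1470 \
 *       offload 1
 *
 * idleslope/sendslope are in kbit/s, hicredit/locredit in bytes.
 */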
1894
1895
1896
1897
1898
1899
1900
1901
1902
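/**
 *  igb_setup_tx_mode - switch to/from Qav Tx mode when applicable
 *  @adapter: pointer to adapter struct
 *
 *  On i210 only: when FQTSS is enabled, switch TQAVCTRL into Qav mode,
 *  resize the packet buffers, and program the per-queue CBS/launchtime
 *  modes; otherwise restore the legacy defaults.
 **/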
1903 static void igb_setup_tx_mode(struct igb_adapter *adapter)
1904 {
1905 struct net_device *netdev = adapter->netdev;
1906 struct e1000_hw *hw = &adapter->hw;
1907 u32 val;
1908
1909
1910 if (hw->mac.type != e1000_i210)
1911 return;
1912
1913 if (is_fqtss_enabled(adapter)) {
1914 int i, max_queue;
1915
1916
1917
1918
1919
1920 val = rd32(E1000_I210_TQAVCTRL);
1921 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1922 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1923 wr32(E1000_I210_TQAVCTRL, val);
1924
1925
1926
1927
1928 val = rd32(E1000_TXPBS);
1929 val &= ~I210_TXPBSIZE_MASK;
1930 val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
1931 I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
1932 wr32(E1000_TXPBS, val);
1933
1934 val = rd32(E1000_RXPBS);
1935 val &= ~I210_RXPBSIZE_MASK;
1936 val |= I210_RXPBSIZE_PB_30KB;
1937 wr32(E1000_RXPBS, val);
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950 val = (4096 - 1) / 64;
1951 wr32(E1000_I210_DTXMXPKTSZ, val);
1952
1953
1954
1955
1956
1957
1958 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1959 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1960
1961 for (i = 0; i < max_queue; i++) {
1962 igb_config_tx_modes(adapter, i);
1963 }
1964 } else {
1965 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1966 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1967 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1968
1969 val = rd32(E1000_I210_TQAVCTRL);
1970
1971
1972
1973
1974 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1975 wr32(E1000_I210_TQAVCTRL, val);
1976 }
1977
1978 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1979 "enabled" : "disabled");
1980 }
1981
1982
1983
1984
1985
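/**
 *  igb_configure - configure the hardware for RX and TX
 *  @adapter: board private structure
 **/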
1986 static void igb_configure(struct igb_adapter *adapter)
1987 {
1988 struct net_device *netdev = adapter->netdev;
1989 int i;
1990
1991 igb_get_hw_control(adapter);
1992 igb_set_rx_mode(netdev);
1993 igb_setup_tx_mode(adapter);
1994
1995 igb_restore_vlan(adapter);
1996
1997 igb_setup_tctl(adapter);
1998 igb_setup_mrqc(adapter);
1999 igb_setup_rctl(adapter);
2000
2001 igb_nfc_filter_restore(adapter);
2002 igb_configure_tx(adapter);
2003 igb_configure_rx(adapter);
2004
2005 igb_rx_fifo_flush_82575(&adapter->hw);
2006
2007
2008
2009
2010
2011 for (i = 0; i < adapter->num_rx_queues; i++) {
2012 struct igb_ring *ring = adapter->rx_ring[i];
2013 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2014 }
2015 }
2016
2017
2018
2019
2020
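/**
 *  igb_power_up_link - Power up the phy/serdes link
 *  @adapter: board private structure
 **/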
2021 void igb_power_up_link(struct igb_adapter *adapter)
2022 {
2023 igb_reset_phy(&adapter->hw);
2024
2025 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2026 igb_power_up_phy_copper(&adapter->hw);
2027 else
2028 igb_power_up_serdes_link_82575(&adapter->hw);
2029
2030 igb_setup_link(&adapter->hw);
2031 }
2032
2033
2034
2035
2036
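/**
 *  igb_power_down_link - Power down the phy/serdes link
 *  @adapter: board private structure
 **/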
2037 static void igb_power_down_link(struct igb_adapter *adapter)
2038 {
2039 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2040 igb_power_down_phy_copper_82575(&adapter->hw);
2041 else
2042 igb_shutdown_serdes_link_82575(&adapter->hw);
2043 }
2044
2045
2046
2047
2048
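/**
 *  igb_check_swap_media - detect and switch media for Media Auto Sense
 *  @adapter: board private structure
 *
 *  On dual-media parts, decide whether the active media should be swapped
 *  between copper and fiber/serdes and, if so, request a media reset.
 **/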
2049 static void igb_check_swap_media(struct igb_adapter *adapter)
2050 {
2051 struct e1000_hw *hw = &adapter->hw;
2052 u32 ctrl_ext, connsw;
2053 bool swap_now = false;
2054
2055 ctrl_ext = rd32(E1000_CTRL_EXT);
2056 connsw = rd32(E1000_CONNSW);
2057
2058
2059
2060
2061
2062 if ((hw->phy.media_type == e1000_media_type_copper) &&
2063 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2064 swap_now = true;
2065 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2066 !(connsw & E1000_CONNSW_SERDESD)) {
2067
2068 if (adapter->copper_tries < 4) {
2069 adapter->copper_tries++;
2070 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2071 wr32(E1000_CONNSW, connsw);
2072 return;
2073 } else {
2074 adapter->copper_tries = 0;
2075 if ((connsw & E1000_CONNSW_PHYSD) &&
2076 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2077 swap_now = true;
2078 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2079 wr32(E1000_CONNSW, connsw);
2080 }
2081 }
2082 }
2083
2084 if (!swap_now)
2085 return;
2086
2087 switch (hw->phy.media_type) {
2088 case e1000_media_type_copper:
2089 netdev_info(adapter->netdev,
2090 "MAS: changing media to fiber/serdes\n");
2091 ctrl_ext |=
2092 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2093 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2094 adapter->copper_tries = 0;
2095 break;
2096 case e1000_media_type_internal_serdes:
2097 case e1000_media_type_fiber:
2098 netdev_info(adapter->netdev,
2099 "MAS: changing media to copper\n");
2100 ctrl_ext &=
2101 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2102 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2103 break;
2104 default:
2105
2106 netdev_err(adapter->netdev,
2107 "AMS: Invalid media type found, returning\n");
2108 break;
2109 }
2110 wr32(E1000_CTRL_EXT, ctrl_ext);
2111 }
2112
2113
2114
2115
2116
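/**
 *  igb_up - bring the interface up
 *  @adapter: board private structure
 *
 *  Configure the hardware, enable NAPI and interrupts, start the Tx
 *  queues and kick off the watchdog.
 **/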
2117 int igb_up(struct igb_adapter *adapter)
2118 {
2119 struct e1000_hw *hw = &adapter->hw;
2120 int i;
2121
2122
2123 igb_configure(adapter);
2124
2125 clear_bit(__IGB_DOWN, &adapter->state);
2126
2127 for (i = 0; i < adapter->num_q_vectors; i++)
2128 napi_enable(&(adapter->q_vector[i]->napi));
2129
2130 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2131 igb_configure_msix(adapter);
2132 else
2133 igb_assign_vector(adapter->q_vector[0], 0);
2134
2135
2136 rd32(E1000_TSICR);
2137 rd32(E1000_ICR);
2138 igb_irq_enable(adapter);
2139
2140
2141 if (adapter->vfs_allocated_count) {
2142 u32 reg_data = rd32(E1000_CTRL_EXT);
2143
2144 reg_data |= E1000_CTRL_EXT_PFRSTD;
2145 wr32(E1000_CTRL_EXT, reg_data);
2146 }
2147
2148 netif_tx_start_all_queues(adapter->netdev);
2149
2150
2151 hw->mac.get_link_status = 1;
2152 schedule_work(&adapter->watchdog_task);
2153
2154 if ((adapter->flags & IGB_FLAG_EEE) &&
2155 (!hw->dev_spec._82575.eee_disable))
2156 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2157
2158 return 0;
2159 }
2160
2161 void igb_down(struct igb_adapter *adapter)
2162 {
2163 struct net_device *netdev = adapter->netdev;
2164 struct e1000_hw *hw = &adapter->hw;
2165 u32 tctl, rctl;
2166 int i;
2167
2168
2169
2170
2171 set_bit(__IGB_DOWN, &adapter->state);
2172
2173
2174 rctl = rd32(E1000_RCTL);
2175 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2176
2177
2178 igb_nfc_filter_exit(adapter);
2179
2180 netif_carrier_off(netdev);
2181 netif_tx_stop_all_queues(netdev);
2182
2183
2184 tctl = rd32(E1000_TCTL);
2185 tctl &= ~E1000_TCTL_EN;
2186 wr32(E1000_TCTL, tctl);
2187
2188 wrfl();
2189 usleep_range(10000, 11000);
2190
2191 igb_irq_disable(adapter);
2192
2193 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2194
2195 for (i = 0; i < adapter->num_q_vectors; i++) {
2196 if (adapter->q_vector[i]) {
2197 napi_synchronize(&adapter->q_vector[i]->napi);
2198 napi_disable(&adapter->q_vector[i]->napi);
2199 }
2200 }
2201
2202 del_timer_sync(&adapter->watchdog_timer);
2203 del_timer_sync(&adapter->phy_info_timer);
2204
2205
2206 spin_lock(&adapter->stats64_lock);
2207 igb_update_stats(adapter);
2208 spin_unlock(&adapter->stats64_lock);
2209
2210 adapter->link_speed = 0;
2211 adapter->link_duplex = 0;
2212
2213 if (!pci_channel_offline(adapter->pdev))
2214 igb_reset(adapter);
2215
2216
2217 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2218
2219 igb_clean_all_tx_rings(adapter);
2220 igb_clean_all_rx_rings(adapter);
2221 #ifdef CONFIG_IGB_DCA
2222
2223
2224 igb_setup_dca(adapter);
2225 #endif
2226 }
2227
2228 void igb_reinit_locked(struct igb_adapter *adapter)
2229 {
2230 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2231 usleep_range(1000, 2000);
2232 igb_down(adapter);
2233 igb_up(adapter);
2234 clear_bit(__IGB_RESETTING, &adapter->state);
2235 }
2236
2237
2238
2239
2240
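/**
 *  igb_enable_mas - Media Autosense re-enable after swap
 *  @adapter: board private structure
 *
 *  Re-arm energy detect and autosense in CONNSW when the current media is
 *  copper and no SerDes signal is detected.
 **/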
2241 static void igb_enable_mas(struct igb_adapter *adapter)
2242 {
2243 struct e1000_hw *hw = &adapter->hw;
2244 u32 connsw = rd32(E1000_CONNSW);
2245
2246
2247 if ((hw->phy.media_type == e1000_media_type_copper) &&
2248 (!(connsw & E1000_CONNSW_SERDESD))) {
2249 connsw |= E1000_CONNSW_ENRGSRC;
2250 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2251 wr32(E1000_CONNSW, connsw);
2252 wrfl();
2253 }
2254 }
2255
2256 void igb_reset(struct igb_adapter *adapter)
2257 {
2258 struct pci_dev *pdev = adapter->pdev;
2259 struct e1000_hw *hw = &adapter->hw;
2260 struct e1000_mac_info *mac = &hw->mac;
2261 struct e1000_fc_info *fc = &hw->fc;
2262 u32 pba, hwm;
2263
2264
2265
2266
2267 switch (mac->type) {
2268 case e1000_i350:
2269 case e1000_i354:
2270 case e1000_82580:
2271 pba = rd32(E1000_RXPBS);
2272 pba = igb_rxpbs_adjust_82580(pba);
2273 break;
2274 case e1000_82576:
2275 pba = rd32(E1000_RXPBS);
2276 pba &= E1000_RXPBS_SIZE_MASK_82576;
2277 break;
2278 case e1000_82575:
2279 case e1000_i210:
2280 case e1000_i211:
2281 default:
2282 pba = E1000_PBA_34K;
2283 break;
2284 }
2285
2286 if (mac->type == e1000_82575) {
2287 u32 min_rx_space, min_tx_space, needed_tx_space;
2288
2289
2290 wr32(E1000_PBA, pba);
2291
2292
2293
2294
2295
2296
2297
2298
2299 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2300
2301
2302
2303
2304
2305
2306 min_tx_space = adapter->max_frame_size;
2307 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2308 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2309
2310
2311 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2312
2313
2314
2315
2316
2317 if (needed_tx_space < pba) {
2318 pba -= needed_tx_space;
2319
2320
2321
2322
2323 if (pba < min_rx_space)
2324 pba = min_rx_space;
2325 }
2326
2327
2328 wr32(E1000_PBA, pba);
2329 }
2330
2331
2332
2333
2334
2335
2336
2337
2338
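/* Flow control settings: the high water mark must be low enough that a
 * full frame can still be received after a pause frame is sent, so set
 * it to the Rx packet buffer size (pba is in KB, hence the << 10) minus
 * room for one max-size frame and one jumbo frame, rounded down to a
 * 16-byte boundary; the low water mark sits 16 bytes below it.
 */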
2339 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2340
2341 fc->high_water = hwm & 0xFFFFFFF0;
2342 fc->low_water = fc->high_water - 16;
2343 fc->pause_time = 0xFFFF;
2344 fc->send_xon = 1;
2345 fc->current_mode = fc->requested_mode;
2346
2347
2348 if (adapter->vfs_allocated_count) {
2349 int i;
2350
2351 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2352 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2353
2354
2355 igb_ping_all_vfs(adapter);
2356
2357
2358 wr32(E1000_VFRE, 0);
2359 wr32(E1000_VFTE, 0);
2360 }
2361
2362
2363 hw->mac.ops.reset_hw(hw);
2364 wr32(E1000_WUC, 0);
2365
2366 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2367
2368 adapter->ei.get_invariants(hw);
2369 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2370 }
2371 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2372 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2373 igb_enable_mas(adapter);
2374 }
2375 if (hw->mac.ops.init_hw(hw))
2376 dev_err(&pdev->dev, "Hardware Error\n");
2377
2378
2379 igb_flush_mac_table(adapter);
2380 __dev_uc_unsync(adapter->netdev, NULL);
2381
2382
2383 igb_set_default_mac_filter(adapter);
2384
2385 /* Flow control settings reset on hardware reset, so guarantee flow
2386  * control is off when forcing speed.
2387  */
2388 if (!hw->mac.autoneg)
2389 igb_force_mac_fc(hw);
2390
2391 igb_init_dmac(adapter, pba);
2392 #ifdef CONFIG_IGB_HWMON
2393
2394 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2395 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2396
2397
2398
2399 if (adapter->ets)
2400 mac->ops.init_thermal_sensor_thresh(hw);
2401 }
2402 }
2403 #endif
2404
2405 if (hw->phy.media_type == e1000_media_type_copper) {
2406 switch (mac->type) {
2407 case e1000_i350:
2408 case e1000_i210:
2409 case e1000_i211:
2410 igb_set_eee_i350(hw, true, true);
2411 break;
2412 case e1000_i354:
2413 igb_set_eee_i354(hw, true, true);
2414 break;
2415 default:
2416 break;
2417 }
2418 }
2419 if (!netif_running(adapter->netdev))
2420 igb_power_down_link(adapter);
2421
2422 igb_update_mng_vlan(adapter);
2423
2424
2425 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2426
2427
2428 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2429 igb_ptp_reset(adapter);
2430
2431 igb_get_phy_info(hw);
2432 }
2433
2434 static netdev_features_t igb_fix_features(struct net_device *netdev,
2435 netdev_features_t features)
2436 {
2437 /* Since there is no support for separate Rx/Tx vlan accel
2438  * enable/disable, make sure the Tx flag is always in the same state as Rx.
2439  */
2440 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2441 features |= NETIF_F_HW_VLAN_CTAG_TX;
2442 else
2443 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2444
2445 return features;
2446 }
2447
2448 static int igb_set_features(struct net_device *netdev,
2449 netdev_features_t features)
2450 {
2451 netdev_features_t changed = netdev->features ^ features;
2452 struct igb_adapter *adapter = netdev_priv(netdev);
2453
2454 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2455 igb_vlan_mode(netdev, features);
2456
2457 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2458 return 0;
2459
2460 if (!(features & NETIF_F_NTUPLE)) {
2461 struct hlist_node *node2;
2462 struct igb_nfc_filter *rule;
2463
2464 spin_lock(&adapter->nfc_lock);
2465 hlist_for_each_entry_safe(rule, node2,
2466 &adapter->nfc_filter_list, nfc_node) {
2467 igb_erase_filter(adapter, rule);
2468 hlist_del(&rule->nfc_node);
2469 kfree(rule);
2470 }
2471 spin_unlock(&adapter->nfc_lock);
2472 adapter->nfc_filter_count = 0;
2473 }
2474
2475 netdev->features = features;
2476
2477 if (netif_running(netdev))
2478 igb_reinit_locked(adapter);
2479 else
2480 igb_reset(adapter);
2481
2482 return 1;
2483 }
2484
2485 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2486 struct net_device *dev,
2487 const unsigned char *addr, u16 vid,
2488 u16 flags,
2489 struct netlink_ext_ack *extack)
2490 {
2491
2492 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2493 struct igb_adapter *adapter = netdev_priv(dev);
2494 int vfn = adapter->vfs_allocated_count;
2495
2496 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2497 return -ENOMEM;
2498 }
2499
2500 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2501 }
2502
2503 #define IGB_MAX_MAC_HDR_LEN 127
2504 #define IGB_MAX_NETWORK_HDR_LEN 511
2505
2506 static netdev_features_t
2507 igb_features_check(struct sk_buff *skb, struct net_device *dev,
2508 netdev_features_t features)
2509 {
2510 unsigned int network_hdr_len, mac_hdr_len;
2511
2512
2513 mac_hdr_len = skb_network_header(skb) - skb->data;
2514 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2515 return features & ~(NETIF_F_HW_CSUM |
2516 NETIF_F_SCTP_CRC |
2517 NETIF_F_GSO_UDP_L4 |
2518 NETIF_F_HW_VLAN_CTAG_TX |
2519 NETIF_F_TSO |
2520 NETIF_F_TSO6);
2521
2522 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2523 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2524 return features & ~(NETIF_F_HW_CSUM |
2525 NETIF_F_SCTP_CRC |
2526 NETIF_F_GSO_UDP_L4 |
2527 NETIF_F_TSO |
2528 NETIF_F_TSO6);
2529
2530 /* We can only support IPv4 TSO in tunnels if we can mangle the
2531  * inner IP ID field, so strip TSO if MANGLEID is not supported.
2532  */
2533 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2534 features &= ~NETIF_F_TSO;
2535
2536 return features;
2537 }
2538
2539 static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2540 {
2541 if (!is_fqtss_enabled(adapter)) {
2542 enable_fqtss(adapter, true);
2543 return;
2544 }
2545
2546 igb_config_tx_modes(adapter, queue);
2547
2548 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2549 enable_fqtss(adapter, false);
2550 }
2551
2552 static int igb_offload_cbs(struct igb_adapter *adapter,
2553 struct tc_cbs_qopt_offload *qopt)
2554 {
2555 struct e1000_hw *hw = &adapter->hw;
2556 int err;
2557
2558 /* CBS offloading is only supported by the i210 controller. */
2559 if (hw->mac.type != e1000_i210)
2560 return -EOPNOTSUPP;
2561
2562 /* CBS offloading is only supported by queue 0 and queue 1. */
2563 if (qopt->queue < 0 || qopt->queue > 1)
2564 return -EINVAL;
2565
2566 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2567 qopt->idleslope, qopt->sendslope,
2568 qopt->hicredit, qopt->locredit);
2569 if (err)
2570 return err;
2571
2572 igb_offload_apply(adapter, qopt->queue);
2573
2574 return 0;
2575 }
2576
2577 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2578 #define VLAN_PRIO_FULL_MASK (0x07)
2579
2580 static int igb_parse_cls_flower(struct igb_adapter *adapter,
2581 struct flow_cls_offload *f,
2582 int traffic_class,
2583 struct igb_nfc_filter *input)
2584 {
2585 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2586 struct flow_dissector *dissector = rule->match.dissector;
2587 struct netlink_ext_ack *extack = f->common.extack;
2588
2589 if (dissector->used_keys &
2590 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2591 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2592 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2593 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2594 NL_SET_ERR_MSG_MOD(extack,
2595 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2596 return -EOPNOTSUPP;
2597 }
2598
2599 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2600 struct flow_match_eth_addrs match;
2601
2602 flow_rule_match_eth_addrs(rule, &match);
2603 if (!is_zero_ether_addr(match.mask->dst)) {
2604 if (!is_broadcast_ether_addr(match.mask->dst)) {
2605 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2606 return -EINVAL;
2607 }
2608
2609 input->filter.match_flags |=
2610 IGB_FILTER_FLAG_DST_MAC_ADDR;
2611 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2612 }
2613
2614 if (!is_zero_ether_addr(match.mask->src)) {
2615 if (!is_broadcast_ether_addr(match.mask->src)) {
2616 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2617 return -EINVAL;
2618 }
2619
2620 input->filter.match_flags |=
2621 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2622 ether_addr_copy(input->filter.src_addr, match.key->src);
2623 }
2624 }
2625
2626 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2627 struct flow_match_basic match;
2628
2629 flow_rule_match_basic(rule, &match);
2630 if (match.mask->n_proto) {
2631 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2632 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2633 return -EINVAL;
2634 }
2635
2636 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2637 input->filter.etype = match.key->n_proto;
2638 }
2639 }
2640
2641 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2642 struct flow_match_vlan match;
2643
2644 flow_rule_match_vlan(rule, &match);
2645 if (match.mask->vlan_priority) {
2646 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2647 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2648 return -EINVAL;
2649 }
2650
2651 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2652 input->filter.vlan_tci =
2653 (__force __be16)match.key->vlan_priority;
2654 }
2655 }
2656
2657 input->action = traffic_class;
2658 input->cookie = f->cookie;
2659
2660 return 0;
2661 }
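/* The parser above only accepts fully-masked destination/source MAC,
 * EtherType and VLAN-priority keys.  An illustrative (not authoritative)
 * way to exercise it from user space, assuming an mqprio/taprio root qdisc
 * so that "hw_tc" resolves to a valid traffic class, and with "eth0" and
 * the MAC address as placeholders:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress flower skip_sw \
 *           dst_mac 01:AA:BB:CC:DD:EE hw_tc 1
 *
 * Exact syntax depends on the iproute2 version in use.
 */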
2662
2663 static int igb_configure_clsflower(struct igb_adapter *adapter,
2664 struct flow_cls_offload *cls_flower)
2665 {
2666 struct netlink_ext_ack *extack = cls_flower->common.extack;
2667 struct igb_nfc_filter *filter, *f;
2668 int err, tc;
2669
2670 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2671 if (tc < 0) {
2672 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2673 return -EINVAL;
2674 }
2675
2676 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2677 if (!filter)
2678 return -ENOMEM;
2679
2680 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2681 if (err < 0)
2682 goto err_parse;
2683
2684 spin_lock(&adapter->nfc_lock);
2685
2686 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2687 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2688 err = -EEXIST;
2689 NL_SET_ERR_MSG_MOD(extack,
2690 "This filter is already set in ethtool");
2691 goto err_locked;
2692 }
2693 }
2694
2695 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2696 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2697 err = -EEXIST;
2698 NL_SET_ERR_MSG_MOD(extack,
2699 "This filter is already set in cls_flower");
2700 goto err_locked;
2701 }
2702 }
2703
2704 err = igb_add_filter(adapter, filter);
2705 if (err < 0) {
2706 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2707 goto err_locked;
2708 }
2709
2710 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2711
2712 spin_unlock(&adapter->nfc_lock);
2713
2714 return 0;
2715
2716 err_locked:
2717 spin_unlock(&adapter->nfc_lock);
2718
2719 err_parse:
2720 kfree(filter);
2721
2722 return err;
2723 }
2724
2725 static int igb_delete_clsflower(struct igb_adapter *adapter,
2726 struct flow_cls_offload *cls_flower)
2727 {
2728 struct igb_nfc_filter *filter;
2729 int err;
2730
2731 spin_lock(&adapter->nfc_lock);
2732
2733 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2734 if (filter->cookie == cls_flower->cookie)
2735 break;
2736
2737 if (!filter) {
2738 err = -ENOENT;
2739 goto out;
2740 }
2741
2742 err = igb_erase_filter(adapter, filter);
2743 if (err < 0)
2744 goto out;
2745
2746 hlist_del(&filter->nfc_node);
2747 kfree(filter);
2748
2749 out:
2750 spin_unlock(&adapter->nfc_lock);
2751
2752 return err;
2753 }
2754
2755 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2756 struct flow_cls_offload *cls_flower)
2757 {
2758 switch (cls_flower->command) {
2759 case FLOW_CLS_REPLACE:
2760 return igb_configure_clsflower(adapter, cls_flower);
2761 case FLOW_CLS_DESTROY:
2762 return igb_delete_clsflower(adapter, cls_flower);
2763 case FLOW_CLS_STATS:
2764 return -EOPNOTSUPP;
2765 default:
2766 return -EOPNOTSUPP;
2767 }
2768 }
2769
2770 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2771 void *cb_priv)
2772 {
2773 struct igb_adapter *adapter = cb_priv;
2774
2775 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2776 return -EOPNOTSUPP;
2777
2778 switch (type) {
2779 case TC_SETUP_CLSFLOWER:
2780 return igb_setup_tc_cls_flower(adapter, type_data);
2781
2782 default:
2783 return -EOPNOTSUPP;
2784 }
2785 }
2786
2787 static int igb_offload_txtime(struct igb_adapter *adapter,
2788 struct tc_etf_qopt_offload *qopt)
2789 {
2790 struct e1000_hw *hw = &adapter->hw;
2791 int err;
2792
2793 /* Launchtime offloading is only supported by the i210 controller. */
2794 if (hw->mac.type != e1000_i210)
2795 return -EOPNOTSUPP;
2796
2797 /* Launchtime offloading is only supported by queues 0 and 1. */
2798 if (qopt->queue < 0 || qopt->queue > 1)
2799 return -EINVAL;
2800
2801 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2802 if (err)
2803 return err;
2804
2805 igb_offload_apply(adapter, qopt->queue);
2806
2807 return 0;
2808 }
2809
2810 static LIST_HEAD(igb_block_cb_list);
2811
2812 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2813 void *type_data)
2814 {
2815 struct igb_adapter *adapter = netdev_priv(dev);
2816
2817 switch (type) {
2818 case TC_SETUP_QDISC_CBS:
2819 return igb_offload_cbs(adapter, type_data);
2820 case TC_SETUP_BLOCK:
2821 return flow_block_cb_setup_simple(type_data,
2822 &igb_block_cb_list,
2823 igb_setup_tc_block_cb,
2824 adapter, adapter, true);
2825
2826 case TC_SETUP_QDISC_ETF:
2827 return igb_offload_txtime(adapter, type_data);
2828
2829 default:
2830 return -EOPNOTSUPP;
2831 }
2832 }
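/* Both the CBS and the ETF (launch time) offloads dispatched above are
 * driven from user space through the qdisc layer.  A sketch of a typical
 * i210 setup, with "eth0" and the numeric parameters purely illustrative:
 *
 *   # expose the hardware queues as traffic classes
 *   tc qdisc replace dev eth0 parent root handle 100 mqprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 2@2 hw 0
 *
 *   # credit-based shaper on hardware queue 0
 *   tc qdisc replace dev eth0 parent 100:1 cbs offload 1 \
 *           idleslope 20000 sendslope -980000 hicredit 30 locredit -1470
 *
 *   # or per-packet launch time on the same queue
 *   tc qdisc replace dev eth0 parent 100:1 etf offload \
 *           clockid CLOCK_TAI delta 200000
 *
 * Exact parameters and syntax depend on the iproute2 version and on the
 * application's bandwidth/latency requirements.
 */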
2833
2834 static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2835 {
2836 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2837 struct igb_adapter *adapter = netdev_priv(dev);
2838 struct bpf_prog *prog = bpf->prog, *old_prog;
2839 bool running = netif_running(dev);
2840 bool need_reset;
2841
2842 /* verify igb ring attributes are sufficient for XDP */
2843 for (i = 0; i < adapter->num_rx_queues; i++) {
2844 struct igb_ring *ring = adapter->rx_ring[i];
2845
2846 if (frame_size > igb_rx_bufsz(ring)) {
2847 NL_SET_ERR_MSG_MOD(bpf->extack,
2848 "The RX buffer size is too small for the frame size");
2849 netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2850 igb_rx_bufsz(ring), frame_size);
2851 return -EINVAL;
2852 }
2853 }
2854
2855 old_prog = xchg(&adapter->xdp_prog, prog);
2856 need_reset = (!!prog != !!old_prog);
2857
2858
2859 if (need_reset && running) {
2860 igb_close(dev);
2861 } else {
2862 for (i = 0; i < adapter->num_rx_queues; i++)
2863 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
2864 adapter->xdp_prog);
2865 }
2866
2867 if (old_prog)
2868 bpf_prog_put(old_prog);
2869
2870
2871 if (!need_reset)
2872 return 0;
2873
2874 if (running)
2875 igb_open(dev);
2876
2877 return 0;
2878 }
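/* igb_xdp_setup() is reached through the ndo_bpf hook when a program is
 * attached or detached, for instance (illustrative only; "eth0" and the
 * object file name are placeholders):
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * A full close/open cycle is only performed when a program is first
 * attached or last removed (need_reset); merely swapping one program for
 * another just updates the per-ring xdp_prog pointers.
 */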
2879
2880 static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2881 {
2882 switch (xdp->command) {
2883 case XDP_SETUP_PROG:
2884 return igb_xdp_setup(dev, xdp);
2885 default:
2886 return -EINVAL;
2887 }
2888 }
2889
2890 static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2891 {
2892 /* Force memory writes to complete before letting h/w know there
2893  * are new descriptors to fetch.
2894  */
2895 wmb();
2896 writel(ring->next_to_use, ring->tail);
2897 }
2898
2899 static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2900 {
2901 unsigned int r_idx = smp_processor_id();
2902
2903 if (r_idx >= adapter->num_tx_queues)
2904 r_idx = r_idx % adapter->num_tx_queues;
2905
2906 return adapter->tx_ring[r_idx];
2907 }
2908
2909 static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2910 {
2911 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2912 int cpu = smp_processor_id();
2913 struct igb_ring *tx_ring;
2914 struct netdev_queue *nq;
2915 u32 ret;
2916
2917 if (unlikely(!xdpf))
2918 return IGB_XDP_CONSUMED;
2919
2920 /* During program transitions it is possible that adapter->xdp_prog is
2921  * assigned but the ring has not been configured yet; in that case simply
2922  * abort the transmit. */
2923 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2924 if (unlikely(!tx_ring))
2925 return IGB_XDP_CONSUMED;
2926
2927 nq = txring_txq(tx_ring);
2928 __netif_tx_lock(nq, cpu);
2929
2930 txq_trans_cond_update(nq);
2931 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2932 __netif_tx_unlock(nq);
2933
2934 return ret;
2935 }
2936
2937 static int igb_xdp_xmit(struct net_device *dev, int n,
2938 struct xdp_frame **frames, u32 flags)
2939 {
2940 struct igb_adapter *adapter = netdev_priv(dev);
2941 int cpu = smp_processor_id();
2942 struct igb_ring *tx_ring;
2943 struct netdev_queue *nq;
2944 int nxmit = 0;
2945 int i;
2946
2947 if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2948 return -ENETDOWN;
2949
2950 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2951 return -EINVAL;
2952
2953 /* During program transitions it is possible that adapter->xdp_prog is
2954  * assigned but the ring has not been configured yet; in that case simply
2955  * abort the transmit. */
2956 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2957 if (unlikely(!tx_ring))
2958 return -ENXIO;
2959
2960 nq = txring_txq(tx_ring);
2961 __netif_tx_lock(nq, cpu);
2962
2963
2964 txq_trans_cond_update(nq);
2965
2966 for (i = 0; i < n; i++) {
2967 struct xdp_frame *xdpf = frames[i];
2968 int err;
2969
2970 err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2971 if (err != IGB_XDP_TX)
2972 break;
2973 nxmit++;
2974 }
2975
2976 __netif_tx_unlock(nq);
2977
2978 if (unlikely(flags & XDP_XMIT_FLUSH))
2979 igb_xdp_ring_update_tail(tx_ring);
2980
2981 return nxmit;
2982 }
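/* Note on the ndo_xdp_xmit contract implemented above: the return value is
 * the number of frames actually queued (nxmit), not an error count; frames
 * that could not be queued are left to the caller, which is responsible
 * for freeing them.  The tail register is only bumped when XDP_XMIT_FLUSH
 * is set, so batched callers pay a single doorbell per bundle.
 */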
2983
2984 static const struct net_device_ops igb_netdev_ops = {
2985 .ndo_open = igb_open,
2986 .ndo_stop = igb_close,
2987 .ndo_start_xmit = igb_xmit_frame,
2988 .ndo_get_stats64 = igb_get_stats64,
2989 .ndo_set_rx_mode = igb_set_rx_mode,
2990 .ndo_set_mac_address = igb_set_mac,
2991 .ndo_change_mtu = igb_change_mtu,
2992 .ndo_eth_ioctl = igb_ioctl,
2993 .ndo_tx_timeout = igb_tx_timeout,
2994 .ndo_validate_addr = eth_validate_addr,
2995 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2996 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2997 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2998 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2999 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
3000 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
3001 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
3002 .ndo_get_vf_config = igb_ndo_get_vf_config,
3003 .ndo_fix_features = igb_fix_features,
3004 .ndo_set_features = igb_set_features,
3005 .ndo_fdb_add = igb_ndo_fdb_add,
3006 .ndo_features_check = igb_features_check,
3007 .ndo_setup_tc = igb_setup_tc,
3008 .ndo_bpf = igb_xdp,
3009 .ndo_xdp_xmit = igb_xdp_xmit,
3010 };
3011
3012 /**
3013  *  igb_set_fw_version - Configure version string for ethtool
3014  *  @adapter: adapter struct
3015  **/
3016 void igb_set_fw_version(struct igb_adapter *adapter)
3017 {
3018 struct e1000_hw *hw = &adapter->hw;
3019 struct e1000_fw_version fw;
3020
3021 igb_get_fw_version(hw, &fw);
3022
3023 switch (hw->mac.type) {
3024 case e1000_i210:
3025 case e1000_i211:
3026 if (!(igb_get_flash_presence_i210(hw))) {
3027 snprintf(adapter->fw_version,
3028 sizeof(adapter->fw_version),
3029 "%2d.%2d-%d",
3030 fw.invm_major, fw.invm_minor,
3031 fw.invm_img_type);
3032 break;
3033 }
3034 fallthrough;
3035 default:
3036 /* if the option ROM is valid, display its version too */
3037 if (fw.or_valid) {
3038 snprintf(adapter->fw_version,
3039 sizeof(adapter->fw_version),
3040 "%d.%d, 0x%08x, %d.%d.%d",
3041 fw.eep_major, fw.eep_minor, fw.etrack_id,
3042 fw.or_major, fw.or_build, fw.or_patch);
3043
3044 } else if (fw.etrack_id != 0x0000) {
3045 snprintf(adapter->fw_version,
3046 sizeof(adapter->fw_version),
3047 "%d.%d, 0x%08x",
3048 fw.eep_major, fw.eep_minor, fw.etrack_id);
3049 } else {
3050 snprintf(adapter->fw_version,
3051 sizeof(adapter->fw_version),
3052 "%d.%d.%d",
3053 fw.eep_major, fw.eep_minor, fw.eep_build);
3054 }
3055 break;
3056 }
3057 }
3058
3059 /**
3060  *  igb_init_mas - init Media Autosense feature if enabled in the NVM
3061  *
3062  *  @adapter: adapter struct
3063  **/
3064 static void igb_init_mas(struct igb_adapter *adapter)
3065 {
3066 struct e1000_hw *hw = &adapter->hw;
3067 u16 eeprom_data;
3068
3069 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
3070 switch (hw->bus.func) {
3071 case E1000_FUNC_0:
3072 if (eeprom_data & IGB_MAS_ENABLE_0) {
3073 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3074 netdev_info(adapter->netdev,
3075 "MAS: Enabling Media Autosense for port %d\n",
3076 hw->bus.func);
3077 }
3078 break;
3079 case E1000_FUNC_1:
3080 if (eeprom_data & IGB_MAS_ENABLE_1) {
3081 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3082 netdev_info(adapter->netdev,
3083 "MAS: Enabling Media Autosense for port %d\n",
3084 hw->bus.func);
3085 }
3086 break;
3087 case E1000_FUNC_2:
3088 if (eeprom_data & IGB_MAS_ENABLE_2) {
3089 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3090 netdev_info(adapter->netdev,
3091 "MAS: Enabling Media Autosense for port %d\n",
3092 hw->bus.func);
3093 }
3094 break;
3095 case E1000_FUNC_3:
3096 if (eeprom_data & IGB_MAS_ENABLE_3) {
3097 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3098 netdev_info(adapter->netdev,
3099 "MAS: Enabling Media Autosense for port %d\n",
3100 hw->bus.func);
3101 }
3102 break;
3103 default:
3104
3105 netdev_err(adapter->netdev,
3106 "MAS: Invalid port configuration, returning\n");
3107 break;
3108 }
3109 }
3110
3111 /**
3112  *  igb_init_i2c - Init the I2C interface
3113  *  @adapter: pointer to adapter structure
3114  **/
3115 static s32 igb_init_i2c(struct igb_adapter *adapter)
3116 {
3117 struct e1000_hw *hw = &adapter->hw;
3118 s32 status = 0;
3119 s32 i2cctl;
3120
3121
3122 if (adapter->hw.mac.type != e1000_i350)
3123 return 0;
3124
3125 i2cctl = rd32(E1000_I2CPARAMS);
3126 i2cctl |= E1000_I2CBB_EN
3127 | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
3128 | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
3129 wr32(E1000_I2CPARAMS, i2cctl);
3130 wrfl();
3131
3132
3133
3134
3135
3136 adapter->i2c_adap.owner = THIS_MODULE;
3137 adapter->i2c_algo = igb_i2c_algo;
3138 adapter->i2c_algo.data = adapter;
3139 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3140 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3141 strlcpy(adapter->i2c_adap.name, "igb BB",
3142 sizeof(adapter->i2c_adap.name));
3143 status = i2c_bit_add_bus(&adapter->i2c_adap);
3144 return status;
3145 }
3146
3147 /**
3148  *  igb_probe - Device Initialization Routine
3149  *  @pdev: PCI device information struct
3150  *  @ent: entry in igb_pci_tbl
3151  *
3152  *  Returns 0 on success, negative on failure
3153  *
3154  *  igb_probe initializes an adapter identified by a pci_dev structure.
3155  *  The OS initialization, configuring of the adapter private structure,
3156  *  and a hardware reset occur.
3157  **/
3158 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3159 {
3160 struct net_device *netdev;
3161 struct igb_adapter *adapter;
3162 struct e1000_hw *hw;
3163 u16 eeprom_data = 0;
3164 s32 ret_val;
3165 static int global_quad_port_a;
3166 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3167 u8 part_str[E1000_PBANUM_LENGTH];
3168 int err;
3169
3170 /* Catch broken hardware that put the wrong VF device ID in
3171  * the PCIe SR-IOV capability.
3172  */
3173 if (pdev->is_virtfn) {
3174 WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n",
3175 pci_name(pdev), pdev->vendor, pdev->device);
3176 return -EINVAL;
3177 }
3178
3179 err = pci_enable_device_mem(pdev);
3180 if (err)
3181 return err;
3182
3183 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3184 if (err) {
3185 dev_err(&pdev->dev,
3186 "No usable DMA configuration, aborting\n");
3187 goto err_dma;
3188 }
3189
3190 err = pci_request_mem_regions(pdev, igb_driver_name);
3191 if (err)
3192 goto err_pci_reg;
3193
3194 pci_enable_pcie_error_reporting(pdev);
3195
3196 pci_set_master(pdev);
3197 pci_save_state(pdev);
3198
3199 err = -ENOMEM;
3200 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3201 IGB_MAX_TX_QUEUES);
3202 if (!netdev)
3203 goto err_alloc_etherdev;
3204
3205 SET_NETDEV_DEV(netdev, &pdev->dev);
3206
3207 pci_set_drvdata(pdev, netdev);
3208 adapter = netdev_priv(netdev);
3209 adapter->netdev = netdev;
3210 adapter->pdev = pdev;
3211 hw = &adapter->hw;
3212 hw->back = adapter;
3213 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3214
3215 err = -EIO;
3216 adapter->io_addr = pci_iomap(pdev, 0, 0);
3217 if (!adapter->io_addr)
3218 goto err_ioremap;
3219
3220 hw->hw_addr = adapter->io_addr;
3221
3222 netdev->netdev_ops = &igb_netdev_ops;
3223 igb_set_ethtool_ops(netdev);
3224 netdev->watchdog_timeo = 5 * HZ;
3225
3226 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3227
3228 netdev->mem_start = pci_resource_start(pdev, 0);
3229 netdev->mem_end = pci_resource_end(pdev, 0);
3230
3231
3232 hw->vendor_id = pdev->vendor;
3233 hw->device_id = pdev->device;
3234 hw->revision_id = pdev->revision;
3235 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3236 hw->subsystem_device_id = pdev->subsystem_device;
3237
3238
3239 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3240 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3241 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3242
3243 err = ei->get_invariants(hw);
3244 if (err)
3245 goto err_sw_init;
3246
3247
3248 err = igb_sw_init(adapter);
3249 if (err)
3250 goto err_sw_init;
3251
3252 igb_get_bus_info_pcie(hw);
3253
3254 hw->phy.autoneg_wait_to_complete = false;
3255
3256
3257 if (hw->phy.media_type == e1000_media_type_copper) {
3258 hw->phy.mdix = AUTO_ALL_MODES;
3259 hw->phy.disable_polarity_correction = false;
3260 hw->phy.ms_type = e1000_ms_hw_default;
3261 }
3262
3263 if (igb_check_reset_block(hw))
3264 dev_info(&pdev->dev,
3265 "PHY reset is blocked due to SOL/IDER session.\n");
3266
3267
3268
3269
3270
3271 netdev->features |= NETIF_F_SG |
3272 NETIF_F_TSO |
3273 NETIF_F_TSO6 |
3274 NETIF_F_RXHASH |
3275 NETIF_F_RXCSUM |
3276 NETIF_F_HW_CSUM;
3277
3278 if (hw->mac.type >= e1000_82576)
3279 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3280
3281 if (hw->mac.type >= e1000_i350)
3282 netdev->features |= NETIF_F_HW_TC;
3283
3284 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3285 NETIF_F_GSO_GRE_CSUM | \
3286 NETIF_F_GSO_IPXIP4 | \
3287 NETIF_F_GSO_IPXIP6 | \
3288 NETIF_F_GSO_UDP_TUNNEL | \
3289 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3290
3291 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3292 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3293
3294
3295 netdev->hw_features |= netdev->features |
3296 NETIF_F_HW_VLAN_CTAG_RX |
3297 NETIF_F_HW_VLAN_CTAG_TX |
3298 NETIF_F_RXALL;
3299
3300 if (hw->mac.type >= e1000_i350)
3301 netdev->hw_features |= NETIF_F_NTUPLE;
3302
3303 netdev->features |= NETIF_F_HIGHDMA;
3304
3305 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3306 netdev->mpls_features |= NETIF_F_HW_CSUM;
3307 netdev->hw_enc_features |= netdev->vlan_features;
3308
3309
3310 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3311 NETIF_F_HW_VLAN_CTAG_RX |
3312 NETIF_F_HW_VLAN_CTAG_TX;
3313
3314 netdev->priv_flags |= IFF_SUPP_NOFCS;
3315
3316 netdev->priv_flags |= IFF_UNICAST_FLT;
3317
3318
3319 netdev->min_mtu = ETH_MIN_MTU;
3320 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3321
3322 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3323
3324 /* before reading the NVM, reset the controller to put the device
3325  * in a known good starting state
3326  */
3327 hw->mac.ops.reset_hw(hw);
3328
3329 /* make sure the NVM is good; flashless i210/i211 parts use iNVM,
3330  * which does not contain a checksum
3331  */
3332 switch (hw->mac.type) {
3333 case e1000_i210:
3334 case e1000_i211:
3335 if (igb_get_flash_presence_i210(hw)) {
3336 if (hw->nvm.ops.validate(hw) < 0) {
3337 dev_err(&pdev->dev,
3338 "The NVM Checksum Is Not Valid\n");
3339 err = -EIO;
3340 goto err_eeprom;
3341 }
3342 }
3343 break;
3344 default:
3345 if (hw->nvm.ops.validate(hw) < 0) {
3346 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3347 err = -EIO;
3348 goto err_eeprom;
3349 }
3350 break;
3351 }
3352
3353 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3354
3355 if (hw->mac.ops.read_mac_addr(hw))
3356 dev_err(&pdev->dev, "NVM Read Error\n");
3357 }
3358
3359 eth_hw_addr_set(netdev, hw->mac.addr);
3360
3361 if (!is_valid_ether_addr(netdev->dev_addr)) {
3362 dev_err(&pdev->dev, "Invalid MAC Address\n");
3363 err = -EIO;
3364 goto err_eeprom;
3365 }
3366
3367 igb_set_default_mac_filter(adapter);
3368
3369
3370 igb_set_fw_version(adapter);
3371
3372
3373 if (hw->mac.type == e1000_i210) {
3374 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3375 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3376 }
3377
3378 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3379 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3380
3381 INIT_WORK(&adapter->reset_task, igb_reset_task);
3382 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3383
3384
3385 adapter->fc_autoneg = true;
3386 hw->mac.autoneg = true;
3387 hw->phy.autoneg_advertised = 0x2f;
3388
3389 hw->fc.requested_mode = e1000_fc_default;
3390 hw->fc.current_mode = e1000_fc_default;
3391
3392 igb_validate_mdi_setting(hw);
3393
3394
3395 if (hw->bus.func == 0)
3396 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3397
3398
3399 if (hw->mac.type >= e1000_82580)
3400 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3401 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3402 &eeprom_data);
3403 else if (hw->bus.func == 1)
3404 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3405
3406 if (eeprom_data & IGB_EEPROM_APME)
3407 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3408
3409 /* now that we have the eeprom settings, apply the special cases where
3410  * the eeprom may be wrong or the board simply won't support wake on
3411  * lan on a particular port
3412  */
3413 switch (pdev->device) {
3414 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3415 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3416 break;
3417 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3418 case E1000_DEV_ID_82576_FIBER:
3419 case E1000_DEV_ID_82576_SERDES:
3420
3421
3422
3423 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3424 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3425 break;
3426 case E1000_DEV_ID_82576_QUAD_COPPER:
3427 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3428
3429 if (global_quad_port_a != 0)
3430 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3431 else
3432 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3433
3434 if (++global_quad_port_a == 4)
3435 global_quad_port_a = 0;
3436 break;
3437 default:
3438
3439 if (!device_can_wakeup(&adapter->pdev->dev))
3440 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3441 }
3442
3443
3444 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3445 adapter->wol |= E1000_WUFC_MAG;
3446
3447
3448 if ((hw->mac.type == e1000_i350) &&
3449 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3450 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3451 adapter->wol = 0;
3452 }
3453
3454
3455
3456
3457 if (((hw->mac.type == e1000_i350) ||
3458 (hw->mac.type == e1000_i354)) &&
3459 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3460 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3461 adapter->wol = 0;
3462 }
3463 if (hw->mac.type == e1000_i350) {
3464 if (((pdev->subsystem_device == 0x5001) ||
3465 (pdev->subsystem_device == 0x5002)) &&
3466 (hw->bus.func == 0)) {
3467 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3468 adapter->wol = 0;
3469 }
3470 if (pdev->subsystem_device == 0x1F52)
3471 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3472 }
3473
3474 device_set_wakeup_enable(&adapter->pdev->dev,
3475 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3476
3477
3478 igb_reset(adapter);
3479
3480
3481 err = igb_init_i2c(adapter);
3482 if (err) {
3483 dev_err(&pdev->dev, "failed to init i2c interface\n");
3484 goto err_eeprom;
3485 }
3486
3487
3488
3489
3490 igb_get_hw_control(adapter);
3491
3492 strcpy(netdev->name, "eth%d");
3493 err = register_netdev(netdev);
3494 if (err)
3495 goto err_register;
3496
3497
3498 netif_carrier_off(netdev);
3499
3500 #ifdef CONFIG_IGB_DCA
3501 if (dca_add_requester(&pdev->dev) == 0) {
3502 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3503 dev_info(&pdev->dev, "DCA enabled\n");
3504 igb_setup_dca(adapter);
3505 }
3506
3507 #endif
3508 #ifdef CONFIG_IGB_HWMON
3509
3510 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3511 u16 ets_word;
3512
3513
3514
3515
3516 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3517 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3518 adapter->ets = true;
3519 else
3520 adapter->ets = false;
3521 if (igb_sysfs_init(adapter))
3522 dev_err(&pdev->dev,
3523 "failed to allocate sysfs resources\n");
3524 } else {
3525 adapter->ets = false;
3526 }
3527 #endif
3528
3529 adapter->ei = *ei;
3530 if (hw->dev_spec._82575.mas_capable)
3531 igb_init_mas(adapter);
3532
3533
3534 igb_ptp_init(adapter);
3535
3536 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3537
3538 if (hw->mac.type != e1000_i354) {
3539 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3540 netdev->name,
3541 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3542 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3543 "unknown"),
3544 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3545 "Width x4" :
3546 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3547 "Width x2" :
3548 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3549 "Width x1" : "unknown"), netdev->dev_addr);
3550 }
3551
3552 if ((hw->mac.type == e1000_82576 &&
3553 rd32(E1000_EECD) & E1000_EECD_PRES) ||
3554 (hw->mac.type >= e1000_i210 ||
3555 igb_get_flash_presence_i210(hw))) {
3556 ret_val = igb_read_part_string(hw, part_str,
3557 E1000_PBANUM_LENGTH);
3558 } else {
3559 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3560 }
3561
3562 if (ret_val)
3563 strcpy(part_str, "Unknown");
3564 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3565 dev_info(&pdev->dev,
3566 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3567 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3568 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3569 adapter->num_rx_queues, adapter->num_tx_queues);
3570 if (hw->phy.media_type == e1000_media_type_copper) {
3571 switch (hw->mac.type) {
3572 case e1000_i350:
3573 case e1000_i210:
3574 case e1000_i211:
3575
3576 err = igb_set_eee_i350(hw, true, true);
3577 if ((!err) &&
3578 (!hw->dev_spec._82575.eee_disable)) {
3579 adapter->eee_advert =
3580 MDIO_EEE_100TX | MDIO_EEE_1000T;
3581 adapter->flags |= IGB_FLAG_EEE;
3582 }
3583 break;
3584 case e1000_i354:
3585 if ((rd32(E1000_CTRL_EXT) &
3586 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3587 err = igb_set_eee_i354(hw, true, true);
3588 if ((!err) &&
3589 (!hw->dev_spec._82575.eee_disable)) {
3590 adapter->eee_advert =
3591 MDIO_EEE_100TX | MDIO_EEE_1000T;
3592 adapter->flags |= IGB_FLAG_EEE;
3593 }
3594 }
3595 break;
3596 default:
3597 break;
3598 }
3599 }
3600
3601 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3602
3603 pm_runtime_put_noidle(&pdev->dev);
3604 return 0;
3605
3606 err_register:
3607 igb_release_hw_control(adapter);
3608 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3609 err_eeprom:
3610 if (!igb_check_reset_block(hw))
3611 igb_reset_phy(hw);
3612
3613 if (hw->flash_address)
3614 iounmap(hw->flash_address);
3615 err_sw_init:
3616 kfree(adapter->mac_table);
3617 kfree(adapter->shadow_vfta);
3618 igb_clear_interrupt_scheme(adapter);
3619 #ifdef CONFIG_PCI_IOV
3620 igb_disable_sriov(pdev);
3621 #endif
3622 pci_iounmap(pdev, adapter->io_addr);
3623 err_ioremap:
3624 free_netdev(netdev);
3625 err_alloc_etherdev:
3626 pci_disable_pcie_error_reporting(pdev);
3627 pci_release_mem_regions(pdev);
3628 err_pci_reg:
3629 err_dma:
3630 pci_disable_device(pdev);
3631 return err;
3632 }
3633
3634 #ifdef CONFIG_PCI_IOV
3635 static int igb_disable_sriov(struct pci_dev *pdev)
3636 {
3637 struct net_device *netdev = pci_get_drvdata(pdev);
3638 struct igb_adapter *adapter = netdev_priv(netdev);
3639 struct e1000_hw *hw = &adapter->hw;
3640 unsigned long flags;
3641
3642
3643 if (adapter->vf_data) {
3644
3645 if (pci_vfs_assigned(pdev)) {
3646 dev_warn(&pdev->dev,
3647 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3648 return -EPERM;
3649 } else {
3650 pci_disable_sriov(pdev);
3651 msleep(500);
3652 }
3653 spin_lock_irqsave(&adapter->vfs_lock, flags);
3654 kfree(adapter->vf_mac_list);
3655 adapter->vf_mac_list = NULL;
3656 kfree(adapter->vf_data);
3657 adapter->vf_data = NULL;
3658 adapter->vfs_allocated_count = 0;
3659 spin_unlock_irqrestore(&adapter->vfs_lock, flags);
3660 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3661 wrfl();
3662 msleep(100);
3663 dev_info(&pdev->dev, "IOV Disabled\n");
3664
3665
3666 adapter->flags |= IGB_FLAG_DMAC;
3667 }
3668
3669 return 0;
3670 }
3671
3672 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3673 {
3674 struct net_device *netdev = pci_get_drvdata(pdev);
3675 struct igb_adapter *adapter = netdev_priv(netdev);
3676 int old_vfs = pci_num_vf(pdev);
3677 struct vf_mac_filter *mac_list;
3678 int err = 0;
3679 int num_vf_mac_filters, i;
3680
3681 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3682 err = -EPERM;
3683 goto out;
3684 }
3685 if (!num_vfs)
3686 goto out;
3687
3688 if (old_vfs) {
3689 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3690 old_vfs, max_vfs);
3691 adapter->vfs_allocated_count = old_vfs;
3692 } else
3693 adapter->vfs_allocated_count = num_vfs;
3694
3695 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3696 sizeof(struct vf_data_storage), GFP_KERNEL);
3697
3698
3699 if (!adapter->vf_data) {
3700 adapter->vfs_allocated_count = 0;
3701 err = -ENOMEM;
3702 goto out;
3703 }
3704
3705
3706
3707
3708
3709
3710 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3711 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3712 adapter->vfs_allocated_count);
3713
3714 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3715 sizeof(struct vf_mac_filter),
3716 GFP_KERNEL);
3717
3718 mac_list = adapter->vf_mac_list;
3719 INIT_LIST_HEAD(&adapter->vf_macs.l);
3720
3721 if (adapter->vf_mac_list) {
3722
3723 for (i = 0; i < num_vf_mac_filters; i++) {
3724 mac_list->vf = -1;
3725 mac_list->free = true;
3726 list_add(&mac_list->l, &adapter->vf_macs.l);
3727 mac_list++;
3728 }
3729 } else {
3730
3731
3732
3733 dev_err(&pdev->dev,
3734 "Unable to allocate memory for VF MAC filter list\n");
3735 }
3736
3737
3738 if (!old_vfs) {
3739 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3740 if (err)
3741 goto err_out;
3742 }
3743 dev_info(&pdev->dev, "%d VFs allocated\n",
3744 adapter->vfs_allocated_count);
3745 for (i = 0; i < adapter->vfs_allocated_count; i++)
3746 igb_vf_configure(adapter, i);
3747
3748
3749 adapter->flags &= ~IGB_FLAG_DMAC;
3750 goto out;
3751
3752 err_out:
3753 kfree(adapter->vf_mac_list);
3754 adapter->vf_mac_list = NULL;
3755 kfree(adapter->vf_data);
3756 adapter->vf_data = NULL;
3757 adapter->vfs_allocated_count = 0;
3758 out:
3759 return err;
3760 }
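/* igb_enable_sriov() is normally driven through the standard PCI sysfs
 * interface rather than the legacy max_vfs module parameter, for example
 * (illustrative; assumes the PF sits at 0000:01:00.0):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * The 82576/i350 PF supports at most 7 VFs, which is why num_vfs > 7 is
 * rejected above.
 */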
3761
3762 #endif
3763
3764 /**
3765  *  igb_remove_i2c - Cleanup the I2C interface
3766  **/
3767 static void igb_remove_i2c(struct igb_adapter *adapter)
3768 {
3769
3770 i2c_del_adapter(&adapter->i2c_adap);
3771 }
3772
3773 /**
3774  *  igb_remove - Device Removal Routine
3775  *  @pdev: PCI device information struct
3776  *
3777  *  igb_remove is called by the PCI subsystem to alert the driver
3778  *  that it should release a PCI device.  This could be caused by a
3779  *  Hot-Plug event, or because the driver is going to be removed from
3780  *  memory.
3781  **/
3782 static void igb_remove(struct pci_dev *pdev)
3783 {
3784 struct net_device *netdev = pci_get_drvdata(pdev);
3785 struct igb_adapter *adapter = netdev_priv(netdev);
3786 struct e1000_hw *hw = &adapter->hw;
3787
3788 pm_runtime_get_noresume(&pdev->dev);
3789 #ifdef CONFIG_IGB_HWMON
3790 igb_sysfs_exit(adapter);
3791 #endif
3792 igb_remove_i2c(adapter);
3793 igb_ptp_stop(adapter);
3794
3795
3796
3797 set_bit(__IGB_DOWN, &adapter->state);
3798 del_timer_sync(&adapter->watchdog_timer);
3799 del_timer_sync(&adapter->phy_info_timer);
3800
3801 cancel_work_sync(&adapter->reset_task);
3802 cancel_work_sync(&adapter->watchdog_task);
3803
3804 #ifdef CONFIG_IGB_DCA
3805 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3806 dev_info(&pdev->dev, "DCA disabled\n");
3807 dca_remove_requester(&pdev->dev);
3808 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3809 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3810 }
3811 #endif
3812
3813
3814
3815
3816 igb_release_hw_control(adapter);
3817
3818 #ifdef CONFIG_PCI_IOV
3819 rtnl_lock();
3820 igb_disable_sriov(pdev);
3821 rtnl_unlock();
3822 #endif
3823
3824 unregister_netdev(netdev);
3825
3826 igb_clear_interrupt_scheme(adapter);
3827
3828 pci_iounmap(pdev, adapter->io_addr);
3829 if (hw->flash_address)
3830 iounmap(hw->flash_address);
3831 pci_release_mem_regions(pdev);
3832
3833 kfree(adapter->mac_table);
3834 kfree(adapter->shadow_vfta);
3835 free_netdev(netdev);
3836
3837 pci_disable_pcie_error_reporting(pdev);
3838
3839 pci_disable_device(pdev);
3840 }
3841
3842 /**
3843  *  igb_probe_vfs - Initialize VF data storage and add VFs to PCI config space
3844  *  @adapter: board private structure to initialize
3845  *
3846  *  This function initializes the VF-specific data storage and then attempts
3847  *  to allocate the VFs.  The reason for ordering it this way is because it
3848  *  is much more expensive time-wise to disable SR-IOV than it is to allocate
3849  *  and free the data needed to keep track of the VFs.
3850  **/
3851 static void igb_probe_vfs(struct igb_adapter *adapter)
3852 {
3853 #ifdef CONFIG_PCI_IOV
3854 struct pci_dev *pdev = adapter->pdev;
3855 struct e1000_hw *hw = &adapter->hw;
3856
3857
3858 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3859 return;
3860
3861
3862
3863
3864
3865 igb_set_interrupt_capability(adapter, true);
3866 igb_reset_interrupt_capability(adapter);
3867
3868 pci_sriov_set_totalvfs(pdev, 7);
3869 igb_enable_sriov(pdev, max_vfs);
3870
3871 #endif
3872 }
3873
3874 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3875 {
3876 struct e1000_hw *hw = &adapter->hw;
3877 unsigned int max_rss_queues;
3878
3879
3880 switch (hw->mac.type) {
3881 case e1000_i211:
3882 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3883 break;
3884 case e1000_82575:
3885 case e1000_i210:
3886 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3887 break;
3888 case e1000_i350:
3889
3890 if (!!adapter->vfs_allocated_count) {
3891 max_rss_queues = 1;
3892 break;
3893 }
3894 fallthrough;
3895 case e1000_82576:
3896 if (!!adapter->vfs_allocated_count) {
3897 max_rss_queues = 2;
3898 break;
3899 }
3900 fallthrough;
3901 case e1000_82580:
3902 case e1000_i354:
3903 default:
3904 max_rss_queues = IGB_MAX_RX_QUEUES;
3905 break;
3906 }
3907
3908 return max_rss_queues;
3909 }
3910
3911 static void igb_init_queue_configuration(struct igb_adapter *adapter)
3912 {
3913 u32 max_rss_queues;
3914
3915 max_rss_queues = igb_get_max_rss_queues(adapter);
3916 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3917
3918 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3919 }
3920
3921 void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3922 const u32 max_rss_queues)
3923 {
3924 struct e1000_hw *hw = &adapter->hw;
3925
3926
3927 switch (hw->mac.type) {
3928 case e1000_82575:
3929 case e1000_i211:
3930
3931 break;
3932 case e1000_82576:
3933 case e1000_82580:
3934 case e1000_i350:
3935 case e1000_i354:
3936 case e1000_i210:
3937 default:
3938
3939
3940
3941 if (adapter->rss_queues > (max_rss_queues / 2))
3942 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3943 else
3944 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3945 break;
3946 }
3947 }
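/* Queue pairing (IGB_FLAG_QUEUE_PAIRS) makes a Tx ring share its MSI-X
 * vector and NAPI context with the matching Rx ring.  It is enabled above
 * when the requested RSS queue count exceeds half of what the MAC supports,
 * to conserve the limited supply of interrupt vectors; 82575 and i211 have
 * enough vectors for their queue count, so their flag is left untouched.
 */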
3948
3949 /**
3950  *  igb_sw_init - Initialize general software structures (struct igb_adapter)
3951  *  @adapter: board private structure to initialize
3952  *
3953  *  igb_sw_init initializes the Adapter private data structure.
3954  *  Fields are initialized based on PCI device information and
3955  *  OS network device settings (MTU size).
3956  **/
3957 static int igb_sw_init(struct igb_adapter *adapter)
3958 {
3959 struct e1000_hw *hw = &adapter->hw;
3960 struct net_device *netdev = adapter->netdev;
3961 struct pci_dev *pdev = adapter->pdev;
3962
3963 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3964
3965
3966 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3967 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3968
3969
3970 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3971 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3972
3973
3974 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3975
3976 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
3977 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3978
3979 spin_lock_init(&adapter->nfc_lock);
3980 spin_lock_init(&adapter->stats64_lock);
3981
3982
3983 spin_lock_init(&adapter->vfs_lock);
3984 #ifdef CONFIG_PCI_IOV
3985 switch (hw->mac.type) {
3986 case e1000_82576:
3987 case e1000_i350:
3988 if (max_vfs > 7) {
3989 dev_warn(&pdev->dev,
3990 "Maximum of 7 VFs per PF, using max\n");
3991 max_vfs = adapter->vfs_allocated_count = 7;
3992 } else
3993 adapter->vfs_allocated_count = max_vfs;
3994 if (adapter->vfs_allocated_count)
3995 dev_warn(&pdev->dev,
3996 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3997 break;
3998 default:
3999 break;
4000 }
4001 #endif
4002
4003
4004 adapter->flags |= IGB_FLAG_HAS_MSIX;
4005
4006 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
4007 sizeof(struct igb_mac_addr),
4008 GFP_KERNEL);
4009 if (!adapter->mac_table)
4010 return -ENOMEM;
4011
4012 igb_probe_vfs(adapter);
4013
4014 igb_init_queue_configuration(adapter);
4015
4016
4017 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
4018 GFP_KERNEL);
4019 if (!adapter->shadow_vfta)
4020 return -ENOMEM;
4021
4022
4023 if (igb_init_interrupt_scheme(adapter, true)) {
4024 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4025 return -ENOMEM;
4026 }
4027
4028
4029 igb_irq_disable(adapter);
4030
4031 if (hw->mac.type >= e1000_i350)
4032 adapter->flags &= ~IGB_FLAG_DMAC;
4033
4034 set_bit(__IGB_DOWN, &adapter->state);
4035 return 0;
4036 }
4037
4038 /**
4039  *  __igb_open - Called when a network interface is made active
4040  *  @netdev: network interface device structure
4041  *  @resuming: indicates whether we are in a resume call
4042  *
4043  *  Returns 0 on success, negative value on failure
4044  *
4045  *  The open entry point is called when a network interface is made
4046  *  active by the system (IFF_UP).  At this point all resources needed
4047  *  for transmit and receive operations are allocated, the interrupt
4048  *  handler is registered with the OS, the watchdog timer is started,
4049  *  and the stack is notified that the interface is ready.
4050  **/
4051 static int __igb_open(struct net_device *netdev, bool resuming)
4052 {
4053 struct igb_adapter *adapter = netdev_priv(netdev);
4054 struct e1000_hw *hw = &adapter->hw;
4055 struct pci_dev *pdev = adapter->pdev;
4056 int err;
4057 int i;
4058
4059
4060 if (test_bit(__IGB_TESTING, &adapter->state)) {
4061 WARN_ON(resuming);
4062 return -EBUSY;
4063 }
4064
4065 if (!resuming)
4066 pm_runtime_get_sync(&pdev->dev);
4067
4068 netif_carrier_off(netdev);
4069
4070
4071 err = igb_setup_all_tx_resources(adapter);
4072 if (err)
4073 goto err_setup_tx;
4074
4075
4076 err = igb_setup_all_rx_resources(adapter);
4077 if (err)
4078 goto err_setup_rx;
4079
4080 igb_power_up_link(adapter);
4081
4082
4083
4084
4085
4086
4087 igb_configure(adapter);
4088
4089 err = igb_request_irq(adapter);
4090 if (err)
4091 goto err_req_irq;
4092
4093
4094 err = netif_set_real_num_tx_queues(adapter->netdev,
4095 adapter->num_tx_queues);
4096 if (err)
4097 goto err_set_queues;
4098
4099 err = netif_set_real_num_rx_queues(adapter->netdev,
4100 adapter->num_rx_queues);
4101 if (err)
4102 goto err_set_queues;
4103
4104
4105 clear_bit(__IGB_DOWN, &adapter->state);
4106
4107 for (i = 0; i < adapter->num_q_vectors; i++)
4108 napi_enable(&(adapter->q_vector[i]->napi));
4109
4110
4111 rd32(E1000_TSICR);
4112 rd32(E1000_ICR);
4113
4114 igb_irq_enable(adapter);
4115
4116
4117 if (adapter->vfs_allocated_count) {
4118 u32 reg_data = rd32(E1000_CTRL_EXT);
4119
4120 reg_data |= E1000_CTRL_EXT_PFRSTD;
4121 wr32(E1000_CTRL_EXT, reg_data);
4122 }
4123
4124 netif_tx_start_all_queues(netdev);
4125
4126 if (!resuming)
4127 pm_runtime_put(&pdev->dev);
4128
4129
4130 hw->mac.get_link_status = 1;
4131 schedule_work(&adapter->watchdog_task);
4132
4133 return 0;
4134
4135 err_set_queues:
4136 igb_free_irq(adapter);
4137 err_req_irq:
4138 igb_release_hw_control(adapter);
4139 igb_power_down_link(adapter);
4140 igb_free_all_rx_resources(adapter);
4141 err_setup_rx:
4142 igb_free_all_tx_resources(adapter);
4143 err_setup_tx:
4144 igb_reset(adapter);
4145 if (!resuming)
4146 pm_runtime_put(&pdev->dev);
4147
4148 return err;
4149 }
4150
4151 int igb_open(struct net_device *netdev)
4152 {
4153 return __igb_open(netdev, false);
4154 }
4155
4156 /**
4157  *  __igb_close - Disables a network interface
4158  *  @netdev: network interface device structure
4159  *  @suspending: indicates we are in a suspend call
4160  *
4161  *  Returns 0, this is not allowed to fail
4162  *
4163  *  The close entry point is called when an interface is de-activated
4164  *  by the OS.  The hardware is still under the driver's control, but
4165  *  needs to be disabled.  A global MAC reset is issued to stop the
4166  *  hardware, and all transmit and receive resources are freed.
4167  **/
4168 static int __igb_close(struct net_device *netdev, bool suspending)
4169 {
4170 struct igb_adapter *adapter = netdev_priv(netdev);
4171 struct pci_dev *pdev = adapter->pdev;
4172
4173 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4174
4175 if (!suspending)
4176 pm_runtime_get_sync(&pdev->dev);
4177
4178 igb_down(adapter);
4179 igb_free_irq(adapter);
4180
4181 igb_free_all_tx_resources(adapter);
4182 igb_free_all_rx_resources(adapter);
4183
4184 if (!suspending)
4185 pm_runtime_put_sync(&pdev->dev);
4186 return 0;
4187 }
4188
4189 int igb_close(struct net_device *netdev)
4190 {
4191 if (netif_device_present(netdev) || netdev->dismantle)
4192 return __igb_close(netdev, false);
4193 return 0;
4194 }
4195
4196 /**
4197  *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
4198  *  @tx_ring: tx descriptor ring (for a specific queue) to set up
4199  *
4200  *  Return 0 on success, negative on failure
4201  **/
4202 int igb_setup_tx_resources(struct igb_ring *tx_ring)
4203 {
4204 struct device *dev = tx_ring->dev;
4205 int size;
4206
4207 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4208
4209 tx_ring->tx_buffer_info = vmalloc(size);
4210 if (!tx_ring->tx_buffer_info)
4211 goto err;
4212
4213
4214 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4215 tx_ring->size = ALIGN(tx_ring->size, 4096);
4216
4217 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4218 &tx_ring->dma, GFP_KERNEL);
4219 if (!tx_ring->desc)
4220 goto err;
4221
4222 tx_ring->next_to_use = 0;
4223 tx_ring->next_to_clean = 0;
4224
4225 return 0;
4226
4227 err:
4228 vfree(tx_ring->tx_buffer_info);
4229 tx_ring->tx_buffer_info = NULL;
4230 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4231 return -ENOMEM;
4232 }
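/* Sizing example for the allocation above (illustrative numbers): with the
 * default IGB_DEFAULT_TXD of 256 descriptors and 16-byte advanced Tx
 * descriptors, tx_ring->size is 256 * 16 = 4096 bytes, which ALIGN() keeps
 * at one 4 KiB page; a maximum-size 4096-entry ring rounds to 64 KiB of
 * coherent DMA memory plus the vmalloc'ed tx_buffer_info array.
 */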
4233
4234 /**
4235  *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
4236  *  (Descriptors) for all queues
4237  *  @adapter: board private structure
4238  *
4239  *  Return 0 on success, negative on failure
4240  **/
4241 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4242 {
4243 struct pci_dev *pdev = adapter->pdev;
4244 int i, err = 0;
4245
4246 for (i = 0; i < adapter->num_tx_queues; i++) {
4247 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4248 if (err) {
4249 dev_err(&pdev->dev,
4250 "Allocation for Tx Queue %u failed\n", i);
4251 for (i--; i >= 0; i--)
4252 igb_free_tx_resources(adapter->tx_ring[i]);
4253 break;
4254 }
4255 }
4256
4257 return err;
4258 }
4259
4260 /**
4261  *  igb_setup_tctl - configure the transmit control registers
4262  *  @adapter: Board private structure
4263  **/
4264 void igb_setup_tctl(struct igb_adapter *adapter)
4265 {
4266 struct e1000_hw *hw = &adapter->hw;
4267 u32 tctl;
4268
4269
4270 wr32(E1000_TXDCTL(0), 0);
4271
4272
4273 tctl = rd32(E1000_TCTL);
4274 tctl &= ~E1000_TCTL_CT;
4275 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4276 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4277
4278 igb_config_collision_dist(hw);
4279
4280
4281 tctl |= E1000_TCTL_EN;
4282
4283 wr32(E1000_TCTL, tctl);
4284 }
4285
4286 /**
4287  *  igb_configure_tx_ring - Configure transmit ring after Reset
4288  *  @adapter: board private structure
4289  *  @ring: tx ring to configure
4290  *
4291  *  Configure a transmit ring after a reset.
4292  **/
4293 void igb_configure_tx_ring(struct igb_adapter *adapter,
4294 struct igb_ring *ring)
4295 {
4296 struct e1000_hw *hw = &adapter->hw;
4297 u32 txdctl = 0;
4298 u64 tdba = ring->dma;
4299 int reg_idx = ring->reg_idx;
4300
4301 wr32(E1000_TDLEN(reg_idx),
4302 ring->count * sizeof(union e1000_adv_tx_desc));
4303 wr32(E1000_TDBAL(reg_idx),
4304 tdba & 0x00000000ffffffffULL);
4305 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4306
4307 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4308 wr32(E1000_TDH(reg_idx), 0);
4309 writel(0, ring->tail);
4310
4311 txdctl |= IGB_TX_PTHRESH;
4312 txdctl |= IGB_TX_HTHRESH << 8;
4313 txdctl |= IGB_TX_WTHRESH << 16;
4314
4315
4316 memset(ring->tx_buffer_info, 0,
4317 sizeof(struct igb_tx_buffer) * ring->count);
4318
4319 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4320 wr32(E1000_TXDCTL(reg_idx), txdctl);
4321 }
4322
4323 /**
4324  *  igb_configure_tx - Configure transmit Unit after Reset
4325  *  @adapter: board private structure
4326  *
4327  *  Configure the Tx unit of the MAC after a reset.
4328  **/
4329 static void igb_configure_tx(struct igb_adapter *adapter)
4330 {
4331 struct e1000_hw *hw = &adapter->hw;
4332 int i;
4333
4334
4335 for (i = 0; i < adapter->num_tx_queues; i++)
4336 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4337
4338 wrfl();
4339 usleep_range(10000, 20000);
4340
4341 for (i = 0; i < adapter->num_tx_queues; i++)
4342 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4343 }
4344
4345 /**
4346  *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
4347  *  @rx_ring: Rx descriptor ring (for a specific queue) to set up
4348  *
4349  *  Returns 0 on success, negative on failure
4350  **/
4351 int igb_setup_rx_resources(struct igb_ring *rx_ring)
4352 {
4353 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
4354 struct device *dev = rx_ring->dev;
4355 int size, res;
4356
4357
4358 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
4359 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4360 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4361 rx_ring->queue_index, 0);
4362 if (res < 0) {
4363 dev_err(dev, "Failed to register xdp_rxq index %u\n",
4364 rx_ring->queue_index);
4365 return res;
4366 }
4367
4368 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4369
4370 rx_ring->rx_buffer_info = vmalloc(size);
4371 if (!rx_ring->rx_buffer_info)
4372 goto err;
4373
4374
4375 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4376 rx_ring->size = ALIGN(rx_ring->size, 4096);
4377
4378 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4379 &rx_ring->dma, GFP_KERNEL);
4380 if (!rx_ring->desc)
4381 goto err;
4382
4383 rx_ring->next_to_alloc = 0;
4384 rx_ring->next_to_clean = 0;
4385 rx_ring->next_to_use = 0;
4386
4387 rx_ring->xdp_prog = adapter->xdp_prog;
4388
4389 return 0;
4390
4391 err:
4392 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4393 vfree(rx_ring->rx_buffer_info);
4394 rx_ring->rx_buffer_info = NULL;
4395 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4396 return -ENOMEM;
4397 }
4398
4399 /**
4400  *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
4401  *  (Descriptors) for all queues
4402  *  @adapter: board private structure
4403  *
4404  *  Return 0 on success, negative on failure
4405  **/
4406 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4407 {
4408 struct pci_dev *pdev = adapter->pdev;
4409 int i, err = 0;
4410
4411 for (i = 0; i < adapter->num_rx_queues; i++) {
4412 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4413 if (err) {
4414 dev_err(&pdev->dev,
4415 "Allocation for Rx Queue %u failed\n", i);
4416 for (i--; i >= 0; i--)
4417 igb_free_rx_resources(adapter->rx_ring[i]);
4418 break;
4419 }
4420 }
4421
4422 return err;
4423 }
4424
4425 /**
4426  *  igb_setup_mrqc - configure the multiple receive queue control registers
4427  *  @adapter: Board private structure
4428  **/
4429 static void igb_setup_mrqc(struct igb_adapter *adapter)
4430 {
4431 struct e1000_hw *hw = &adapter->hw;
4432 u32 mrqc, rxcsum;
4433 u32 j, num_rx_queues;
4434 u32 rss_key[10];
4435
4436 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4437 for (j = 0; j < 10; j++)
4438 wr32(E1000_RSSRK(j), rss_key[j]);
4439
4440 num_rx_queues = adapter->rss_queues;
4441
4442 switch (hw->mac.type) {
4443 case e1000_82576:
4444
4445 if (adapter->vfs_allocated_count)
4446 num_rx_queues = 2;
4447 break;
4448 default:
4449 break;
4450 }
4451
4452 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4453 for (j = 0; j < IGB_RETA_SIZE; j++)
4454 adapter->rss_indir_tbl[j] =
4455 (j * num_rx_queues) / IGB_RETA_SIZE;
4456 adapter->rss_indir_tbl_init = num_rx_queues;
4457 }
4458 igb_write_rss_indir_tbl(adapter);
4459
4460
4461
4462
4463
4464 rxcsum = rd32(E1000_RXCSUM);
4465 rxcsum |= E1000_RXCSUM_PCSD;
4466
4467 if (adapter->hw.mac.type >= e1000_82576)
4468
4469 rxcsum |= E1000_RXCSUM_CRCOFL;
4470
4471
4472 wr32(E1000_RXCSUM, rxcsum);
4473
4474
4475
4476
4477 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4478 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4479 E1000_MRQC_RSS_FIELD_IPV6 |
4480 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4481 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4482
4483 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4484 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4485 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4486 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4487
4488
4489
4490
4491
4492 if (adapter->vfs_allocated_count) {
4493 if (hw->mac.type > e1000_82575) {
4494
4495 u32 vtctl = rd32(E1000_VT_CTL);
4496
4497 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4498 E1000_VT_CTL_DISABLE_DEF_POOL);
4499 vtctl |= adapter->vfs_allocated_count <<
4500 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4501 wr32(E1000_VT_CTL, vtctl);
4502 }
4503 if (adapter->rss_queues > 1)
4504 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4505 else
4506 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4507 } else {
4508 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4509 }
4510 igb_vmm_control(adapter);
4511
4512 wr32(E1000_MRQC, mrqc);
4513 }
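/* The default indirection table programmed above spreads the 128 RETA
 * entries evenly across the active queues via (j * num_rx_queues) /
 * IGB_RETA_SIZE.  For example, with 4 RSS queues, entries 0-31 map to
 * queue 0, 32-63 to queue 1, 64-95 to queue 2 and 96-127 to queue 3.
 */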
4514
4515 /**
4516  *  igb_setup_rctl - configure the receive control registers
4517  *  @adapter: Board private structure
4518  **/
4519 void igb_setup_rctl(struct igb_adapter *adapter)
4520 {
4521 struct e1000_hw *hw = &adapter->hw;
4522 u32 rctl;
4523
4524 rctl = rd32(E1000_RCTL);
4525
4526 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4527 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4528
4529 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4530 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4531
4532
4533
4534
4535
4536 rctl |= E1000_RCTL_SECRC;
4537
4538
4539 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4540
4541
4542 rctl |= E1000_RCTL_LPE;
4543
4544
4545 wr32(E1000_RXDCTL(0), 0);
4546
4547
4548
4549
4550
4551 if (adapter->vfs_allocated_count) {
4552
4553 wr32(E1000_QDE, ALL_QUEUES);
4554 }
4555
4556
4557 if (adapter->netdev->features & NETIF_F_RXALL) {
4558
4559
4560
4561 rctl |= (E1000_RCTL_SBP |
4562 E1000_RCTL_BAM |
4563 E1000_RCTL_PMCF);
4564
4565 rctl &= ~(E1000_RCTL_DPF |
4566 E1000_RCTL_CFIEN);
4567
4568
4569
4570 }
4571
4572 wr32(E1000_RCTL, rctl);
4573 }
4574
4575 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4576 int vfn)
4577 {
4578 struct e1000_hw *hw = &adapter->hw;
4579 u32 vmolr;
4580
4581 if (size > MAX_JUMBO_FRAME_SIZE)
4582 size = MAX_JUMBO_FRAME_SIZE;
4583
4584 vmolr = rd32(E1000_VMOLR(vfn));
4585 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4586 vmolr |= size | E1000_VMOLR_LPE;
4587 wr32(E1000_VMOLR(vfn), vmolr);
4588
4589 return 0;
4590 }
4591
4592 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4593 int vfn, bool enable)
4594 {
4595 struct e1000_hw *hw = &adapter->hw;
4596 u32 val, reg;
4597
4598 if (hw->mac.type < e1000_82576)
4599 return;
4600
4601 if (hw->mac.type == e1000_i350)
4602 reg = E1000_DVMOLR(vfn);
4603 else
4604 reg = E1000_VMOLR(vfn);
4605
4606 val = rd32(reg);
4607 if (enable)
4608 val |= E1000_VMOLR_STRVLAN;
4609 else
4610 val &= ~(E1000_VMOLR_STRVLAN);
4611 wr32(reg, val);
4612 }
4613
4614 static inline void igb_set_vmolr(struct igb_adapter *adapter,
4615 int vfn, bool aupe)
4616 {
4617 struct e1000_hw *hw = &adapter->hw;
4618 u32 vmolr;
4619
4620
4621
4622
4623 if (hw->mac.type < e1000_82576)
4624 return;
4625
4626 vmolr = rd32(E1000_VMOLR(vfn));
4627 if (aupe)
4628 vmolr |= E1000_VMOLR_AUPE;
4629 else
4630 vmolr &= ~(E1000_VMOLR_AUPE);
4631
4632
4633 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4634
4635 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4636 vmolr |= E1000_VMOLR_RSSE;
4637
4638
4639
4640 if (vfn <= adapter->vfs_allocated_count)
4641 vmolr |= E1000_VMOLR_BAM;
4642
4643 wr32(E1000_VMOLR(vfn), vmolr);
4644 }
4645
4646 /**
4647  *  igb_setup_srrctl - configure the split and replication receive control
4648  *  registers
4649  *  @adapter: Board private structure
4650  *  @ring: receive ring to be configured
4651  **/
4652 void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4653 {
4654 struct e1000_hw *hw = &adapter->hw;
4655 int reg_idx = ring->reg_idx;
4656 u32 srrctl = 0;
4657
4658 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4659 if (ring_uses_large_buffer(ring))
4660 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4661 else
4662 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4663 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4664 if (hw->mac.type >= e1000_82580)
4665 srrctl |= E1000_SRRCTL_TIMESTAMP;
4666
4667
4668
4669 if (adapter->vfs_allocated_count ||
4670 (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4671 adapter->num_rx_queues > 1))
4672 srrctl |= E1000_SRRCTL_DROP_EN;
4673
4674 wr32(E1000_SRRCTL(reg_idx), srrctl);
4675 }
4676
4677 /**
4678  *  igb_configure_rx_ring - Configure a receive ring after Reset
4679  *  @adapter: board private structure
4680  *  @ring: receive ring to be configured
4681  *
4682  *  Configure the Rx unit of the MAC after a reset.
4683  **/
4684 void igb_configure_rx_ring(struct igb_adapter *adapter,
4685 struct igb_ring *ring)
4686 {
4687 struct e1000_hw *hw = &adapter->hw;
4688 union e1000_adv_rx_desc *rx_desc;
4689 u64 rdba = ring->dma;
4690 int reg_idx = ring->reg_idx;
4691 u32 rxdctl = 0;
4692
4693 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4694 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4695 MEM_TYPE_PAGE_SHARED, NULL));
4696
4697
4698 wr32(E1000_RXDCTL(reg_idx), 0);
4699
4700
4701 wr32(E1000_RDBAL(reg_idx),
4702 rdba & 0x00000000ffffffffULL);
4703 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4704 wr32(E1000_RDLEN(reg_idx),
4705 ring->count * sizeof(union e1000_adv_rx_desc));
4706
4707
4708 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4709 wr32(E1000_RDH(reg_idx), 0);
4710 writel(0, ring->tail);
4711
4712
4713 igb_setup_srrctl(adapter, ring);
4714
4715
4716 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4717
4718 rxdctl |= IGB_RX_PTHRESH;
4719 rxdctl |= IGB_RX_HTHRESH << 8;
4720 rxdctl |= IGB_RX_WTHRESH << 16;
4721
4722
4723 memset(ring->rx_buffer_info, 0,
4724 sizeof(struct igb_rx_buffer) * ring->count);
4725
4726
4727 rx_desc = IGB_RX_DESC(ring, 0);
4728 rx_desc->wb.upper.length = 0;
4729
4730
4731 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4732 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4733 }
4734
4735 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4736 struct igb_ring *rx_ring)
4737 {
4738
4739 clear_ring_build_skb_enabled(rx_ring);
4740 clear_ring_uses_large_buffer(rx_ring);
4741
4742 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4743 return;
4744
4745 set_ring_build_skb_enabled(rx_ring);
4746
4747 #if (PAGE_SIZE < 8192)
4748 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4749 return;
4750
4751 set_ring_uses_large_buffer(rx_ring);
4752 #endif
4753 }
4754
4755
4756
4757
4758
4759
4760
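/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/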
4761 static void igb_configure_rx(struct igb_adapter *adapter)
4762 {
4763 int i;
4764
4765
4766 igb_set_default_mac_filter(adapter);
4767
4768
4769
4770
4771 for (i = 0; i < adapter->num_rx_queues; i++) {
4772 struct igb_ring *rx_ring = adapter->rx_ring[i];
4773
4774 igb_set_rx_buffer_len(adapter, rx_ring);
4775 igb_configure_rx_ring(adapter, rx_ring);
4776 }
4777 }
4778
4779
4780
4781
4782
4783
4784
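/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources.
 **/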
4785 void igb_free_tx_resources(struct igb_ring *tx_ring)
4786 {
4787 igb_clean_tx_ring(tx_ring);
4788
4789 vfree(tx_ring->tx_buffer_info);
4790 tx_ring->tx_buffer_info = NULL;
4791
4792
4793 if (!tx_ring->desc)
4794 return;
4795
4796 dma_free_coherent(tx_ring->dev, tx_ring->size,
4797 tx_ring->desc, tx_ring->dma);
4798
4799 tx_ring->desc = NULL;
4800 }
4801
4802
4803
4804
4805
4806
4807
4808 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4809 {
4810 int i;
4811
4812 for (i = 0; i < adapter->num_tx_queues; i++)
4813 if (adapter->tx_ring[i])
4814 igb_free_tx_resources(adapter->tx_ring[i]);
4815 }
4816
4817
4818
4819
4820
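/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/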
4821 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4822 {
4823 u16 i = tx_ring->next_to_clean;
4824 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4825
4826 while (i != tx_ring->next_to_use) {
4827 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4828
4829
4830 if (tx_buffer->type == IGB_TYPE_SKB)
4831 dev_kfree_skb_any(tx_buffer->skb);
4832 else
4833 xdp_return_frame(tx_buffer->xdpf);
4834
4835
4836 dma_unmap_single(tx_ring->dev,
4837 dma_unmap_addr(tx_buffer, dma),
4838 dma_unmap_len(tx_buffer, len),
4839 DMA_TO_DEVICE);
4840
4841
4842 eop_desc = tx_buffer->next_to_watch;
4843 tx_desc = IGB_TX_DESC(tx_ring, i);
4844
4845
4846 while (tx_desc != eop_desc) {
4847 tx_buffer++;
4848 tx_desc++;
4849 i++;
4850 if (unlikely(i == tx_ring->count)) {
4851 i = 0;
4852 tx_buffer = tx_ring->tx_buffer_info;
4853 tx_desc = IGB_TX_DESC(tx_ring, 0);
4854 }
4855
4856
4857 if (dma_unmap_len(tx_buffer, len))
4858 dma_unmap_page(tx_ring->dev,
4859 dma_unmap_addr(tx_buffer, dma),
4860 dma_unmap_len(tx_buffer, len),
4861 DMA_TO_DEVICE);
4862 }
4863
4864 tx_buffer->next_to_watch = NULL;
4865
4866
4867 tx_buffer++;
4868 i++;
4869 if (unlikely(i == tx_ring->count)) {
4870 i = 0;
4871 tx_buffer = tx_ring->tx_buffer_info;
4872 }
4873 }
4874
4875
4876 netdev_tx_reset_queue(txring_txq(tx_ring));
4877
4878
4879 tx_ring->next_to_use = 0;
4880 tx_ring->next_to_clean = 0;
4881 }
4882
4883
4884
4885
4886
4887 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4888 {
4889 int i;
4890
4891 for (i = 0; i < adapter->num_tx_queues; i++)
4892 if (adapter->tx_ring[i])
4893 igb_clean_tx_ring(adapter->tx_ring[i]);
4894 }
4895
4896
4897
4898
4899
4900
4901
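/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources.
 **/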
4902 void igb_free_rx_resources(struct igb_ring *rx_ring)
4903 {
4904 igb_clean_rx_ring(rx_ring);
4905
4906 rx_ring->xdp_prog = NULL;
4907 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4908 vfree(rx_ring->rx_buffer_info);
4909 rx_ring->rx_buffer_info = NULL;
4910
4911
4912 if (!rx_ring->desc)
4913 return;
4914
4915 dma_free_coherent(rx_ring->dev, rx_ring->size,
4916 rx_ring->desc, rx_ring->dma);
4917
4918 rx_ring->desc = NULL;
4919 }
4920
4921
4922
4923
4924
4925
4926
4927 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4928 {
4929 int i;
4930
4931 for (i = 0; i < adapter->num_rx_queues; i++)
4932 if (adapter->rx_ring[i])
4933 igb_free_rx_resources(adapter->rx_ring[i]);
4934 }
4935
4936
4937
4938
4939
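/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/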
4940 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4941 {
4942 u16 i = rx_ring->next_to_clean;
4943
4944 dev_kfree_skb(rx_ring->skb);
4945 rx_ring->skb = NULL;
4946
4947
4948 while (i != rx_ring->next_to_alloc) {
4949 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4950
4951
4952
4953
4954 dma_sync_single_range_for_cpu(rx_ring->dev,
4955 buffer_info->dma,
4956 buffer_info->page_offset,
4957 igb_rx_bufsz(rx_ring),
4958 DMA_FROM_DEVICE);
4959
4960
4961 dma_unmap_page_attrs(rx_ring->dev,
4962 buffer_info->dma,
4963 igb_rx_pg_size(rx_ring),
4964 DMA_FROM_DEVICE,
4965 IGB_RX_DMA_ATTR);
4966 __page_frag_cache_drain(buffer_info->page,
4967 buffer_info->pagecnt_bias);
4968
4969 i++;
4970 if (i == rx_ring->count)
4971 i = 0;
4972 }
4973
4974 rx_ring->next_to_alloc = 0;
4975 rx_ring->next_to_clean = 0;
4976 rx_ring->next_to_use = 0;
4977 }
4978
4979
4980
4981
4982
4983 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4984 {
4985 int i;
4986
4987 for (i = 0; i < adapter->num_rx_queues; i++)
4988 if (adapter->rx_ring[i])
4989 igb_clean_rx_ring(adapter->rx_ring[i]);
4990 }
4991
4992
4993
4994
4995
4996
4997
4998
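/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure.
 **/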
4999 static int igb_set_mac(struct net_device *netdev, void *p)
5000 {
5001 struct igb_adapter *adapter = netdev_priv(netdev);
5002 struct e1000_hw *hw = &adapter->hw;
5003 struct sockaddr *addr = p;
5004
5005 if (!is_valid_ether_addr(addr->sa_data))
5006 return -EADDRNOTAVAIL;
5007
5008 eth_hw_addr_set(netdev, addr->sa_data);
5009 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5010
5011
5012 igb_set_default_mac_filter(adapter);
5013
5014 return 0;
5015 }
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
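/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes the multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 if no addresses were written
 *          X when writing X addresses to the MTA
 **/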
5026 static int igb_write_mc_addr_list(struct net_device *netdev)
5027 {
5028 struct igb_adapter *adapter = netdev_priv(netdev);
5029 struct e1000_hw *hw = &adapter->hw;
5030 struct netdev_hw_addr *ha;
5031 u8 *mta_list;
5032 int i;
5033
5034 if (netdev_mc_empty(netdev)) {
5035
5036 igb_update_mc_addr_list(hw, NULL, 0);
5037 igb_restore_vf_multicasts(adapter);
5038 return 0;
5039 }
5040
5041 mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
5042 if (!mta_list)
5043 return -ENOMEM;
5044
5045
5046 i = 0;
5047 netdev_for_each_mc_addr(ha, netdev)
5048 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
5049
5050 igb_update_mc_addr_list(hw, mta_list, i);
5051 kfree(mta_list);
5052
5053 return netdev_mc_count(netdev);
5054 }
5055
5056 static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
5057 {
5058 struct e1000_hw *hw = &adapter->hw;
5059 u32 i, pf_id;
5060
5061 switch (hw->mac.type) {
5062 case e1000_i210:
5063 case e1000_i211:
5064 case e1000_i350:
5065
5066 if (adapter->netdev->features & NETIF_F_NTUPLE)
5067 break;
5068 fallthrough;
5069 case e1000_82576:
5070 case e1000_82580:
5071 case e1000_i354:
5072
5073 if (adapter->vfs_allocated_count)
5074 break;
5075 fallthrough;
5076 default:
5077 return 1;
5078 }
5079
5080
5081 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
5082 return 0;
5083
5084 if (!adapter->vfs_allocated_count)
5085 goto set_vfta;
5086
5087
5088 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5089
5090 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5091 u32 vlvf = rd32(E1000_VLVF(i));
5092
5093 vlvf |= BIT(pf_id);
5094 wr32(E1000_VLVF(i), vlvf);
5095 }
5096
5097 set_vfta:
5098
5099 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
5100 hw->mac.ops.write_vfta(hw, i, ~0U);
5101
5102
5103 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
5104
5105 return 0;
5106 }
5107
5108 #define VFTA_BLOCK_SIZE 8
5109 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
5110 {
5111 struct e1000_hw *hw = &adapter->hw;
5112 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
5113 u32 vid_start = vfta_offset * 32;
5114 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
5115 u32 i, vid, word, bits, pf_id;
5116
5117
5118 vid = adapter->mng_vlan_id;
5119 if (vid >= vid_start && vid < vid_end)
5120 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5121
5122 if (!adapter->vfs_allocated_count)
5123 goto set_vfta;
5124
5125 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5126
5127 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5128 u32 vlvf = rd32(E1000_VLVF(i));
5129
5130
5131 vid = vlvf & VLAN_VID_MASK;
5132
5133
5134 if (vid < vid_start || vid >= vid_end)
5135 continue;
5136
5137 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
5138
5139 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5140
5141
5142 if (test_bit(vid, adapter->active_vlans))
5143 continue;
5144 }
5145
5146
5147 bits = ~BIT(pf_id);
5148 bits &= rd32(E1000_VLVF(i));
5149 wr32(E1000_VLVF(i), bits);
5150 }
5151
5152 set_vfta:
5153
5154 for (i = VFTA_BLOCK_SIZE; i--;) {
5155 vid = (vfta_offset + i) * 32;
5156 word = vid / BITS_PER_LONG;
5157 bits = vid % BITS_PER_LONG;
5158
5159 vfta[i] |= adapter->active_vlans[word] >> bits;
5160
5161 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
5162 }
5163 }
5164
5165 static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
5166 {
5167 u32 i;
5168
5169
5170 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
5171 return;
5172
5173
5174 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
5175
5176 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
5177 igb_scrub_vfta(adapter, i);
5178 }
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
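/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine
 * is responsible for configuring the hardware for proper unicast,
 * multicast, promiscuous mode, and VLAN filtering.
 **/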
5189 static void igb_set_rx_mode(struct net_device *netdev)
5190 {
5191 struct igb_adapter *adapter = netdev_priv(netdev);
5192 struct e1000_hw *hw = &adapter->hw;
5193 unsigned int vfn = adapter->vfs_allocated_count;
5194 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5195 int count;
5196
5197
5198 if (netdev->flags & IFF_PROMISC) {
5199 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5200 vmolr |= E1000_VMOLR_MPME;
5201
5202
5203 if (hw->mac.type == e1000_82576)
5204 vmolr |= E1000_VMOLR_ROPE;
5205 } else {
5206 if (netdev->flags & IFF_ALLMULTI) {
5207 rctl |= E1000_RCTL_MPE;
5208 vmolr |= E1000_VMOLR_MPME;
5209 } else {
5210
5211
5212
5213
5214 count = igb_write_mc_addr_list(netdev);
5215 if (count < 0) {
5216 rctl |= E1000_RCTL_MPE;
5217 vmolr |= E1000_VMOLR_MPME;
5218 } else if (count) {
5219 vmolr |= E1000_VMOLR_ROMPE;
5220 }
5221 }
5222 }
5223
5224
5225
5226
5227
5228 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5229 rctl |= E1000_RCTL_UPE;
5230 vmolr |= E1000_VMOLR_ROPE;
5231 }
5232
5233
5234 rctl |= E1000_RCTL_VFE;
5235
5236
5237 if ((netdev->flags & IFF_PROMISC) ||
5238 (netdev->features & NETIF_F_RXALL)) {
5239
5240 if (igb_vlan_promisc_enable(adapter))
5241 rctl &= ~E1000_RCTL_VFE;
5242 } else {
5243 igb_vlan_promisc_disable(adapter);
5244 }
5245
5246
5247 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5248 E1000_RCTL_VFE);
5249 wr32(E1000_RCTL, rctl);
5250
5251 #if (PAGE_SIZE < 8192)
5252 if (!adapter->vfs_allocated_count) {
5253 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5254 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5255 }
5256 #endif
5257 wr32(E1000_RLPML, rlpml);
5258
5259
5260
5261
5262
5263
5264 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5265 return;
5266
5267
5268 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5269
5270 vmolr |= rd32(E1000_VMOLR(vfn)) &
5271 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5272
5273
5274 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5275 #if (PAGE_SIZE < 8192)
5276 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5277 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5278 else
5279 #endif
5280 vmolr |= MAX_JUMBO_FRAME_SIZE;
5281 vmolr |= E1000_VMOLR_LPE;
5282
5283 wr32(E1000_VMOLR(vfn), vmolr);
5284
5285 igb_restore_vf_multicasts(adapter);
5286 }
5287
5288 static void igb_check_wvbr(struct igb_adapter *adapter)
5289 {
5290 struct e1000_hw *hw = &adapter->hw;
5291 u32 wvbr = 0;
5292
5293 switch (hw->mac.type) {
5294 case e1000_82576:
5295 case e1000_i350:
5296 wvbr = rd32(E1000_WVBR);
5297 if (!wvbr)
5298 return;
5299 break;
5300 default:
5301 break;
5302 }
5303
5304 adapter->wvbr |= wvbr;
5305 }
5306
5307 #define IGB_STAGGERED_QUEUE_OFFSET 8
5308
5309 static void igb_spoof_check(struct igb_adapter *adapter)
5310 {
5311 int j;
5312
5313 if (!adapter->wvbr)
5314 return;
5315
5316 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5317 if (adapter->wvbr & BIT(j) ||
5318 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5319 dev_warn(&adapter->pdev->dev,
5320 "Spoof event(s) detected on VF %d\n", j);
5321 adapter->wvbr &=
5322 ~(BIT(j) |
5323 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5324 }
5325 }
5326 }
5327
5328
5329
5330
5331 static void igb_update_phy_info(struct timer_list *t)
5332 {
5333 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5334 igb_get_phy_info(&adapter->hw);
5335 }
5336
5337
5338
5339
5340
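/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/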
5341 bool igb_has_link(struct igb_adapter *adapter)
5342 {
5343 struct e1000_hw *hw = &adapter->hw;
5344 bool link_active = false;
5345
5346
5347
5348
5349
5350
5351 switch (hw->phy.media_type) {
5352 case e1000_media_type_copper:
5353 if (!hw->mac.get_link_status)
5354 return true;
5355 fallthrough;
5356 case e1000_media_type_internal_serdes:
5357 hw->mac.ops.check_for_link(hw);
5358 link_active = !hw->mac.get_link_status;
5359 break;
5360 default:
5361 case e1000_media_type_unknown:
5362 break;
5363 }
5364
5365 if (((hw->mac.type == e1000_i210) ||
5366 (hw->mac.type == e1000_i211)) &&
5367 (hw->phy.id == I210_I_PHY_ID)) {
5368 if (!netif_carrier_ok(adapter->netdev)) {
5369 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5370 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5371 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5372 adapter->link_check_timeout = jiffies;
5373 }
5374 }
5375
5376 return link_active;
5377 }
5378
5379 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5380 {
5381 bool ret = false;
5382 u32 ctrl_ext, thstat;
5383
5384
5385 if (hw->mac.type == e1000_i350) {
5386 thstat = rd32(E1000_THSTAT);
5387 ctrl_ext = rd32(E1000_CTRL_EXT);
5388
5389 if ((hw->phy.media_type == e1000_media_type_copper) &&
5390 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5391 ret = !!(thstat & event);
5392 }
5393
5394 return ret;
5395 }
5396
5397
5398
5399
5400
5401
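/**
 * igb_check_lvmmc - check the LVMMC register for malformed Tx packets
 * dropped by the hardware and log a rate-limited warning
 * @adapter: board private structure
 **/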
5402 static void igb_check_lvmmc(struct igb_adapter *adapter)
5403 {
5404 struct e1000_hw *hw = &adapter->hw;
5405 u32 lvmmc;
5406
5407 lvmmc = rd32(E1000_LVMMC);
5408 if (lvmmc) {
5409 if (unlikely(net_ratelimit())) {
5410 netdev_warn(adapter->netdev,
5411 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5412 lvmmc);
5413 }
5414 }
5415 }
5416
5417
5418
5419
5420
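/**
 * igb_watchdog - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/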
5421 static void igb_watchdog(struct timer_list *t)
5422 {
5423 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5424
5425 schedule_work(&adapter->watchdog_task);
5426 }
5427
5428 static void igb_watchdog_task(struct work_struct *work)
5429 {
5430 struct igb_adapter *adapter = container_of(work,
5431 struct igb_adapter,
5432 watchdog_task);
5433 struct e1000_hw *hw = &adapter->hw;
5434 struct e1000_phy_info *phy = &hw->phy;
5435 struct net_device *netdev = adapter->netdev;
5436 u32 link;
5437 int i;
5438 u32 connsw;
5439 u16 phy_data, retry_count = 20;
5440
5441 link = igb_has_link(adapter);
5442
5443 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5444 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5445 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5446 else
5447 link = false;
5448 }
5449
5450
5451 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5452 if (hw->phy.media_type == e1000_media_type_copper) {
5453 connsw = rd32(E1000_CONNSW);
5454 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5455 link = 0;
5456 }
5457 }
5458 if (link) {
5459
5460 if (hw->dev_spec._82575.media_changed) {
5461 hw->dev_spec._82575.media_changed = false;
5462 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5463 igb_reset(adapter);
5464 }
5465
5466 pm_runtime_resume(netdev->dev.parent);
5467
5468 if (!netif_carrier_ok(netdev)) {
5469 u32 ctrl;
5470
5471 hw->mac.ops.get_speed_and_duplex(hw,
5472 &adapter->link_speed,
5473 &adapter->link_duplex);
5474
5475 ctrl = rd32(E1000_CTRL);
5476
5477 netdev_info(netdev,
5478 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5479 netdev->name,
5480 adapter->link_speed,
5481 adapter->link_duplex == FULL_DUPLEX ?
5482 "Full" : "Half",
5483 (ctrl & E1000_CTRL_TFCE) &&
5484 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5485 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5486 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5487
5488
5489 if ((adapter->flags & IGB_FLAG_EEE) &&
5490 (adapter->link_duplex == HALF_DUPLEX)) {
5491 dev_info(&adapter->pdev->dev,
5492 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5493 adapter->hw.dev_spec._82575.eee_disable = true;
5494 adapter->flags &= ~IGB_FLAG_EEE;
5495 }
5496
5497
5498 igb_check_downshift(hw);
5499 if (phy->speed_downgraded)
5500 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5501
5502
5503 if (igb_thermal_sensor_event(hw,
5504 E1000_THSTAT_LINK_THROTTLE))
5505 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5506
5507
5508 adapter->tx_timeout_factor = 1;
5509 switch (adapter->link_speed) {
5510 case SPEED_10:
5511 adapter->tx_timeout_factor = 14;
5512 break;
5513 case SPEED_100:
5514
5515 break;
5516 }
5517
5518 if (adapter->link_speed != SPEED_1000 ||
5519 !hw->phy.ops.read_reg)
5520 goto no_wait;
5521
5522
5523 retry_read_status:
5524 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5525 &phy_data)) {
5526 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5527 retry_count) {
5528 msleep(100);
5529 retry_count--;
5530 goto retry_read_status;
5531 } else if (!retry_count) {
5532 dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote Rx status\n");
5533 }
5534 } else {
5535 dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
5536 }
5537 no_wait:
5538 netif_carrier_on(netdev);
5539
5540 igb_ping_all_vfs(adapter);
5541 igb_check_vf_rate_limit(adapter);
5542
5543
5544 if (!test_bit(__IGB_DOWN, &adapter->state))
5545 mod_timer(&adapter->phy_info_timer,
5546 round_jiffies(jiffies + 2 * HZ));
5547 }
5548 } else {
5549 if (netif_carrier_ok(netdev)) {
5550 adapter->link_speed = 0;
5551 adapter->link_duplex = 0;
5552
5553
5554 if (igb_thermal_sensor_event(hw,
5555 E1000_THSTAT_PWR_DOWN)) {
5556 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5557 }
5558
5559
5560 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5561 netdev->name);
5562 netif_carrier_off(netdev);
5563
5564 igb_ping_all_vfs(adapter);
5565
5566
5567 if (!test_bit(__IGB_DOWN, &adapter->state))
5568 mod_timer(&adapter->phy_info_timer,
5569 round_jiffies(jiffies + 2 * HZ));
5570
5571
5572 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5573 igb_check_swap_media(adapter);
5574 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5575 schedule_work(&adapter->reset_task);
5576
5577 return;
5578 }
5579 }
5580 pm_schedule_suspend(netdev->dev.parent,
5581 MSEC_PER_SEC * 5);
5582
5583
5584 } else if (!netif_carrier_ok(netdev) &&
5585 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5586 igb_check_swap_media(adapter);
5587 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5588 schedule_work(&adapter->reset_task);
5589
5590 return;
5591 }
5592 }
5593 }
5594
5595 spin_lock(&adapter->stats64_lock);
5596 igb_update_stats(adapter);
5597 spin_unlock(&adapter->stats64_lock);
5598
5599 for (i = 0; i < adapter->num_tx_queues; i++) {
5600 struct igb_ring *tx_ring = adapter->tx_ring[i];
5601 if (!netif_carrier_ok(netdev)) {
5602
5603
5604
5605
5606
5607 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5608 adapter->tx_timeout_count++;
5609 schedule_work(&adapter->reset_task);
5610
5611 return;
5612 }
5613 }
5614
5615
5616 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5617 }
5618
5619
5620 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5621 u32 eics = 0;
5622
5623 for (i = 0; i < adapter->num_q_vectors; i++)
5624 eics |= adapter->q_vector[i]->eims_value;
5625 wr32(E1000_EICS, eics);
5626 } else {
5627 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5628 }
5629
5630 igb_spoof_check(adapter);
5631 igb_ptp_rx_hang(adapter);
5632 igb_ptp_tx_hang(adapter);
5633
5634
5635 if ((adapter->hw.mac.type == e1000_i350) ||
5636 (adapter->hw.mac.type == e1000_i354))
5637 igb_check_lvmmc(adapter);
5638
5639
5640 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5641 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5642 mod_timer(&adapter->watchdog_timer,
5643 round_jiffies(jiffies + HZ));
5644 else
5645 mod_timer(&adapter->watchdog_timer,
5646 round_jiffies(jiffies + 2 * HZ));
5647 }
5648 }
5649
5650 enum latency_range {
5651 lowest_latency = 0,
5652 low_latency = 1,
5653 bulk_latency = 2,
5654 latency_invalid = 255
5655 };
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
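/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  The average wire
 * size is computed from the byte and packet counts accumulated since the
 * last invocation, clamped, and then mapped to an interrupt rate that
 * trades response time for bulk throughput.
 **/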
5672 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5673 {
5674 int new_val = q_vector->itr_val;
5675 int avg_wire_size = 0;
5676 struct igb_adapter *adapter = q_vector->adapter;
5677 unsigned int packets;
5678
5679
5680
5681
5682 if (adapter->link_speed != SPEED_1000) {
5683 new_val = IGB_4K_ITR;
5684 goto set_itr_val;
5685 }
5686
5687 packets = q_vector->rx.total_packets;
5688 if (packets)
5689 avg_wire_size = q_vector->rx.total_bytes / packets;
5690
5691 packets = q_vector->tx.total_packets;
5692 if (packets)
5693 avg_wire_size = max_t(u32, avg_wire_size,
5694 q_vector->tx.total_bytes / packets);
5695
5696
5697 if (!avg_wire_size)
5698 goto clear_counts;
5699
5700
5701 avg_wire_size += 24;
5702
5703
5704 avg_wire_size = min(avg_wire_size, 3000);
5705
5706
5707 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5708 new_val = avg_wire_size / 3;
5709 else
5710 new_val = avg_wire_size / 2;
5711
5712
5713 if (new_val < IGB_20K_ITR &&
5714 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5715 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5716 new_val = IGB_20K_ITR;
5717
5718 set_itr_val:
5719 if (new_val != q_vector->itr_val) {
5720 q_vector->itr_val = new_val;
5721 q_vector->set_itr = 1;
5722 }
5723 clear_counts:
5724 q_vector->rx.total_bytes = 0;
5725 q_vector->rx.total_packets = 0;
5726 q_vector->tx.total_bytes = 0;
5727 q_vector->tx.total_packets = 0;
5728 }
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
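/**
 * igb_update_itr - update the dynamic ITR latency class
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new latency class (lowest, low or bulk) for the ring based on
 * the packets and bytes seen in the last interval, then clears the
 * counters so the next interval starts fresh.
 **/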
5746 static void igb_update_itr(struct igb_q_vector *q_vector,
5747 struct igb_ring_container *ring_container)
5748 {
5749 unsigned int packets = ring_container->total_packets;
5750 unsigned int bytes = ring_container->total_bytes;
5751 u8 itrval = ring_container->itr;
5752
5753
5754 if (packets == 0)
5755 return;
5756
5757 switch (itrval) {
5758 case lowest_latency:
5759
5760 if (bytes/packets > 8000)
5761 itrval = bulk_latency;
5762 else if ((packets < 5) && (bytes > 512))
5763 itrval = low_latency;
5764 break;
5765 case low_latency:
5766 if (bytes > 10000) {
5767
5768 if (bytes/packets > 8000)
5769 itrval = bulk_latency;
5770 else if ((packets < 10) || ((bytes/packets) > 1200))
5771 itrval = bulk_latency;
5772 else if ((packets > 35))
5773 itrval = lowest_latency;
5774 } else if (bytes/packets > 2000) {
5775 itrval = bulk_latency;
5776 } else if (packets <= 2 && bytes < 512) {
5777 itrval = lowest_latency;
5778 }
5779 break;
5780 case bulk_latency:
5781 if (bytes > 25000) {
5782 if (packets > 35)
5783 itrval = low_latency;
5784 } else if (bytes < 1500) {
5785 itrval = low_latency;
5786 }
5787 break;
5788 }
5789
5790
5791 ring_container->total_bytes = 0;
5792 ring_container->total_packets = 0;
5793
5794
5795 ring_container->itr = itrval;
5796 }
5797
5798 static void igb_set_itr(struct igb_q_vector *q_vector)
5799 {
5800 struct igb_adapter *adapter = q_vector->adapter;
5801 u32 new_itr = q_vector->itr_val;
5802 u8 current_itr = 0;
5803
5804
5805 if (adapter->link_speed != SPEED_1000) {
5806 current_itr = 0;
5807 new_itr = IGB_4K_ITR;
5808 goto set_itr_now;
5809 }
5810
5811 igb_update_itr(q_vector, &q_vector->tx);
5812 igb_update_itr(q_vector, &q_vector->rx);
5813
5814 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5815
5816
5817 if (current_itr == lowest_latency &&
5818 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5819 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5820 current_itr = low_latency;
5821
5822 switch (current_itr) {
5823
5824 case lowest_latency:
5825 new_itr = IGB_70K_ITR;
5826 break;
5827 case low_latency:
5828 new_itr = IGB_20K_ITR;
5829 break;
5830 case bulk_latency:
5831 new_itr = IGB_4K_ITR;
5832 break;
5833 default:
5834 break;
5835 }
5836
5837 set_itr_now:
5838 if (new_itr != q_vector->itr_val) {
5839
5840
5841
5842
5843 new_itr = new_itr > q_vector->itr_val ?
5844 max((new_itr * q_vector->itr_val) /
5845 (new_itr + (q_vector->itr_val >> 2)),
5846 new_itr) : new_itr;
5847
5848
5849
5850
5851
5852
5853 q_vector->itr_val = new_itr;
5854 q_vector->set_itr = 1;
5855 }
5856 }
5857
5858 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5859 struct igb_tx_buffer *first,
5860 u32 vlan_macip_lens, u32 type_tucmd,
5861 u32 mss_l4len_idx)
5862 {
5863 struct e1000_adv_tx_context_desc *context_desc;
5864 u16 i = tx_ring->next_to_use;
5865 struct timespec64 ts;
5866
5867 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5868
5869 i++;
5870 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5871
5872
5873 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5874
5875
5876 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5877 mss_l4len_idx |= tx_ring->reg_idx << 4;
5878
5879 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5880 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5881 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5882
5883
5884
5885
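/* When launch time offload is enabled, the skb's transmit time is passed
 * to hardware through the seqnum_seed field of the context descriptor,
 * expressed in 32 nanosecond units.
 */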
5886 if (tx_ring->launchtime_enable) {
5887 ts = ktime_to_timespec64(first->skb->tstamp);
5888 skb_txtime_consumed(first->skb);
5889 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5890 } else {
5891 context_desc->seqnum_seed = 0;
5892 }
5893 }
5894
5895 static int igb_tso(struct igb_ring *tx_ring,
5896 struct igb_tx_buffer *first,
5897 u8 *hdr_len)
5898 {
5899 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5900 struct sk_buff *skb = first->skb;
5901 union {
5902 struct iphdr *v4;
5903 struct ipv6hdr *v6;
5904 unsigned char *hdr;
5905 } ip;
5906 union {
5907 struct tcphdr *tcp;
5908 struct udphdr *udp;
5909 unsigned char *hdr;
5910 } l4;
5911 u32 paylen, l4_offset;
5912 int err;
5913
5914 if (skb->ip_summed != CHECKSUM_PARTIAL)
5915 return 0;
5916
5917 if (!skb_is_gso(skb))
5918 return 0;
5919
5920 err = skb_cow_head(skb, 0);
5921 if (err < 0)
5922 return err;
5923
5924 ip.hdr = skb_network_header(skb);
5925 l4.hdr = skb_checksum_start(skb);
5926
5927
5928 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5929 E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5930
5931
5932 if (ip.v4->version == 4) {
5933 unsigned char *csum_start = skb_checksum_start(skb);
5934 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5935
5936
5937
5938
5939 ip.v4->check = csum_fold(csum_partial(trans_start,
5940 csum_start - trans_start,
5941 0));
5942 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5943
5944 ip.v4->tot_len = 0;
5945 first->tx_flags |= IGB_TX_FLAGS_TSO |
5946 IGB_TX_FLAGS_CSUM |
5947 IGB_TX_FLAGS_IPV4;
5948 } else {
5949 ip.v6->payload_len = 0;
5950 first->tx_flags |= IGB_TX_FLAGS_TSO |
5951 IGB_TX_FLAGS_CSUM;
5952 }
5953
5954
5955 l4_offset = l4.hdr - skb->data;
5956
5957
5958 paylen = skb->len - l4_offset;
5959 if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5960
5961 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5962 csum_replace_by_diff(&l4.tcp->check,
5963 (__force __wsum)htonl(paylen));
5964 } else {
5965
5966 *hdr_len = sizeof(*l4.udp) + l4_offset;
5967 csum_replace_by_diff(&l4.udp->check,
5968 (__force __wsum)htonl(paylen));
5969 }
5970
5971
5972 first->gso_segs = skb_shinfo(skb)->gso_segs;
5973 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5974
5975
5976 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5977 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5978
5979
5980 vlan_macip_lens = l4.hdr - ip.hdr;
5981 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5982 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5983
5984 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5985 type_tucmd, mss_l4len_idx);
5986
5987 return 1;
5988 }
5989
5990 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5991 {
5992 struct sk_buff *skb = first->skb;
5993 u32 vlan_macip_lens = 0;
5994 u32 type_tucmd = 0;
5995
5996 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5997 csum_failed:
5998 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5999 !tx_ring->launchtime_enable)
6000 return;
6001 goto no_csum;
6002 }
6003
6004 switch (skb->csum_offset) {
6005 case offsetof(struct tcphdr, check):
6006 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
6007 fallthrough;
6008 case offsetof(struct udphdr, check):
6009 break;
6010 case offsetof(struct sctphdr, checksum):
6011
6012 if (skb_csum_is_sctp(skb)) {
6013 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
6014 break;
6015 }
6016 fallthrough;
6017 default:
6018 skb_checksum_help(skb);
6019 goto csum_failed;
6020 }
6021
6022
6023 first->tx_flags |= IGB_TX_FLAGS_CSUM;
6024 vlan_macip_lens = skb_checksum_start_offset(skb) -
6025 skb_network_offset(skb);
6026 no_csum:
6027 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
6028 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6029
6030 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6031 }
6032
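/* IGB_SET_FLAG maps the bit(s) selected by _flag in _input onto the _result
 * bit(s), scaling by multiplication or division depending on whether _flag
 * is smaller or larger than _result.  This lets tx_flags be translated into
 * descriptor command bits without branches.
 */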
6033 #define IGB_SET_FLAG(_input, _flag, _result) \
6034 ((_flag <= _result) ? \
6035 ((u32)(_input & _flag) * (_result / _flag)) : \
6036 ((u32)(_input & _flag) / (_flag / _result)))
6037
6038 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6039 {
6040
6041 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
6042 E1000_ADVTXD_DCMD_DEXT |
6043 E1000_ADVTXD_DCMD_IFCS;
6044
6045
6046 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
6047 (E1000_ADVTXD_DCMD_VLE));
6048
6049
6050 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
6051 (E1000_ADVTXD_DCMD_TSE));
6052
6053
6054 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
6055 (E1000_ADVTXD_MAC_TSTAMP));
6056
6057
6058 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
6059
6060 return cmd_type;
6061 }
6062
6063 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6064 union e1000_adv_tx_desc *tx_desc,
6065 u32 tx_flags, unsigned int paylen)
6066 {
6067 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
6068
6069
6070 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6071 olinfo_status |= tx_ring->reg_idx << 4;
6072
6073
6074 olinfo_status |= IGB_SET_FLAG(tx_flags,
6075 IGB_TX_FLAGS_CSUM,
6076 (E1000_TXD_POPTS_TXSM << 8));
6077
6078
6079 olinfo_status |= IGB_SET_FLAG(tx_flags,
6080 IGB_TX_FLAGS_IPV4,
6081 (E1000_TXD_POPTS_IXSM << 8));
6082
6083 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6084 }
6085
6086 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6087 {
6088 struct net_device *netdev = tx_ring->netdev;
6089
6090 netif_stop_subqueue(netdev, tx_ring->queue_index);
6091
6092
6093
6094
6095
6096 smp_mb();
6097
6098
6099
6100
6101 if (igb_desc_unused(tx_ring) < size)
6102 return -EBUSY;
6103
6104
6105 netif_wake_subqueue(netdev, tx_ring->queue_index);
6106
6107 u64_stats_update_begin(&tx_ring->tx_syncp2);
6108 tx_ring->tx_stats.restart_queue2++;
6109 u64_stats_update_end(&tx_ring->tx_syncp2);
6110
6111 return 0;
6112 }
6113
6114 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6115 {
6116 if (igb_desc_unused(tx_ring) >= size)
6117 return 0;
6118 return __igb_maybe_stop_tx(tx_ring, size);
6119 }
6120
6121 static int igb_tx_map(struct igb_ring *tx_ring,
6122 struct igb_tx_buffer *first,
6123 const u8 hdr_len)
6124 {
6125 struct sk_buff *skb = first->skb;
6126 struct igb_tx_buffer *tx_buffer;
6127 union e1000_adv_tx_desc *tx_desc;
6128 skb_frag_t *frag;
6129 dma_addr_t dma;
6130 unsigned int data_len, size;
6131 u32 tx_flags = first->tx_flags;
6132 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
6133 u16 i = tx_ring->next_to_use;
6134
6135 tx_desc = IGB_TX_DESC(tx_ring, i);
6136
6137 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6138
6139 size = skb_headlen(skb);
6140 data_len = skb->data_len;
6141
6142 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6143
6144 tx_buffer = first;
6145
6146 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6147 if (dma_mapping_error(tx_ring->dev, dma))
6148 goto dma_error;
6149
6150
6151 dma_unmap_len_set(tx_buffer, len, size);
6152 dma_unmap_addr_set(tx_buffer, dma, dma);
6153
6154 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6155
6156 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
6157 tx_desc->read.cmd_type_len =
6158 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
6159
6160 i++;
6161 tx_desc++;
6162 if (i == tx_ring->count) {
6163 tx_desc = IGB_TX_DESC(tx_ring, 0);
6164 i = 0;
6165 }
6166 tx_desc->read.olinfo_status = 0;
6167
6168 dma += IGB_MAX_DATA_PER_TXD;
6169 size -= IGB_MAX_DATA_PER_TXD;
6170
6171 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6172 }
6173
6174 if (likely(!data_len))
6175 break;
6176
6177 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6178
6179 i++;
6180 tx_desc++;
6181 if (i == tx_ring->count) {
6182 tx_desc = IGB_TX_DESC(tx_ring, 0);
6183 i = 0;
6184 }
6185 tx_desc->read.olinfo_status = 0;
6186
6187 size = skb_frag_size(frag);
6188 data_len -= size;
6189
6190 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6191 size, DMA_TO_DEVICE);
6192
6193 tx_buffer = &tx_ring->tx_buffer_info[i];
6194 }
6195
6196
6197 cmd_type |= size | IGB_TXD_DCMD;
6198 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6199
6200 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6201
6202
6203 first->time_stamp = jiffies;
6204
6205 skb_tx_timestamp(skb);
6206
6207
6208
6209
6210
6211
6212
6213
6214 dma_wmb();
6215
6216
6217 first->next_to_watch = tx_desc;
6218
6219 i++;
6220 if (i == tx_ring->count)
6221 i = 0;
6222
6223 tx_ring->next_to_use = i;
6224
6225
6226 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6227
6228 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6229 writel(i, tx_ring->tail);
6230 }
6231 return 0;
6232
6233 dma_error:
6234 dev_err(tx_ring->dev, "TX DMA map failed\n");
6235 tx_buffer = &tx_ring->tx_buffer_info[i];
6236
6237
6238 while (tx_buffer != first) {
6239 if (dma_unmap_len(tx_buffer, len))
6240 dma_unmap_page(tx_ring->dev,
6241 dma_unmap_addr(tx_buffer, dma),
6242 dma_unmap_len(tx_buffer, len),
6243 DMA_TO_DEVICE);
6244 dma_unmap_len_set(tx_buffer, len, 0);
6245
6246 if (i-- == 0)
6247 i += tx_ring->count;
6248 tx_buffer = &tx_ring->tx_buffer_info[i];
6249 }
6250
6251 if (dma_unmap_len(tx_buffer, len))
6252 dma_unmap_single(tx_ring->dev,
6253 dma_unmap_addr(tx_buffer, dma),
6254 dma_unmap_len(tx_buffer, len),
6255 DMA_TO_DEVICE);
6256 dma_unmap_len_set(tx_buffer, len, 0);
6257
6258 dev_kfree_skb_any(tx_buffer->skb);
6259 tx_buffer->skb = NULL;
6260
6261 tx_ring->next_to_use = i;
6262
6263 return -1;
6264 }
6265
6266 int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6267 struct igb_ring *tx_ring,
6268 struct xdp_frame *xdpf)
6269 {
6270 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
6271 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
6272 u16 count, i, index = tx_ring->next_to_use;
6273 struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
6274 struct igb_tx_buffer *tx_buffer = tx_head;
6275 union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
6276 u32 len = xdpf->len, cmd_type, olinfo_status;
6277 void *data = xdpf->data;
6278
6279 count = TXD_USE_COUNT(len);
6280 for (i = 0; i < nr_frags; i++)
6281 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
6282
6283 if (igb_maybe_stop_tx(tx_ring, count + 3))
6284 return IGB_XDP_CONSUMED;
6285
6286 i = 0;
6287
6288 tx_head->bytecount = xdp_get_frame_len(xdpf);
6289 tx_head->type = IGB_TYPE_XDP;
6290 tx_head->gso_segs = 1;
6291 tx_head->xdpf = xdpf;
6292
6293 olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
6294
6295 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6296 olinfo_status |= tx_ring->reg_idx << 4;
6297 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6298
6299 for (;;) {
6300 dma_addr_t dma;
6301
6302 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
6303 if (dma_mapping_error(tx_ring->dev, dma))
6304 goto unmap;
6305
6306
6307 dma_unmap_len_set(tx_buffer, len, len);
6308 dma_unmap_addr_set(tx_buffer, dma, dma);
6309
6310
6311 cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
6312 E1000_ADVTXD_DCMD_IFCS | len;
6313
6314 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6315 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6316
6317 tx_buffer->protocol = 0;
6318
6319 if (++index == tx_ring->count)
6320 index = 0;
6321
6322 if (i == nr_frags)
6323 break;
6324
6325 tx_buffer = &tx_ring->tx_buffer_info[index];
6326 tx_desc = IGB_TX_DESC(tx_ring, index);
6327 tx_desc->read.olinfo_status = 0;
6328
6329 data = skb_frag_address(&sinfo->frags[i]);
6330 len = skb_frag_size(&sinfo->frags[i]);
6331 i++;
6332 }
6333 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
6334
6335 netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
6336
6337 tx_head->time_stamp = jiffies;
6338
6339
6340 smp_wmb();
6341
6342
6343 tx_head->next_to_watch = tx_desc;
6344 tx_ring->next_to_use = index;
6345
6346
6347 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6348
6349 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6350 writel(index, tx_ring->tail);
6351
6352 return IGB_XDP_TX;
6353
6354 unmap:
6355 for (;;) {
6356 tx_buffer = &tx_ring->tx_buffer_info[index];
6357 if (dma_unmap_len(tx_buffer, len))
6358 dma_unmap_page(tx_ring->dev,
6359 dma_unmap_addr(tx_buffer, dma),
6360 dma_unmap_len(tx_buffer, len),
6361 DMA_TO_DEVICE);
6362 dma_unmap_len_set(tx_buffer, len, 0);
6363 if (tx_buffer == tx_head)
6364 break;
6365
6366 if (!index)
6367 index += tx_ring->count;
6368 index--;
6369 }
6370
6371 return IGB_XDP_CONSUMED;
6372 }
6373
6374 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6375 struct igb_ring *tx_ring)
6376 {
6377 struct igb_tx_buffer *first;
6378 int tso;
6379 u32 tx_flags = 0;
6380 unsigned short f;
6381 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6382 __be16 protocol = vlan_get_protocol(skb);
6383 u8 hdr_len = 0;
6384
6385
6386
6387
6388
6389
6390
6391 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6392 count += TXD_USE_COUNT(skb_frag_size(
6393 &skb_shinfo(skb)->frags[f]));
6394
6395 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6396
6397 return NETDEV_TX_BUSY;
6398 }
6399
6400
6401 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6402 first->type = IGB_TYPE_SKB;
6403 first->skb = skb;
6404 first->bytecount = skb->len;
6405 first->gso_segs = 1;
6406
6407 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6408 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6409
6410 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6411 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6412 &adapter->state)) {
6413 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6414 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6415
6416 adapter->ptp_tx_skb = skb_get(skb);
6417 adapter->ptp_tx_start = jiffies;
6418 if (adapter->hw.mac.type == e1000_82576)
6419 schedule_work(&adapter->ptp_tx_work);
6420 } else {
6421 adapter->tx_hwtstamp_skipped++;
6422 }
6423 }
6424
6425 if (skb_vlan_tag_present(skb)) {
6426 tx_flags |= IGB_TX_FLAGS_VLAN;
6427 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6428 }
6429
6430
6431 first->tx_flags = tx_flags;
6432 first->protocol = protocol;
6433
6434 tso = igb_tso(tx_ring, first, &hdr_len);
6435 if (tso < 0)
6436 goto out_drop;
6437 else if (!tso)
6438 igb_tx_csum(tx_ring, first);
6439
6440 if (igb_tx_map(tx_ring, first, hdr_len))
6441 goto cleanup_tx_tstamp;
6442
6443 return NETDEV_TX_OK;
6444
6445 out_drop:
6446 dev_kfree_skb_any(first->skb);
6447 first->skb = NULL;
6448 cleanup_tx_tstamp:
6449 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6450 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6451
6452 dev_kfree_skb_any(adapter->ptp_tx_skb);
6453 adapter->ptp_tx_skb = NULL;
6454 if (adapter->hw.mac.type == e1000_82576)
6455 cancel_work_sync(&adapter->ptp_tx_work);
6456 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6457 }
6458
6459 return NETDEV_TX_OK;
6460 }
6461
6462 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6463 struct sk_buff *skb)
6464 {
6465 unsigned int r_idx = skb->queue_mapping;
6466
6467 if (r_idx >= adapter->num_tx_queues)
6468 r_idx = r_idx % adapter->num_tx_queues;
6469
6470 return adapter->tx_ring[r_idx];
6471 }
6472
6473 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6474 struct net_device *netdev)
6475 {
6476 struct igb_adapter *adapter = netdev_priv(netdev);
6477
6478
6479
6480
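/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad the skb
 * in order to meet that minimum before handing it to the ring.
 */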
6481 if (skb_put_padto(skb, 17))
6482 return NETDEV_TX_OK;
6483
6484 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6485 }
6486
6487
6488
6489
6490
6491
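/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: number of the Tx queue that hung (unused)
 **/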
6492 static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6493 {
6494 struct igb_adapter *adapter = netdev_priv(netdev);
6495 struct e1000_hw *hw = &adapter->hw;
6496
6497
6498 adapter->tx_timeout_count++;
6499
6500 if (hw->mac.type >= e1000_82580)
6501 hw->dev_spec._82575.global_device_reset = true;
6502
6503 schedule_work(&adapter->reset_task);
6504 wr32(E1000_EICS,
6505 (adapter->eims_enable_mask & ~adapter->eims_other));
6506 }
6507
6508 static void igb_reset_task(struct work_struct *work)
6509 {
6510 struct igb_adapter *adapter;
6511 adapter = container_of(work, struct igb_adapter, reset_task);
6512
6513 rtnl_lock();
6514
6515 if (test_bit(__IGB_DOWN, &adapter->state) ||
6516 test_bit(__IGB_RESETTING, &adapter->state)) {
6517 rtnl_unlock();
6518 return;
6519 }
6520
6521 igb_dump(adapter);
6522 netdev_err(adapter->netdev, "Reset adapter\n");
6523 igb_reinit_locked(adapter);
6524 rtnl_unlock();
6525 }
6526
6527
6528
6529
6530
6531
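/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/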
6532 static void igb_get_stats64(struct net_device *netdev,
6533 struct rtnl_link_stats64 *stats)
6534 {
6535 struct igb_adapter *adapter = netdev_priv(netdev);
6536
6537 spin_lock(&adapter->stats64_lock);
6538 igb_update_stats(adapter);
6539 memcpy(stats, &adapter->stats64, sizeof(*stats));
6540 spin_unlock(&adapter->stats64_lock);
6541 }
6542
6543
6544
6545
6546
6547
6548
6549
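/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure.
 **/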
6550 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6551 {
6552 struct igb_adapter *adapter = netdev_priv(netdev);
6553 int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6554
6555 if (adapter->xdp_prog) {
6556 int i;
6557
6558 for (i = 0; i < adapter->num_rx_queues; i++) {
6559 struct igb_ring *ring = adapter->rx_ring[i];
6560
6561 if (max_frame > igb_rx_bufsz(ring)) {
6562 netdev_warn(adapter->netdev,
6563 "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6564 max_frame);
6565 return -EINVAL;
6566 }
6567 }
6568 }
6569
6570
6571 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6572 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6573
6574 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6575 usleep_range(1000, 2000);
6576
6577
6578 adapter->max_frame_size = max_frame;
6579
6580 if (netif_running(netdev))
6581 igb_down(adapter);
6582
6583 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6584 netdev->mtu, new_mtu);
6585 netdev->mtu = new_mtu;
6586
6587 if (netif_running(netdev))
6588 igb_up(adapter);
6589 else
6590 igb_reset(adapter);
6591
6592 clear_bit(__IGB_RESETTING, &adapter->state);
6593
6594 return 0;
6595 }
6596
6597
6598
6599
6600
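/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/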
6601 void igb_update_stats(struct igb_adapter *adapter)
6602 {
6603 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6604 struct e1000_hw *hw = &adapter->hw;
6605 struct pci_dev *pdev = adapter->pdev;
6606 u32 reg, mpc;
6607 int i;
6608 u64 bytes, packets;
6609 unsigned int start;
6610 u64 _bytes, _packets;
6611
6612
6613
6614
6615 if (adapter->link_speed == 0)
6616 return;
6617 if (pci_channel_offline(pdev))
6618 return;
6619
6620 bytes = 0;
6621 packets = 0;
6622
6623 rcu_read_lock();
6624 for (i = 0; i < adapter->num_rx_queues; i++) {
6625 struct igb_ring *ring = adapter->rx_ring[i];
6626 u32 rqdpc = rd32(E1000_RQDPC(i));
6627 if (hw->mac.type >= e1000_i210)
6628 wr32(E1000_RQDPC(i), 0);
6629
6630 if (rqdpc) {
6631 ring->rx_stats.drops += rqdpc;
6632 net_stats->rx_fifo_errors += rqdpc;
6633 }
6634
6635 do {
6636 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6637 _bytes = ring->rx_stats.bytes;
6638 _packets = ring->rx_stats.packets;
6639 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6640 bytes += _bytes;
6641 packets += _packets;
6642 }
6643
6644 net_stats->rx_bytes = bytes;
6645 net_stats->rx_packets = packets;
6646
6647 bytes = 0;
6648 packets = 0;
6649 for (i = 0; i < adapter->num_tx_queues; i++) {
6650 struct igb_ring *ring = adapter->tx_ring[i];
6651 do {
6652 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6653 _bytes = ring->tx_stats.bytes;
6654 _packets = ring->tx_stats.packets;
6655 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6656 bytes += _bytes;
6657 packets += _packets;
6658 }
6659 net_stats->tx_bytes = bytes;
6660 net_stats->tx_packets = packets;
6661 rcu_read_unlock();
6662
6663
6664 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6665 adapter->stats.gprc += rd32(E1000_GPRC);
6666 adapter->stats.gorc += rd32(E1000_GORCL);
6667 rd32(E1000_GORCH);
6668 adapter->stats.bprc += rd32(E1000_BPRC);
6669 adapter->stats.mprc += rd32(E1000_MPRC);
6670 adapter->stats.roc += rd32(E1000_ROC);
6671
6672 adapter->stats.prc64 += rd32(E1000_PRC64);
6673 adapter->stats.prc127 += rd32(E1000_PRC127);
6674 adapter->stats.prc255 += rd32(E1000_PRC255);
6675 adapter->stats.prc511 += rd32(E1000_PRC511);
6676 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6677 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6678 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6679 adapter->stats.sec += rd32(E1000_SEC);
6680
6681 mpc = rd32(E1000_MPC);
6682 adapter->stats.mpc += mpc;
6683 net_stats->rx_fifo_errors += mpc;
6684 adapter->stats.scc += rd32(E1000_SCC);
6685 adapter->stats.ecol += rd32(E1000_ECOL);
6686 adapter->stats.mcc += rd32(E1000_MCC);
6687 adapter->stats.latecol += rd32(E1000_LATECOL);
6688 adapter->stats.dc += rd32(E1000_DC);
6689 adapter->stats.rlec += rd32(E1000_RLEC);
6690 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6691 adapter->stats.xontxc += rd32(E1000_XONTXC);
6692 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6693 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6694 adapter->stats.fcruc += rd32(E1000_FCRUC);
6695 adapter->stats.gptc += rd32(E1000_GPTC);
6696 adapter->stats.gotc += rd32(E1000_GOTCL);
6697 rd32(E1000_GOTCH);
6698 adapter->stats.rnbc += rd32(E1000_RNBC);
6699 adapter->stats.ruc += rd32(E1000_RUC);
6700 adapter->stats.rfc += rd32(E1000_RFC);
6701 adapter->stats.rjc += rd32(E1000_RJC);
6702 adapter->stats.tor += rd32(E1000_TORH);
6703 adapter->stats.tot += rd32(E1000_TOTH);
6704 adapter->stats.tpr += rd32(E1000_TPR);
6705
6706 adapter->stats.ptc64 += rd32(E1000_PTC64);
6707 adapter->stats.ptc127 += rd32(E1000_PTC127);
6708 adapter->stats.ptc255 += rd32(E1000_PTC255);
6709 adapter->stats.ptc511 += rd32(E1000_PTC511);
6710 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6711 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6712
6713 adapter->stats.mptc += rd32(E1000_MPTC);
6714 adapter->stats.bptc += rd32(E1000_BPTC);
6715
6716 adapter->stats.tpt += rd32(E1000_TPT);
6717 adapter->stats.colc += rd32(E1000_COLC);
6718
6719 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6720
6721 reg = rd32(E1000_CTRL_EXT);
6722 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6723 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6724
6725
6726 if ((hw->mac.type != e1000_i210) &&
6727 (hw->mac.type != e1000_i211))
6728 adapter->stats.tncrs += rd32(E1000_TNCRS);
6729 }
6730
6731 adapter->stats.tsctc += rd32(E1000_TSCTC);
6732 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6733
6734 adapter->stats.iac += rd32(E1000_IAC);
6735 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6736 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6737 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6738 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6739 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6740 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6741 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6742 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6743
6744
6745 net_stats->multicast = adapter->stats.mprc;
6746 net_stats->collisions = adapter->stats.colc;
6747
6748
6749
6750
6751
6752
6753 net_stats->rx_errors = adapter->stats.rxerrc +
6754 adapter->stats.crcerrs + adapter->stats.algnerrc +
6755 adapter->stats.ruc + adapter->stats.roc +
6756 adapter->stats.cexterr;
6757 net_stats->rx_length_errors = adapter->stats.ruc +
6758 adapter->stats.roc;
6759 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6760 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6761 net_stats->rx_missed_errors = adapter->stats.mpc;
6762
6763
6764 net_stats->tx_errors = adapter->stats.ecol +
6765 adapter->stats.latecol;
6766 net_stats->tx_aborted_errors = adapter->stats.ecol;
6767 net_stats->tx_window_errors = adapter->stats.latecol;
6768 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6769
6770
6771
6772
6773 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6774 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6775 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6776
6777
6778 reg = rd32(E1000_MANC);
6779 if (reg & E1000_MANC_EN_BMC2OS) {
6780 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6781 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6782 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6783 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6784 }
6785 }
6786
6787 static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
6788 {
6789 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
6790 struct e1000_hw *hw = &adapter->hw;
6791 struct timespec64 ts;
6792 u32 tsauxc;
6793
6794 if (pin < 0 || pin >= IGB_N_PEROUT)
6795 return;
6796
6797 spin_lock(&adapter->tmreg_lock);
6798
6799 if (hw->mac.type == e1000_82580 ||
6800 hw->mac.type == e1000_i354 ||
6801 hw->mac.type == e1000_i350) {
6802 s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
6803 u32 systiml, systimh, level_mask, level, rem;
6804 u64 systim, now;
6805
6806
6807 rd32(E1000_SYSTIMR);
6808 systiml = rd32(E1000_SYSTIML);
6809 systimh = rd32(E1000_SYSTIMH);
6810 systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
6811 now = timecounter_cyc2time(&adapter->tc, systim);
6812
6813 if (pin < 2) {
6814 level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
6815 level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
6816 } else {
6817 level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
6818 level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
6819 }
6820
6821 div_u64_rem(now, ns, &rem);
6822 systim = systim + (ns - rem);
6823
6824
6825 div_u64_rem(now, ns << 1, &rem);
6826 if (rem < ns) {
6827
6828 if (level == 0) {
6829
6830 systim += ns;
6831 pr_notice("igb: periodic output on %s missed falling edge\n",
6832 adapter->sdp_config[pin].name);
6833 }
6834 } else {
6835
6836 if (level == 1) {
6837
6838 systim += ns;
6839 pr_notice("igb: periodic output on %s missed rising edge\n",
6840 adapter->sdp_config[pin].name);
6841 }
6842 }
6843
6844
6845
6846
6847 ts.tv_nsec = (u32)systim;
6848 ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
6849 } else {
6850 ts = timespec64_add(adapter->perout[pin].start,
6851 adapter->perout[pin].period);
6852 }
6853
6854
6855 wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
6856 wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
6857 tsauxc = rd32(E1000_TSAUXC);
6858 tsauxc |= TSAUXC_EN_TT0;
6859 wr32(E1000_TSAUXC, tsauxc);
6860 adapter->perout[pin].start = ts;
6861
6862 spin_unlock(&adapter->tmreg_lock);
6863 }
6864
6865 static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
6866 {
6867 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
6868 int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
6869 int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
6870 struct e1000_hw *hw = &adapter->hw;
6871 struct ptp_clock_event event;
6872 struct timespec64 ts;
6873
6874 if (pin < 0 || pin >= IGB_N_EXTTS)
6875 return;
6876
6877 if (hw->mac.type == e1000_82580 ||
6878 hw->mac.type == e1000_i354 ||
6879 hw->mac.type == e1000_i350) {
6880 s64 ns = rd32(auxstmpl);
6881
6882 ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
6883 ts = ns_to_timespec64(ns);
6884 } else {
6885 ts.tv_nsec = rd32(auxstmpl);
6886 ts.tv_sec = rd32(auxstmph);
6887 }
6888
6889 event.type = PTP_CLOCK_EXTTS;
6890 event.index = tsintr_tt;
6891 event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6892 ptp_clock_event(adapter->ptp_clock, &event);
6893 }
6894
6895 static void igb_tsync_interrupt(struct igb_adapter *adapter)
6896 {
6897 struct e1000_hw *hw = &adapter->hw;
6898 u32 ack = 0, tsicr = rd32(E1000_TSICR);
6899 struct ptp_clock_event event;
6900
6901 if (tsicr & TSINTR_SYS_WRAP) {
6902 event.type = PTP_CLOCK_PPS;
6903 if (adapter->ptp_caps.pps)
6904 ptp_clock_event(adapter->ptp_clock, &event);
6905 ack |= TSINTR_SYS_WRAP;
6906 }
6907
6908 if (tsicr & E1000_TSICR_TXTS) {
6909
6910 schedule_work(&adapter->ptp_tx_work);
6911 ack |= E1000_TSICR_TXTS;
6912 }
6913
6914 if (tsicr & TSINTR_TT0) {
6915 igb_perout(adapter, 0);
6916 ack |= TSINTR_TT0;
6917 }
6918
6919 if (tsicr & TSINTR_TT1) {
6920 igb_perout(adapter, 1);
6921 ack |= TSINTR_TT1;
6922 }
6923
6924 if (tsicr & TSINTR_AUTT0) {
6925 igb_extts(adapter, 0);
6926 ack |= TSINTR_AUTT0;
6927 }
6928
6929 if (tsicr & TSINTR_AUTT1) {
6930 igb_extts(adapter, 1);
6931 ack |= TSINTR_AUTT1;
6932 }
6933
6934
6935 wr32(E1000_TSICR, ack);
6936 }
6937
6938 static irqreturn_t igb_msix_other(int irq, void *data)
6939 {
6940 struct igb_adapter *adapter = data;
6941 struct e1000_hw *hw = &adapter->hw;
6942 u32 icr = rd32(E1000_ICR);
6943
6944
6945 if (icr & E1000_ICR_DRSTA)
6946 schedule_work(&adapter->reset_task);
6947
6948 if (icr & E1000_ICR_DOUTSYNC) {
6949
6950 adapter->stats.doosync++;
6951
6952
6953
6954
6955 igb_check_wvbr(adapter);
6956 }
6957
6958
6959 if (icr & E1000_ICR_VMMB)
6960 igb_msg_task(adapter);
6961
6962 if (icr & E1000_ICR_LSC) {
6963 hw->mac.get_link_status = 1;
6964
6965 if (!test_bit(__IGB_DOWN, &adapter->state))
6966 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6967 }
6968
6969 if (icr & E1000_ICR_TS)
6970 igb_tsync_interrupt(adapter);
6971
6972 wr32(E1000_EIMS, adapter->eims_other);
6973
6974 return IRQ_HANDLED;
6975 }
6976
6977 static void igb_write_itr(struct igb_q_vector *q_vector)
6978 {
6979 struct igb_adapter *adapter = q_vector->adapter;
6980 u32 itr_val = q_vector->itr_val & 0x7FFC;
6981
6982 if (!q_vector->set_itr)
6983 return;
6984
6985 if (!itr_val)
6986 itr_val = 0x4;
6987
6988 if (adapter->hw.mac.type == e1000_82575)
6989 itr_val |= itr_val << 16;
6990 else
6991 itr_val |= E1000_EITR_CNT_IGNR;
6992
6993 writel(itr_val, q_vector->itr_register);
6994 q_vector->set_itr = 0;
6995 }
6996
6997 static irqreturn_t igb_msix_ring(int irq, void *data)
6998 {
6999 struct igb_q_vector *q_vector = data;
7000
7001
7002 igb_write_itr(q_vector);
7003
7004 napi_schedule(&q_vector->napi);
7005
7006 return IRQ_HANDLED;
7007 }
7008
7009 #ifdef CONFIG_IGB_DCA
7010 static void igb_update_tx_dca(struct igb_adapter *adapter,
7011 struct igb_ring *tx_ring,
7012 int cpu)
7013 {
7014 struct e1000_hw *hw = &adapter->hw;
7015 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
7016
7017 if (hw->mac.type != e1000_82575)
7018 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
7019
7020
7021
7022
7023
7024 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
7025 E1000_DCA_TXCTRL_DATA_RRO_EN |
7026 E1000_DCA_TXCTRL_DESC_DCA_EN;
7027
7028 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
7029 }
7030
7031 static void igb_update_rx_dca(struct igb_adapter *adapter,
7032 struct igb_ring *rx_ring,
7033 int cpu)
7034 {
7035 struct e1000_hw *hw = &adapter->hw;
7036 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
7037
7038 if (hw->mac.type != e1000_82575)
7039 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
7040
7041
7042
7043
7044
7045 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
7046 E1000_DCA_RXCTRL_DESC_DCA_EN;
7047
7048 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
7049 }
7050
7051 static void igb_update_dca(struct igb_q_vector *q_vector)
7052 {
7053 struct igb_adapter *adapter = q_vector->adapter;
7054 int cpu = get_cpu();
7055
7056 if (q_vector->cpu == cpu)
7057 goto out_no_update;
7058
7059 if (q_vector->tx.ring)
7060 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
7061
7062 if (q_vector->rx.ring)
7063 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
7064
7065 q_vector->cpu = cpu;
7066 out_no_update:
7067 put_cpu();
7068 }
7069
7070 static void igb_setup_dca(struct igb_adapter *adapter)
7071 {
7072 struct e1000_hw *hw = &adapter->hw;
7073 int i;
7074
7075 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
7076 return;
7077
7078
7079 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
7080
7081 for (i = 0; i < adapter->num_q_vectors; i++) {
7082 adapter->q_vector[i]->cpu = -1;
7083 igb_update_dca(adapter->q_vector[i]);
7084 }
7085 }
7086
7087 static int __igb_notify_dca(struct device *dev, void *data)
7088 {
7089 struct net_device *netdev = dev_get_drvdata(dev);
7090 struct igb_adapter *adapter = netdev_priv(netdev);
7091 struct pci_dev *pdev = adapter->pdev;
7092 struct e1000_hw *hw = &adapter->hw;
7093 unsigned long event = *(unsigned long *)data;
7094
7095 switch (event) {
7096 case DCA_PROVIDER_ADD:
7097 		/* if already enabled, don't do it again */
7098 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
7099 break;
7100 if (dca_add_requester(dev) == 0) {
7101 adapter->flags |= IGB_FLAG_DCA_ENABLED;
7102 dev_info(&pdev->dev, "DCA enabled\n");
7103 igb_setup_dca(adapter);
7104 break;
7105 }
7106 fallthrough;
7107 case DCA_PROVIDER_REMOVE:
7108 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
7109
7110
7111
7112 dca_remove_requester(dev);
7113 dev_info(&pdev->dev, "DCA disabled\n");
7114 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
7115 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
7116 }
7117 break;
7118 }
7119
7120 return 0;
7121 }
7122
7123 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
7124 void *p)
7125 {
7126 int ret_val;
7127
7128 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
7129 __igb_notify_dca);
7130
7131 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7132 }
7133 #endif
7134
7135 #ifdef CONFIG_PCI_IOV
7136 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
7137 {
7138 unsigned char mac_addr[ETH_ALEN];
7139
7140 eth_zero_addr(mac_addr);
7141 igb_set_vf_mac(adapter, vf, mac_addr);
7142
7143 	/* By default spoof check is enabled for all VFs */
7144 adapter->vf_data[vf].spoofchk_enabled = true;
7145
7146 	/* By default VFs are not trusted */
7147 adapter->vf_data[vf].trusted = false;
7148
7149 return 0;
7150 }
7151
7152 #endif
7153 static void igb_ping_all_vfs(struct igb_adapter *adapter)
7154 {
7155 struct e1000_hw *hw = &adapter->hw;
7156 u32 ping;
7157 int i;
7158
7159 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
7160 ping = E1000_PF_CONTROL_MSG;
7161 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
7162 ping |= E1000_VT_MSGTYPE_CTS;
7163 igb_write_mbx(hw, &ping, 1, i);
7164 }
7165 }
7166
7167 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7168 {
7169 struct e1000_hw *hw = &adapter->hw;
7170 u32 vmolr = rd32(E1000_VMOLR(vf));
7171 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7172
7173 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7174 IGB_VF_FLAG_MULTI_PROMISC);
7175 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7176
7177 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
7178 vmolr |= E1000_VMOLR_MPME;
7179 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7180 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
7181 } else {
7182
7183
7184
7185
7186 if (vf_data->num_vf_mc_hashes > 30) {
7187 vmolr |= E1000_VMOLR_MPME;
7188 } else if (vf_data->num_vf_mc_hashes) {
7189 int j;
7190
7191 vmolr |= E1000_VMOLR_ROMPE;
7192 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7193 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7194 }
7195 }
7196
7197 wr32(E1000_VMOLR(vf), vmolr);
7198
7199
7200 if (*msgbuf & E1000_VT_MSGINFO_MASK)
7201 return -EINVAL;
7202
7203 return 0;
7204 }
7205
7206 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
7207 u32 *msgbuf, u32 vf)
7208 {
7209 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7210 u16 *hash_list = (u16 *)&msgbuf[1];
7211 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7212 int i;
7213
7214
7215
7216
7217
7218 vf_data->num_vf_mc_hashes = n;
7219
7220 	/* only up to 30 hash values are supported */
7221 if (n > 30)
7222 n = 30;
7223
7224 	/* store the hashes for later use */
7225 for (i = 0; i < n; i++)
7226 vf_data->vf_mc_hashes[i] = hash_list[i];
7227
7228
7229 igb_set_rx_mode(adapter->netdev);
7230
7231 return 0;
7232 }
7233
7234 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
7235 {
7236 struct e1000_hw *hw = &adapter->hw;
7237 struct vf_data_storage *vf_data;
7238 int i, j;
7239
7240 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7241 u32 vmolr = rd32(E1000_VMOLR(i));
7242
7243 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7244
7245 vf_data = &adapter->vf_data[i];
7246
7247 if ((vf_data->num_vf_mc_hashes > 30) ||
7248 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
7249 vmolr |= E1000_VMOLR_MPME;
7250 } else if (vf_data->num_vf_mc_hashes) {
7251 vmolr |= E1000_VMOLR_ROMPE;
7252 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7253 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7254 }
7255 wr32(E1000_VMOLR(i), vmolr);
7256 }
7257 }
7258
7259 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
7260 {
7261 struct e1000_hw *hw = &adapter->hw;
7262 u32 pool_mask, vlvf_mask, i;
7263
7264
7265 pool_mask = E1000_VLVF_POOLSEL_MASK;
7266 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
7267
7268
7269 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
7270 adapter->vfs_allocated_count);
7271
7272
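	/* walk the VLVF entries looking for ones that reference this VF's pool */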
7273 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
7274 u32 vlvf = rd32(E1000_VLVF(i));
7275 u32 vfta_mask, vid, vfta;
7276
7277
7278 if (!(vlvf & vlvf_mask))
7279 continue;
7280
7281
7282 vlvf ^= vlvf_mask;
7283
7284
7285 if (vlvf & pool_mask)
7286 goto update_vlvfb;
7287
7288
7289 if (vlvf & E1000_VLVF_POOLSEL_MASK)
7290 goto update_vlvf;
7291
7292 vid = vlvf & E1000_VLVF_VLANID_MASK;
7293 vfta_mask = BIT(vid % 32);
7294
7295
7296 vfta = adapter->shadow_vfta[vid / 32];
7297 if (vfta & vfta_mask)
7298 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
7299 update_vlvf:
7300
7301 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7302 vlvf &= E1000_VLVF_POOLSEL_MASK;
7303 else
7304 vlvf = 0;
7305 update_vlvfb:
7306
7307 wr32(E1000_VLVF(i), vlvf);
7308 }
7309 }
7310
7311 static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
7312 {
7313 u32 vlvf;
7314 int idx;
7315
7316
7317 if (vlan == 0)
7318 return 0;
7319
7320
7321 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
7322 vlvf = rd32(E1000_VLVF(idx));
7323 if ((vlvf & VLAN_VID_MASK) == vlan)
7324 break;
7325 }
7326
7327 return idx;
7328 }
7329
7330 static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
7331 {
7332 struct e1000_hw *hw = &adapter->hw;
7333 u32 bits, pf_id;
7334 int idx;
7335
7336 idx = igb_find_vlvf_entry(hw, vid);
7337 if (!idx)
7338 return;
7339
7340
7341
7342
7343 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
7344 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
7345 bits &= rd32(E1000_VLVF(idx));
7346
7347
7348 if (!bits) {
7349 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7350 wr32(E1000_VLVF(idx), BIT(pf_id));
7351 else
7352 wr32(E1000_VLVF(idx), 0);
7353 }
7354 }
7355
7356 static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
7357 bool add, u32 vf)
7358 {
7359 int pf_id = adapter->vfs_allocated_count;
7360 struct e1000_hw *hw = &adapter->hw;
7361 int err;
7362
7363
7364
7365
7366
7367
7368 if (add && test_bit(vid, adapter->active_vlans)) {
7369 err = igb_vfta_set(hw, vid, pf_id, true, false);
7370 if (err)
7371 return err;
7372 }
7373
7374 err = igb_vfta_set(hw, vid, vf, add, false);
7375
7376 if (add && !err)
7377 return err;
7378
7379
7380
7381
7382
7383 if (test_bit(vid, adapter->active_vlans) ||
7384 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
7385 igb_update_pf_vlvf(adapter, vid);
7386
7387 return err;
7388 }
7389
7390 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7391 {
7392 struct e1000_hw *hw = &adapter->hw;
7393
7394 if (vid)
7395 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7396 else
7397 wr32(E1000_VMVIR(vf), 0);
7398 }
7399
7400 static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7401 u16 vlan, u8 qos)
7402 {
7403 int err;
7404
7405 err = igb_set_vf_vlan(adapter, vlan, true, vf);
7406 if (err)
7407 return err;
7408
7409 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7410 igb_set_vmolr(adapter, vf, !vlan);
7411
7412
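	/* revoke access to the previous port VLAN, if it changed */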
7413 if (vlan != adapter->vf_data[vf].pf_vlan)
7414 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7415 false, vf);
7416
7417 adapter->vf_data[vf].pf_vlan = vlan;
7418 adapter->vf_data[vf].pf_qos = qos;
7419 igb_set_vf_vlan_strip(adapter, vf, true);
7420 dev_info(&adapter->pdev->dev,
7421 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7422 if (test_bit(__IGB_DOWN, &adapter->state)) {
7423 dev_warn(&adapter->pdev->dev,
7424 "The VF VLAN has been set, but the PF device is not up.\n");
7425 dev_warn(&adapter->pdev->dev,
7426 "Bring the PF device up before attempting to use the VF device.\n");
7427 }
7428
7429 return err;
7430 }
7431
7432 static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7433 {
7434
7435 igb_set_vf_vlan(adapter, 0, true, vf);
7436
7437 igb_set_vmvir(adapter, 0, vf);
7438 igb_set_vmolr(adapter, vf, true);
7439
7440
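	/* remove any previously assigned PF port VLAN */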
7441 if (adapter->vf_data[vf].pf_vlan)
7442 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7443 false, vf);
7444
7445 adapter->vf_data[vf].pf_vlan = 0;
7446 adapter->vf_data[vf].pf_qos = 0;
7447 igb_set_vf_vlan_strip(adapter, vf, false);
7448
7449 return 0;
7450 }
7451
7452 static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7453 u16 vlan, u8 qos, __be16 vlan_proto)
7454 {
7455 struct igb_adapter *adapter = netdev_priv(netdev);
7456
7457 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7458 return -EINVAL;
7459
7460 if (vlan_proto != htons(ETH_P_8021Q))
7461 return -EPROTONOSUPPORT;
7462
7463 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7464 igb_disable_port_vlan(adapter, vf);
7465 }
7466
7467 static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7468 {
7469 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7470 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7471 int ret;
7472
7473 if (adapter->vf_data[vf].pf_vlan)
7474 return -1;
7475
7476
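	/* removing VLAN 0 is always a no-op */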
7477 if (!vid && !add)
7478 return 0;
7479
7480 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7481 if (!ret)
7482 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7483 return ret;
7484 }
7485
7486 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7487 {
7488 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7489
7490 	/* clear all flags except the indication that the PF has set the MAC */
7491 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7492 vf_data->last_nack = jiffies;
7493
7494
7495 igb_clear_vf_vfta(adapter, vf);
7496 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7497 igb_set_vmvir(adapter, vf_data->pf_vlan |
7498 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7499 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7500 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7501
7502
7503 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7504
7505
7506 igb_set_rx_mode(adapter->netdev);
7507 }
7508
7509 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7510 {
7511 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7512
7513
7514 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7515 eth_zero_addr(vf_mac);
7516
7517
7518 igb_vf_reset(adapter, vf);
7519 }
7520
7521 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7522 {
7523 struct e1000_hw *hw = &adapter->hw;
7524 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7525 u32 reg, msgbuf[3];
7526 u8 *addr = (u8 *)(&msgbuf[1]);
7527
7528
7529 igb_vf_reset(adapter, vf);
7530
7531 	/* set vf mac address */
7532 igb_set_vf_mac(adapter, vf, vf_mac);
7533
7534 	/* enable transmit and receive for vf */
7535 reg = rd32(E1000_VFTE);
7536 wr32(E1000_VFTE, reg | BIT(vf));
7537 reg = rd32(E1000_VFRE);
7538 wr32(E1000_VFRE, reg | BIT(vf));
7539
7540 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7541
7542 	/* reply to reset with ack and vf mac address */
7543 if (!is_zero_ether_addr(vf_mac)) {
7544 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7545 memcpy(addr, vf_mac, ETH_ALEN);
7546 } else {
7547 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7548 }
7549 igb_write_mbx(hw, msgbuf, 3, vf);
7550 }
7551
7552 static void igb_flush_mac_table(struct igb_adapter *adapter)
7553 {
7554 struct e1000_hw *hw = &adapter->hw;
7555 int i;
7556
7557 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7558 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7559 eth_zero_addr(adapter->mac_table[i].addr);
7560 adapter->mac_table[i].queue = 0;
7561 igb_rar_set_index(adapter, i);
7562 }
7563 }
7564
7565 static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7566 {
7567 struct e1000_hw *hw = &adapter->hw;
7568
7569 int rar_entries = hw->mac.rar_entry_count -
7570 adapter->vfs_allocated_count;
7571 int i, count = 0;
7572
7573 for (i = 0; i < rar_entries; i++) {
7574
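		/* do not count the default filter as available */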
7575 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7576 continue;
7577
7578
7579 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7580 (adapter->mac_table[i].queue != queue))
7581 continue;
7582
7583 count++;
7584 }
7585
7586 return count;
7587 }
7588
7589
7590 static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7591 {
7592 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7593
7594 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7595 mac_table->queue = adapter->vfs_allocated_count;
7596 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7597
7598 igb_rar_set_index(adapter, 0);
7599 }
7600
7601
7602
7603
7604
7605
7606 static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7607 const u8 *addr, const u8 flags)
7608 {
7609 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7610 return true;
7611
7612 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7613 (flags & IGB_MAC_STATE_SRC_ADDR))
7614 return false;
7615
7616 if (!ether_addr_equal(addr, entry->addr))
7617 return false;
7618
7619 return true;
7620 }
7621
7622
7623
7624
7625
7626
7627 static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7628 const u8 *addr, const u8 queue,
7629 const u8 flags)
7630 {
7631 struct e1000_hw *hw = &adapter->hw;
7632 int rar_entries = hw->mac.rar_entry_count -
7633 adapter->vfs_allocated_count;
7634 int i;
7635
7636 if (is_zero_ether_addr(addr))
7637 return -EINVAL;
7638
7639
7640
7641
7642
7643 for (i = 0; i < rar_entries; i++) {
7644 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7645 addr, flags))
7646 continue;
7647
7648 ether_addr_copy(adapter->mac_table[i].addr, addr);
7649 adapter->mac_table[i].queue = queue;
7650 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7651
7652 igb_rar_set_index(adapter, i);
7653 return i;
7654 }
7655
7656 return -ENOSPC;
7657 }
7658
7659 static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7660 const u8 queue)
7661 {
7662 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7663 }
7664
7665
7666
7667
7668
7669
7670
7671 static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7672 const u8 *addr, const u8 queue,
7673 const u8 flags)
7674 {
7675 struct e1000_hw *hw = &adapter->hw;
7676 int rar_entries = hw->mac.rar_entry_count -
7677 adapter->vfs_allocated_count;
7678 int i;
7679
7680 if (is_zero_ether_addr(addr))
7681 return -EINVAL;
7682
7683
7684
7685
7686
7687 for (i = 0; i < rar_entries; i++) {
7688 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7689 continue;
7690 if ((adapter->mac_table[i].state & flags) != flags)
7691 continue;
7692 if (adapter->mac_table[i].queue != queue)
7693 continue;
7694 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7695 continue;
7696
7697
7698
7699
7700 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7701 adapter->mac_table[i].state =
7702 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7703 adapter->mac_table[i].queue =
7704 adapter->vfs_allocated_count;
7705 } else {
7706 adapter->mac_table[i].state = 0;
7707 adapter->mac_table[i].queue = 0;
7708 eth_zero_addr(adapter->mac_table[i].addr);
7709 }
7710
7711 igb_rar_set_index(adapter, i);
7712 return 0;
7713 }
7714
7715 return -ENOENT;
7716 }
7717
7718 static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7719 const u8 queue)
7720 {
7721 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7722 }
7723
7724 int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7725 const u8 *addr, u8 queue, u8 flags)
7726 {
7727 struct e1000_hw *hw = &adapter->hw;
7728
7729
7730
7731
7732 if (hw->mac.type != e1000_i210)
7733 return -EOPNOTSUPP;
7734
7735 return igb_add_mac_filter_flags(adapter, addr, queue,
7736 IGB_MAC_STATE_QUEUE_STEERING | flags);
7737 }
7738
7739 int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7740 const u8 *addr, u8 queue, u8 flags)
7741 {
7742 return igb_del_mac_filter_flags(adapter, addr, queue,
7743 IGB_MAC_STATE_QUEUE_STEERING | flags);
7744 }
7745
7746 static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7747 {
7748 struct igb_adapter *adapter = netdev_priv(netdev);
7749 int ret;
7750
7751 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7752
7753 return min_t(int, ret, 0);
7754 }
7755
7756 static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7757 {
7758 struct igb_adapter *adapter = netdev_priv(netdev);
7759
7760 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7761
7762 return 0;
7763 }
7764
7765 static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7766 const u32 info, const u8 *addr)
7767 {
7768 struct pci_dev *pdev = adapter->pdev;
7769 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7770 struct list_head *pos;
7771 struct vf_mac_filter *entry = NULL;
7772 int ret = 0;
7773
7774 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7775 !vf_data->trusted) {
7776 dev_warn(&pdev->dev,
7777 "VF %d requested MAC filter but is administratively denied\n",
7778 vf);
7779 return -EINVAL;
7780 }
7781 if (!is_valid_ether_addr(addr)) {
7782 dev_warn(&pdev->dev,
7783 "VF %d attempted to set invalid MAC filter\n",
7784 vf);
7785 return -EINVAL;
7786 }
7787
7788 switch (info) {
7789 case E1000_VF_MAC_FILTER_CLR:
7790
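		/* release all unicast MAC filters that were added on behalf of this VF */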
7791 list_for_each(pos, &adapter->vf_macs.l) {
7792 entry = list_entry(pos, struct vf_mac_filter, l);
7793 if (entry->vf == vf) {
7794 entry->vf = -1;
7795 entry->free = true;
7796 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7797 }
7798 }
7799 break;
7800 case E1000_VF_MAC_FILTER_ADD:
7801
7802 list_for_each(pos, &adapter->vf_macs.l) {
7803 entry = list_entry(pos, struct vf_mac_filter, l);
7804 if (entry->free)
7805 break;
7806 }
7807
7808 if (entry && entry->free) {
7809 entry->free = false;
7810 entry->vf = vf;
7811 ether_addr_copy(entry->vf_mac, addr);
7812
7813 ret = igb_add_mac_filter(adapter, addr, vf);
7814 ret = min_t(int, ret, 0);
7815 } else {
7816 ret = -ENOSPC;
7817 }
7818
7819 if (ret == -ENOSPC)
7820 dev_warn(&pdev->dev,
7821 "VF %d has requested MAC filter but there is no space for it\n",
7822 vf);
7823 break;
7824 default:
7825 ret = -EINVAL;
7826 break;
7827 }
7828
7829 return ret;
7830 }
7831
7832 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7833 {
7834 struct pci_dev *pdev = adapter->pdev;
7835 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7836 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7837
7838
7839
7840
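	/* the requested MAC address is packed into the message starting at
	 * the second 32-bit word
	 */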
7841 unsigned char *addr = (unsigned char *)&msg[1];
7842 int ret = 0;
7843
7844 if (!info) {
7845 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7846 !vf_data->trusted) {
7847 dev_warn(&pdev->dev,
7848 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7849 vf);
7850 return -EINVAL;
7851 }
7852
7853 if (!is_valid_ether_addr(addr)) {
7854 dev_warn(&pdev->dev,
7855 "VF %d attempted to set invalid MAC\n",
7856 vf);
7857 return -EINVAL;
7858 }
7859
7860 ret = igb_set_vf_mac(adapter, vf, addr);
7861 } else {
7862 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7863 }
7864
7865 return ret;
7866 }
7867
7868 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7869 {
7870 struct e1000_hw *hw = &adapter->hw;
7871 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7872 u32 msg = E1000_VT_MSGTYPE_NACK;
7873
7874
7875 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7876 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7877 igb_write_mbx(hw, &msg, 1, vf);
7878 vf_data->last_nack = jiffies;
7879 }
7880 }
7881
7882 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7883 {
7884 struct pci_dev *pdev = adapter->pdev;
7885 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7886 struct e1000_hw *hw = &adapter->hw;
7887 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7888 s32 retval;
7889
7890 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7891
7892 if (retval) {
7893
7894 dev_err(&pdev->dev, "Error receiving message from VF\n");
7895 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7896 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7897 goto unlock;
7898 goto out;
7899 }
7900
7901
7902 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7903 goto unlock;
7904
7905
7906
7907
7908 if (msgbuf[0] == E1000_VF_RESET) {
7909
7910 igb_vf_reset_msg(adapter, vf);
7911 return;
7912 }
7913
7914 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7915 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7916 goto unlock;
7917 retval = -1;
7918 goto out;
7919 }
7920
7921 switch ((msgbuf[0] & 0xFFFF)) {
7922 case E1000_VF_SET_MAC_ADDR:
7923 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7924 break;
7925 case E1000_VF_SET_PROMISC:
7926 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7927 break;
7928 case E1000_VF_SET_MULTICAST:
7929 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7930 break;
7931 case E1000_VF_SET_LPE:
7932 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7933 break;
7934 case E1000_VF_SET_VLAN:
7935 retval = -1;
7936 if (vf_data->pf_vlan)
7937 dev_warn(&pdev->dev,
7938 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7939 vf);
7940 else
7941 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7942 break;
7943 default:
7944 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7945 retval = -1;
7946 break;
7947 }
7948
7949 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7950 out:
7951
7952 if (retval)
7953 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7954 else
7955 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7956
7957
7958 igb_write_mbx(hw, msgbuf, 1, vf);
7959 return;
7960
7961 unlock:
7962 igb_unlock_mbx(hw, vf);
7963 }
7964
7965 static void igb_msg_task(struct igb_adapter *adapter)
7966 {
7967 struct e1000_hw *hw = &adapter->hw;
7968 unsigned long flags;
7969 u32 vf;
7970
7971 spin_lock_irqsave(&adapter->vfs_lock, flags);
7972 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7973
7974 if (!igb_check_for_rst(hw, vf))
7975 igb_vf_reset_event(adapter, vf);
7976
7977
7978 if (!igb_check_for_msg(hw, vf))
7979 igb_rcv_msg_from_vf(adapter, vf);
7980
7981
7982 if (!igb_check_for_ack(hw, vf))
7983 igb_rcv_ack_from_vf(adapter, vf);
7984 }
7985 spin_unlock_irqrestore(&adapter->vfs_lock, flags);
7986 }
7987
7988 /**
7989  *  igb_set_uta - set or clear the Unicast Table Array
7990  *  @adapter: board private structure
7991  *  @set: boolean indicating whether to set or clear every UTA bit
7992  *
7993  *  The unicast table address is a register array of 32-bit registers.
7994  *  It is used in a way similar to the MTA, but for unicast addresses
7995  *  that do not match a RAR entry.  It is only written when SR-IOV VFs
7996  *  are allocated; @set writes every bit as 1, otherwise the table is
7997  *  cleared.
7998  **/
7999 static void igb_set_uta(struct igb_adapter *adapter, bool set)
8000 {
8001 struct e1000_hw *hw = &adapter->hw;
8002 u32 uta = set ? ~0 : 0;
8003 int i;
8004
8005
8006 if (!adapter->vfs_allocated_count)
8007 return;
8008
8009 for (i = hw->mac.uta_reg_count; i--;)
8010 array_wr32(E1000_UTA, i, uta);
8011 }
8012
8013 /**
8014  *  igb_intr_msi - Interrupt Handler for MSI (non-MSI-X) mode
8015  *  @irq: interrupt number
8016  *  @data: pointer to our private adapter structure
8017  **/
8018 static irqreturn_t igb_intr_msi(int irq, void *data)
8019 {
8020 struct igb_adapter *adapter = data;
8021 struct igb_q_vector *q_vector = adapter->q_vector[0];
8022 struct e1000_hw *hw = &adapter->hw;
8023
8024 u32 icr = rd32(E1000_ICR);
8025
8026 igb_write_itr(q_vector);
8027
8028 if (icr & E1000_ICR_DRSTA)
8029 schedule_work(&adapter->reset_task);
8030
8031 if (icr & E1000_ICR_DOUTSYNC) {
8032
8033 adapter->stats.doosync++;
8034 }
8035
8036 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
8037 hw->mac.get_link_status = 1;
8038 if (!test_bit(__IGB_DOWN, &adapter->state))
8039 mod_timer(&adapter->watchdog_timer, jiffies + 1);
8040 }
8041
8042 if (icr & E1000_ICR_TS)
8043 igb_tsync_interrupt(adapter);
8044
8045 napi_schedule(&q_vector->napi);
8046
8047 return IRQ_HANDLED;
8048 }
8049
8050 /**
8051  *  igb_intr - Legacy Interrupt Handler
8052  *  @irq: interrupt number
8053  *  @data: pointer to our private adapter structure
8054  **/
8055 static irqreturn_t igb_intr(int irq, void *data)
8056 {
8057 struct igb_adapter *adapter = data;
8058 struct igb_q_vector *q_vector = adapter->q_vector[0];
8059 struct e1000_hw *hw = &adapter->hw;
8060
8061
8062
8063 u32 icr = rd32(E1000_ICR);
8064
8065
8066
8067
8068 if (!(icr & E1000_ICR_INT_ASSERTED))
8069 return IRQ_NONE;
8070
8071 igb_write_itr(q_vector);
8072
8073 if (icr & E1000_ICR_DRSTA)
8074 schedule_work(&adapter->reset_task);
8075
8076 if (icr & E1000_ICR_DOUTSYNC) {
8077
8078 adapter->stats.doosync++;
8079 }
8080
8081 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
8082 hw->mac.get_link_status = 1;
8083
8084 if (!test_bit(__IGB_DOWN, &adapter->state))
8085 mod_timer(&adapter->watchdog_timer, jiffies + 1);
8086 }
8087
8088 if (icr & E1000_ICR_TS)
8089 igb_tsync_interrupt(adapter);
8090
8091 napi_schedule(&q_vector->napi);
8092
8093 return IRQ_HANDLED;
8094 }
8095
8096 static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
8097 {
8098 struct igb_adapter *adapter = q_vector->adapter;
8099 struct e1000_hw *hw = &adapter->hw;
8100
8101 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
8102 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
8103 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
8104 igb_set_itr(q_vector);
8105 else
8106 igb_update_ring_itr(q_vector);
8107 }
8108
8109 if (!test_bit(__IGB_DOWN, &adapter->state)) {
8110 if (adapter->flags & IGB_FLAG_HAS_MSIX)
8111 wr32(E1000_EIMS, q_vector->eims_value);
8112 else
8113 igb_irq_enable(adapter);
8114 }
8115 }
8116
8117 /**
8118  *  igb_poll - NAPI Rx polling callback
8119  *  @napi: napi polling structure
8120  *  @budget: count of how many packets we should handle
8121  **/
8122 static int igb_poll(struct napi_struct *napi, int budget)
8123 {
8124 struct igb_q_vector *q_vector = container_of(napi,
8125 struct igb_q_vector,
8126 napi);
8127 bool clean_complete = true;
8128 int work_done = 0;
8129
8130 #ifdef CONFIG_IGB_DCA
8131 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
8132 igb_update_dca(q_vector);
8133 #endif
8134 if (q_vector->tx.ring)
8135 clean_complete = igb_clean_tx_irq(q_vector, budget);
8136
8137 if (q_vector->rx.ring) {
8138 int cleaned = igb_clean_rx_irq(q_vector, budget);
8139
8140 work_done += cleaned;
8141 if (cleaned >= budget)
8142 clean_complete = false;
8143 }
8144
8145
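	/* If all work not completed, return budget and keep polling */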
8146 if (!clean_complete)
8147 return budget;
8148
8149
8150
8151
8152 if (likely(napi_complete_done(napi, work_done)))
8153 igb_ring_irq_enable(q_vector);
8154
8155 return work_done;
8156 }
8157
8158 /**
8159  *  igb_clean_tx_irq - Reclaim resources after transmit completes
8160  *  @q_vector: pointer to q_vector containing needed info
8161  *  @napi_budget: Used to determine if we are in netpoll
8162  *
8163  *  returns true if ring is completely cleaned
8164  **/
8165 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
8166 {
8167 struct igb_adapter *adapter = q_vector->adapter;
8168 struct igb_ring *tx_ring = q_vector->tx.ring;
8169 struct igb_tx_buffer *tx_buffer;
8170 union e1000_adv_tx_desc *tx_desc;
8171 unsigned int total_bytes = 0, total_packets = 0;
8172 unsigned int budget = q_vector->tx.work_limit;
8173 unsigned int i = tx_ring->next_to_clean;
8174
8175 if (test_bit(__IGB_DOWN, &adapter->state))
8176 return true;
8177
8178 tx_buffer = &tx_ring->tx_buffer_info[i];
8179 tx_desc = IGB_TX_DESC(tx_ring, i);
8180 i -= tx_ring->count;
8181
8182 do {
8183 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
8184
8185
8186 if (!eop_desc)
8187 break;
8188
8189
8190 smp_rmb();
8191
8192
8193 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
8194 break;
8195
8196
8197 tx_buffer->next_to_watch = NULL;
8198
8199
8200 total_bytes += tx_buffer->bytecount;
8201 total_packets += tx_buffer->gso_segs;
8202
8203
8204 if (tx_buffer->type == IGB_TYPE_SKB)
8205 napi_consume_skb(tx_buffer->skb, napi_budget);
8206 else
8207 xdp_return_frame(tx_buffer->xdpf);
8208
8209
8210 dma_unmap_single(tx_ring->dev,
8211 dma_unmap_addr(tx_buffer, dma),
8212 dma_unmap_len(tx_buffer, len),
8213 DMA_TO_DEVICE);
8214
8215
8216 dma_unmap_len_set(tx_buffer, len, 0);
8217
8218
8219 while (tx_desc != eop_desc) {
8220 tx_buffer++;
8221 tx_desc++;
8222 i++;
8223 if (unlikely(!i)) {
8224 i -= tx_ring->count;
8225 tx_buffer = tx_ring->tx_buffer_info;
8226 tx_desc = IGB_TX_DESC(tx_ring, 0);
8227 }
8228
8229
8230 if (dma_unmap_len(tx_buffer, len)) {
8231 dma_unmap_page(tx_ring->dev,
8232 dma_unmap_addr(tx_buffer, dma),
8233 dma_unmap_len(tx_buffer, len),
8234 DMA_TO_DEVICE);
8235 dma_unmap_len_set(tx_buffer, len, 0);
8236 }
8237 }
8238
8239
8240 tx_buffer++;
8241 tx_desc++;
8242 i++;
8243 if (unlikely(!i)) {
8244 i -= tx_ring->count;
8245 tx_buffer = tx_ring->tx_buffer_info;
8246 tx_desc = IGB_TX_DESC(tx_ring, 0);
8247 }
8248
8249
8250 prefetch(tx_desc);
8251
8252
8253 budget--;
8254 } while (likely(budget));
8255
8256 netdev_tx_completed_queue(txring_txq(tx_ring),
8257 total_packets, total_bytes);
8258 i += tx_ring->count;
8259 tx_ring->next_to_clean = i;
8260 u64_stats_update_begin(&tx_ring->tx_syncp);
8261 tx_ring->tx_stats.bytes += total_bytes;
8262 tx_ring->tx_stats.packets += total_packets;
8263 u64_stats_update_end(&tx_ring->tx_syncp);
8264 q_vector->tx.total_bytes += total_bytes;
8265 q_vector->tx.total_packets += total_packets;
8266
8267 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
8268 struct e1000_hw *hw = &adapter->hw;
8269
8270
8271
8272
8273 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
8274 if (tx_buffer->next_to_watch &&
8275 time_after(jiffies, tx_buffer->time_stamp +
8276 (adapter->tx_timeout_factor * HZ)) &&
8277 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
8278
8279
8280 dev_err(tx_ring->dev,
8281 "Detected Tx Unit Hang\n"
8282 " Tx Queue <%d>\n"
8283 " TDH <%x>\n"
8284 " TDT <%x>\n"
8285 " next_to_use <%x>\n"
8286 " next_to_clean <%x>\n"
8287 "buffer_info[next_to_clean]\n"
8288 " time_stamp <%lx>\n"
8289 " next_to_watch <%p>\n"
8290 " jiffies <%lx>\n"
8291 " desc.status <%x>\n",
8292 tx_ring->queue_index,
8293 rd32(E1000_TDH(tx_ring->reg_idx)),
8294 readl(tx_ring->tail),
8295 tx_ring->next_to_use,
8296 tx_ring->next_to_clean,
8297 tx_buffer->time_stamp,
8298 tx_buffer->next_to_watch,
8299 jiffies,
8300 tx_buffer->next_to_watch->wb.status);
8301 netif_stop_subqueue(tx_ring->netdev,
8302 tx_ring->queue_index);
8303
8304
8305 return true;
8306 }
8307 }
8308
8309 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
8310 if (unlikely(total_packets &&
8311 netif_carrier_ok(tx_ring->netdev) &&
8312 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
8313
8314
8315
8316 smp_mb();
8317 if (__netif_subqueue_stopped(tx_ring->netdev,
8318 tx_ring->queue_index) &&
8319 !(test_bit(__IGB_DOWN, &adapter->state))) {
8320 netif_wake_subqueue(tx_ring->netdev,
8321 tx_ring->queue_index);
8322
8323 u64_stats_update_begin(&tx_ring->tx_syncp);
8324 tx_ring->tx_stats.restart_queue++;
8325 u64_stats_update_end(&tx_ring->tx_syncp);
8326 }
8327 }
8328
8329 return !!budget;
8330 }
8331
8332 /**
8333  *  igb_reuse_rx_page - page flip buffer and store it back on the ring
8334  *  @rx_ring: rx descriptor ring to store buffers on
8335  *  @old_buff: donor buffer to have page reused
8336  *
8337  *  Stores the donor buffer in the slot that will be refilled next.
8338  **/
8339 static void igb_reuse_rx_page(struct igb_ring *rx_ring,
8340 struct igb_rx_buffer *old_buff)
8341 {
8342 struct igb_rx_buffer *new_buff;
8343 u16 nta = rx_ring->next_to_alloc;
8344
8345 new_buff = &rx_ring->rx_buffer_info[nta];
8346
8347
8348 nta++;
8349 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
8350
8351
8352
8353
8354
8355 new_buff->dma = old_buff->dma;
8356 new_buff->page = old_buff->page;
8357 new_buff->page_offset = old_buff->page_offset;
8358 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
8359 }
8360
8361 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
8362 int rx_buf_pgcnt)
8363 {
8364 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
8365 struct page *page = rx_buffer->page;
8366
8367
8368 if (!dev_page_is_reusable(page))
8369 return false;
8370
8371 #if (PAGE_SIZE < 8192)
8372
8373 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
8374 return false;
8375 #else
8376 #define IGB_LAST_OFFSET \
8377 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
8378
8379 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
8380 return false;
8381 #endif
8382
8383
8384
8385
8386
8387 if (unlikely(pagecnt_bias == 1)) {
8388 page_ref_add(page, USHRT_MAX - 1);
8389 rx_buffer->pagecnt_bias = USHRT_MAX;
8390 }
8391
8392 return true;
8393 }
8394
8395 /**
8396  *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
8397  *  @rx_ring: rx descriptor ring to transact packets on
8398  *  @rx_buffer: buffer containing page to add
8399  *  @skb: sk_buff to place the data into
8400  *  @size: size of buffer to be added
8401  *
8402  *  This function will add the data contained in rx_buffer->page to the skb.
8403  **/
8404 static void igb_add_rx_frag(struct igb_ring *rx_ring,
8405 struct igb_rx_buffer *rx_buffer,
8406 struct sk_buff *skb,
8407 unsigned int size)
8408 {
8409 #if (PAGE_SIZE < 8192)
8410 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8411 #else
8412 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8413 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8414 SKB_DATA_ALIGN(size);
8415 #endif
8416 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8417 rx_buffer->page_offset, size, truesize);
8418 #if (PAGE_SIZE < 8192)
8419 rx_buffer->page_offset ^= truesize;
8420 #else
8421 rx_buffer->page_offset += truesize;
8422 #endif
8423 }
8424
8425 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8426 struct igb_rx_buffer *rx_buffer,
8427 struct xdp_buff *xdp,
8428 ktime_t timestamp)
8429 {
8430 #if (PAGE_SIZE < 8192)
8431 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8432 #else
8433 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
8434 xdp->data_hard_start);
8435 #endif
8436 unsigned int size = xdp->data_end - xdp->data;
8437 unsigned int headlen;
8438 struct sk_buff *skb;
8439
8440
8441 net_prefetch(xdp->data);
8442
8443
8444 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8445 if (unlikely(!skb))
8446 return NULL;
8447
8448 if (timestamp)
8449 skb_hwtstamps(skb)->hwtstamp = timestamp;
8450
8451
8452 headlen = size;
8453 if (headlen > IGB_RX_HDR_LEN)
8454 headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
8455
8456
8457 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
8458
8459
8460 size -= headlen;
8461 if (size) {
8462 skb_add_rx_frag(skb, 0, rx_buffer->page,
8463 (xdp->data + headlen) - page_address(rx_buffer->page),
8464 size, truesize);
8465 #if (PAGE_SIZE < 8192)
8466 rx_buffer->page_offset ^= truesize;
8467 #else
8468 rx_buffer->page_offset += truesize;
8469 #endif
8470 } else {
8471 rx_buffer->pagecnt_bias++;
8472 }
8473
8474 return skb;
8475 }
8476
8477 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8478 struct igb_rx_buffer *rx_buffer,
8479 struct xdp_buff *xdp,
8480 ktime_t timestamp)
8481 {
8482 #if (PAGE_SIZE < 8192)
8483 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8484 #else
8485 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8486 SKB_DATA_ALIGN(xdp->data_end -
8487 xdp->data_hard_start);
8488 #endif
8489 unsigned int metasize = xdp->data - xdp->data_meta;
8490 struct sk_buff *skb;
8491
8492
8493 net_prefetch(xdp->data_meta);
8494
8495
8496 skb = napi_build_skb(xdp->data_hard_start, truesize);
8497 if (unlikely(!skb))
8498 return NULL;
8499
8500
8501 skb_reserve(skb, xdp->data - xdp->data_hard_start);
8502 __skb_put(skb, xdp->data_end - xdp->data);
8503
8504 if (metasize)
8505 skb_metadata_set(skb, metasize);
8506
8507 if (timestamp)
8508 skb_hwtstamps(skb)->hwtstamp = timestamp;
8509
8510
8511 #if (PAGE_SIZE < 8192)
8512 rx_buffer->page_offset ^= truesize;
8513 #else
8514 rx_buffer->page_offset += truesize;
8515 #endif
8516
8517 return skb;
8518 }
8519
8520 static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
8521 struct igb_ring *rx_ring,
8522 struct xdp_buff *xdp)
8523 {
8524 int err, result = IGB_XDP_PASS;
8525 struct bpf_prog *xdp_prog;
8526 u32 act;
8527
8528 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
8529
8530 if (!xdp_prog)
8531 goto xdp_out;
8532
8533 prefetchw(xdp->data_hard_start);
8534
8535 act = bpf_prog_run_xdp(xdp_prog, xdp);
8536 switch (act) {
8537 case XDP_PASS:
8538 break;
8539 case XDP_TX:
8540 result = igb_xdp_xmit_back(adapter, xdp);
8541 if (result == IGB_XDP_CONSUMED)
8542 goto out_failure;
8543 break;
8544 case XDP_REDIRECT:
8545 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
8546 if (err)
8547 goto out_failure;
8548 result = IGB_XDP_REDIR;
8549 break;
8550 default:
8551 bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
8552 fallthrough;
8553 case XDP_ABORTED:
8554 out_failure:
8555 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
8556 fallthrough;
8557 case XDP_DROP:
8558 result = IGB_XDP_CONSUMED;
8559 break;
8560 }
8561 xdp_out:
8562 return ERR_PTR(-result);
8563 }
8564
8565 static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
8566 unsigned int size)
8567 {
8568 unsigned int truesize;
8569
8570 #if (PAGE_SIZE < 8192)
8571 truesize = igb_rx_pg_size(rx_ring) / 2;
8572 #else
8573 truesize = ring_uses_build_skb(rx_ring) ?
8574 SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
8575 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
8576 SKB_DATA_ALIGN(size);
8577 #endif
8578 return truesize;
8579 }
8580
8581 static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
8582 struct igb_rx_buffer *rx_buffer,
8583 unsigned int size)
8584 {
8585 unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
8586 #if (PAGE_SIZE < 8192)
8587 rx_buffer->page_offset ^= truesize;
8588 #else
8589 rx_buffer->page_offset += truesize;
8590 #endif
8591 }
8592
8593 static inline void igb_rx_checksum(struct igb_ring *ring,
8594 union e1000_adv_rx_desc *rx_desc,
8595 struct sk_buff *skb)
8596 {
8597 skb_checksum_none_assert(skb);
8598
8599
8600 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8601 return;
8602
8603
8604 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8605 return;
8606
8607
8608 if (igb_test_staterr(rx_desc,
8609 E1000_RXDEXT_STATERR_TCPE |
8610 E1000_RXDEXT_STATERR_IPE)) {
8611
8612
8613
8614
8615 if (!((skb->len == 60) &&
8616 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8617 u64_stats_update_begin(&ring->rx_syncp);
8618 ring->rx_stats.csum_err++;
8619 u64_stats_update_end(&ring->rx_syncp);
8620 }
8621
8622 return;
8623 }
8624
8625 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8626 E1000_RXD_STAT_UDPCS))
8627 skb->ip_summed = CHECKSUM_UNNECESSARY;
8628
8629 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8630 le32_to_cpu(rx_desc->wb.upper.status_error));
8631 }
8632
8633 static inline void igb_rx_hash(struct igb_ring *ring,
8634 union e1000_adv_rx_desc *rx_desc,
8635 struct sk_buff *skb)
8636 {
8637 if (ring->netdev->features & NETIF_F_RXHASH)
8638 skb_set_hash(skb,
8639 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8640 PKT_HASH_TYPE_L3);
8641 }
8642
8643 /**
8644  *  igb_is_non_eop - process handling of non-EOP buffers
8645  *  @rx_ring: Rx ring being processed
8646  *  @rx_desc: Rx descriptor for current buffer
8647  *
8648  *  This function updates next_to_clean and prefetches the next
8649  *  descriptor.  If the buffer being processed has the end-of-packet
8650  *  bit set the function returns false, otherwise it returns true to
8651  *  indicate that the frame continues in the next descriptor.
8652  **/
8653 static bool igb_is_non_eop(struct igb_ring *rx_ring,
8654 union e1000_adv_rx_desc *rx_desc)
8655 {
8656 u32 ntc = rx_ring->next_to_clean + 1;
8657
8658
8659 ntc = (ntc < rx_ring->count) ? ntc : 0;
8660 rx_ring->next_to_clean = ntc;
8661
8662 prefetch(IGB_RX_DESC(rx_ring, ntc));
8663
8664 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8665 return false;
8666
8667 return true;
8668 }
8669
8670 /**
8671  *  igb_cleanup_headers - Correct corrupted or empty headers
8672  *  @rx_ring: rx descriptor ring packet is being transacted on
8673  *  @rx_desc: pointer to the EOP Rx descriptor
8674  *  @skb: pointer to current skb being fixed
8675  *
8676  *  Frames carrying hardware error bits are dropped here unless the
8677  *  netdev is in RXALL mode; XDP verdicts returned as error pointers
8678  *  are treated as buffers that were already consumed.  In addition,
8679  *  if the skb is shorter than 60 bytes it is padded so that it
8680  *  qualifies as a valid Ethernet frame.
8681  *
8682  *  Returns true if an error was encountered and skb was freed.
8683  **/
8684 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8685 union e1000_adv_rx_desc *rx_desc,
8686 struct sk_buff *skb)
8687 {
8688
8689 if (IS_ERR(skb))
8690 return true;
8691
8692 if (unlikely((igb_test_staterr(rx_desc,
8693 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8694 struct net_device *netdev = rx_ring->netdev;
8695 if (!(netdev->features & NETIF_F_RXALL)) {
8696 dev_kfree_skb_any(skb);
8697 return true;
8698 }
8699 }
8700
8701
8702 if (eth_skb_pad(skb))
8703 return true;
8704
8705 return false;
8706 }
8707
8708 /**
8709  *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
8710  *  @rx_ring: rx descriptor ring packet is being transacted on
8711  *  @rx_desc: pointer to the EOP Rx descriptor
8712  *  @skb: pointer to current skb being populated
8713  *
8714  *  This function checks the ring, descriptor, and packet information in
8715  *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
8716  *  other fields within the skb.
8717  **/
8718 static void igb_process_skb_fields(struct igb_ring *rx_ring,
8719 union e1000_adv_rx_desc *rx_desc,
8720 struct sk_buff *skb)
8721 {
8722 struct net_device *dev = rx_ring->netdev;
8723
8724 igb_rx_hash(rx_ring, rx_desc, skb);
8725
8726 igb_rx_checksum(rx_ring, rx_desc, skb);
8727
8728 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8729 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8730 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8731
8732 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8733 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8734 u16 vid;
8735
8736 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8737 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8738 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
8739 else
8740 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8741
8742 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8743 }
8744
8745 skb_record_rx_queue(skb, rx_ring->queue_index);
8746
8747 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8748 }
8749
8750 static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8751 {
8752 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8753 }
8754
8755 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8756 const unsigned int size, int *rx_buf_pgcnt)
8757 {
8758 struct igb_rx_buffer *rx_buffer;
8759
8760 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8761 *rx_buf_pgcnt =
8762 #if (PAGE_SIZE < 8192)
8763 page_count(rx_buffer->page);
8764 #else
8765 0;
8766 #endif
8767 prefetchw(rx_buffer->page);
8768
8769
8770 dma_sync_single_range_for_cpu(rx_ring->dev,
8771 rx_buffer->dma,
8772 rx_buffer->page_offset,
8773 size,
8774 DMA_FROM_DEVICE);
8775
8776 rx_buffer->pagecnt_bias--;
8777
8778 return rx_buffer;
8779 }
8780
8781 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8782 struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
8783 {
8784 if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
8785
8786 igb_reuse_rx_page(rx_ring, rx_buffer);
8787 } else {
8788
8789
8790
8791 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8792 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8793 IGB_RX_DMA_ATTR);
8794 __page_frag_cache_drain(rx_buffer->page,
8795 rx_buffer->pagecnt_bias);
8796 }
8797
8798
8799 rx_buffer->page = NULL;
8800 }
8801
8802 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8803 {
8804 struct igb_adapter *adapter = q_vector->adapter;
8805 struct igb_ring *rx_ring = q_vector->rx.ring;
8806 struct sk_buff *skb = rx_ring->skb;
8807 unsigned int total_bytes = 0, total_packets = 0;
8808 u16 cleaned_count = igb_desc_unused(rx_ring);
8809 unsigned int xdp_xmit = 0;
8810 struct xdp_buff xdp;
8811 u32 frame_sz = 0;
8812 int rx_buf_pgcnt;
8813
8814
8815 #if (PAGE_SIZE < 8192)
8816 frame_sz = igb_rx_frame_truesize(rx_ring, 0);
8817 #endif
8818 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
8819
8820 while (likely(total_packets < budget)) {
8821 union e1000_adv_rx_desc *rx_desc;
8822 struct igb_rx_buffer *rx_buffer;
8823 ktime_t timestamp = 0;
8824 int pkt_offset = 0;
8825 unsigned int size;
8826 void *pktbuf;
8827
8828
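		/* return some buffers to hardware, one at a time is too slow */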
8829 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8830 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8831 cleaned_count = 0;
8832 }
8833
8834 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8835 size = le16_to_cpu(rx_desc->wb.upper.length);
8836 if (!size)
8837 break;
8838
8839
8840
8841
8842
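		/* This memory barrier keeps us from reading any other fields of
		 * the descriptor until we know it has been written back.
		 */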
8843 dma_rmb();
8844
8845 rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
8846 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
8847
8848
8849 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8850 int ts_hdr_len;
8851
8852 ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
8853 							 pktbuf, &timestamp);
8854
8855 pkt_offset += ts_hdr_len;
8856 size -= ts_hdr_len;
8857 }
8858
8859
8860 if (!skb) {
8861 unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
8862 unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
8863
8864 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
8865 xdp_buff_clear_frags_flag(&xdp);
8866 #if (PAGE_SIZE > 4096)
8867
8868 xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
8869 #endif
8870 skb = igb_run_xdp(adapter, rx_ring, &xdp);
8871 }
8872
8873 if (IS_ERR(skb)) {
8874 unsigned int xdp_res = -PTR_ERR(skb);
8875
8876 if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
8877 xdp_xmit |= xdp_res;
8878 igb_rx_buffer_flip(rx_ring, rx_buffer, size);
8879 } else {
8880 rx_buffer->pagecnt_bias++;
8881 }
8882 total_packets++;
8883 total_bytes += size;
8884 } else if (skb)
8885 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8886 else if (ring_uses_build_skb(rx_ring))
8887 skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
8888 timestamp);
8889 else
8890 skb = igb_construct_skb(rx_ring, rx_buffer,
8891 &xdp, timestamp);
8892
8893
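		/* exit if we failed to retrieve a buffer */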
8894 if (!skb) {
8895 rx_ring->rx_stats.alloc_failed++;
8896 rx_buffer->pagecnt_bias++;
8897 break;
8898 }
8899
8900 igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
8901 cleaned_count++;
8902
8903
8904 if (igb_is_non_eop(rx_ring, rx_desc))
8905 continue;
8906
8907
8908 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8909 skb = NULL;
8910 continue;
8911 }
8912
8913
8914 total_bytes += skb->len;
8915
8916
8917 igb_process_skb_fields(rx_ring, rx_desc, skb);
8918
8919 napi_gro_receive(&q_vector->napi, skb);
8920
8921
8922 skb = NULL;
8923
8924
8925 total_packets++;
8926 }
8927
8928
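	/* place any incomplete frame back on the ring for the next poll */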
8929 rx_ring->skb = skb;
8930
8931 if (xdp_xmit & IGB_XDP_REDIR)
8932 xdp_do_flush();
8933
8934 if (xdp_xmit & IGB_XDP_TX) {
8935 struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
8936
8937 igb_xdp_ring_update_tail(tx_ring);
8938 }
8939
8940 u64_stats_update_begin(&rx_ring->rx_syncp);
8941 rx_ring->rx_stats.packets += total_packets;
8942 rx_ring->rx_stats.bytes += total_bytes;
8943 u64_stats_update_end(&rx_ring->rx_syncp);
8944 q_vector->rx.total_packets += total_packets;
8945 q_vector->rx.total_bytes += total_bytes;
8946
8947 if (cleaned_count)
8948 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8949
8950 return total_packets;
8951 }
8952
8953 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8954 struct igb_rx_buffer *bi)
8955 {
8956 struct page *page = bi->page;
8957 dma_addr_t dma;
8958
8959
8960 if (likely(page))
8961 return true;
8962
8963
8964 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8965 if (unlikely(!page)) {
8966 rx_ring->rx_stats.alloc_failed++;
8967 return false;
8968 }
8969
8970
8971 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8972 igb_rx_pg_size(rx_ring),
8973 DMA_FROM_DEVICE,
8974 IGB_RX_DMA_ATTR);
8975
8976
8977
8978
8979 if (dma_mapping_error(rx_ring->dev, dma)) {
8980 __free_pages(page, igb_rx_pg_order(rx_ring));
8981
8982 rx_ring->rx_stats.alloc_failed++;
8983 return false;
8984 }
8985
8986 bi->dma = dma;
8987 bi->page = page;
8988 bi->page_offset = igb_rx_offset(rx_ring);
8989 page_ref_add(page, USHRT_MAX - 1);
8990 bi->pagecnt_bias = USHRT_MAX;
8991
8992 return true;
8993 }
8994
8995 /**
8996  *  igb_alloc_rx_buffers - Replace used receive buffers
8997  *  @rx_ring: rx descriptor ring to allocate new receive buffers
8998  *  @cleaned_count: count of buffers to allocate
8999  **/
9000 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
9001 {
9002 union e1000_adv_rx_desc *rx_desc;
9003 struct igb_rx_buffer *bi;
9004 u16 i = rx_ring->next_to_use;
9005 u16 bufsz;
9006
9007
9008 if (!cleaned_count)
9009 return;
9010
9011 rx_desc = IGB_RX_DESC(rx_ring, i);
9012 bi = &rx_ring->rx_buffer_info[i];
9013 i -= rx_ring->count;
9014
9015 bufsz = igb_rx_bufsz(rx_ring);
9016
9017 do {
9018 if (!igb_alloc_mapped_page(rx_ring, bi))
9019 break;
9020
9021
9022 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
9023 bi->page_offset, bufsz,
9024 DMA_FROM_DEVICE);
9025
9026
9027
9028
9029 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
9030
9031 rx_desc++;
9032 bi++;
9033 i++;
9034 if (unlikely(!i)) {
9035 rx_desc = IGB_RX_DESC(rx_ring, 0);
9036 bi = rx_ring->rx_buffer_info;
9037 i -= rx_ring->count;
9038 }
9039
9040
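		/* clear the length for the next_to_use descriptor */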
9041 rx_desc->wb.upper.length = 0;
9042
9043 cleaned_count--;
9044 } while (cleaned_count);
9045
9046 i += rx_ring->count;
9047
9048 if (rx_ring->next_to_use != i) {
9049
9050 rx_ring->next_to_use = i;
9051
9052
9053 rx_ring->next_to_alloc = i;
9054
9055
9056
9057
9058
9059
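		/* Force memory writes to complete before letting the hardware
		 * know there are new descriptors to fetch.
		 */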
9060 dma_wmb();
9061 writel(i, rx_ring->tail);
9062 }
9063 }
9064
9065 /**
9066  *  igb_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG)
9067  *  @netdev: pointer to netdev struct
9068  *  @ifr: interface request structure
9069  *  @cmd: ioctl command to execute
9070  **/
9071 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
9072 {
9073 struct igb_adapter *adapter = netdev_priv(netdev);
9074 struct mii_ioctl_data *data = if_mii(ifr);
9075
9076 if (adapter->hw.phy.media_type != e1000_media_type_copper)
9077 return -EOPNOTSUPP;
9078
9079 switch (cmd) {
9080 case SIOCGMIIPHY:
9081 data->phy_id = adapter->hw.phy.addr;
9082 break;
9083 case SIOCGMIIREG:
9084 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
9085 &data->val_out))
9086 return -EIO;
9087 break;
9088 case SIOCSMIIREG:
9089 default:
9090 return -EOPNOTSUPP;
9091 }
9092 return 0;
9093 }
9094
9095 /**
9096  *  igb_ioctl - entry point for netdev ioctls (MII and hardware timestamping)
9097  *  @netdev: pointer to netdev struct
9098  *  @ifr: interface request structure
9099  *  @cmd: ioctl command to execute
9100  **/
9101 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
9102 {
9103 switch (cmd) {
9104 case SIOCGMIIPHY:
9105 case SIOCGMIIREG:
9106 case SIOCSMIIREG:
9107 return igb_mii_ioctl(netdev, ifr, cmd);
9108 case SIOCGHWTSTAMP:
9109 return igb_ptp_get_ts_config(netdev, ifr);
9110 case SIOCSHWTSTAMP:
9111 return igb_ptp_set_ts_config(netdev, ifr);
9112 default:
9113 return -EOPNOTSUPP;
9114 }
9115 }
9116
9117 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
9118 {
9119 struct igb_adapter *adapter = hw->back;
9120
9121 pci_read_config_word(adapter->pdev, reg, value);
9122 }
9123
9124 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
9125 {
9126 struct igb_adapter *adapter = hw->back;
9127
9128 pci_write_config_word(adapter->pdev, reg, *value);
9129 }
9130
9131 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
9132 {
9133 struct igb_adapter *adapter = hw->back;
9134
9135 if (pcie_capability_read_word(adapter->pdev, reg, value))
9136 return -E1000_ERR_CONFIG;
9137
9138 return 0;
9139 }
9140
9141 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
9142 {
9143 struct igb_adapter *adapter = hw->back;
9144
9145 if (pcie_capability_write_word(adapter->pdev, reg, *value))
9146 return -E1000_ERR_CONFIG;
9147
9148 return 0;
9149 }
9150
9151 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9152 {
9153 struct igb_adapter *adapter = netdev_priv(netdev);
9154 struct e1000_hw *hw = &adapter->hw;
9155 u32 ctrl, rctl;
9156 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
9157
9158 if (enable) {
9159
9160 ctrl = rd32(E1000_CTRL);
9161 ctrl |= E1000_CTRL_VME;
9162 wr32(E1000_CTRL, ctrl);
9163
9164
9165 rctl = rd32(E1000_RCTL);
9166 rctl &= ~E1000_RCTL_CFIEN;
9167 wr32(E1000_RCTL, rctl);
9168 } else {
9169
9170 ctrl = rd32(E1000_CTRL);
9171 ctrl &= ~E1000_CTRL_VME;
9172 wr32(E1000_CTRL, ctrl);
9173 }
9174
9175 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
9176 }
9177
9178 static int igb_vlan_rx_add_vid(struct net_device *netdev,
9179 __be16 proto, u16 vid)
9180 {
9181 struct igb_adapter *adapter = netdev_priv(netdev);
9182 struct e1000_hw *hw = &adapter->hw;
9183 int pf_id = adapter->vfs_allocated_count;
9184
9185
9186 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9187 igb_vfta_set(hw, vid, pf_id, true, !!vid);
9188
9189 set_bit(vid, adapter->active_vlans);
9190
9191 return 0;
9192 }
9193
9194 static int igb_vlan_rx_kill_vid(struct net_device *netdev,
9195 __be16 proto, u16 vid)
9196 {
9197 struct igb_adapter *adapter = netdev_priv(netdev);
9198 int pf_id = adapter->vfs_allocated_count;
9199 struct e1000_hw *hw = &adapter->hw;
9200
9201
9202 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9203 igb_vfta_set(hw, vid, pf_id, false, true);
9204
9205 clear_bit(vid, adapter->active_vlans);
9206
9207 return 0;
9208 }
9209
9210 static void igb_restore_vlan(struct igb_adapter *adapter)
9211 {
9212 u16 vid = 1;
9213
9214 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
9215 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
9216
9217 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
9218 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
9219 }
9220
9221 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9222 {
9223 struct pci_dev *pdev = adapter->pdev;
9224 struct e1000_mac_info *mac = &adapter->hw.mac;
9225
9226 mac->autoneg = 0;
9227
9228
9229
9230
9231 if ((spd & 1) || (dplx & ~1))
9232 goto err_inval;
9233
9234
9235
9236
9237 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
9238 switch (spd + dplx) {
9239 case SPEED_10 + DUPLEX_HALF:
9240 case SPEED_10 + DUPLEX_FULL:
9241 case SPEED_100 + DUPLEX_HALF:
9242 goto err_inval;
9243 default:
9244 break;
9245 }
9246 }
9247
9248 switch (spd + dplx) {
9249 case SPEED_10 + DUPLEX_HALF:
9250 mac->forced_speed_duplex = ADVERTISE_10_HALF;
9251 break;
9252 case SPEED_10 + DUPLEX_FULL:
9253 mac->forced_speed_duplex = ADVERTISE_10_FULL;
9254 break;
9255 case SPEED_100 + DUPLEX_HALF:
9256 mac->forced_speed_duplex = ADVERTISE_100_HALF;
9257 break;
9258 case SPEED_100 + DUPLEX_FULL:
9259 mac->forced_speed_duplex = ADVERTISE_100_FULL;
9260 break;
9261 case SPEED_1000 + DUPLEX_FULL:
9262 mac->autoneg = 1;
9263 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
9264 break;
9265 case SPEED_1000 + DUPLEX_HALF:
9266 default:
9267 goto err_inval;
9268 }
9269
9270
9271 adapter->hw.phy.mdix = AUTO_ALL_MODES;
9272
9273 return 0;
9274
9275 err_inval:
9276 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
9277 return -EINVAL;
9278 }
9279
9280 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
9281 bool runtime)
9282 {
9283 struct net_device *netdev = pci_get_drvdata(pdev);
9284 struct igb_adapter *adapter = netdev_priv(netdev);
9285 struct e1000_hw *hw = &adapter->hw;
9286 u32 ctrl, rctl, status;
9287 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9288 bool wake;
9289
9290 rtnl_lock();
9291 netif_device_detach(netdev);
9292
9293 if (netif_running(netdev))
9294 __igb_close(netdev, true);
9295
9296 igb_ptp_suspend(adapter);
9297
9298 igb_clear_interrupt_scheme(adapter);
9299 rtnl_unlock();
9300
9301 status = rd32(E1000_STATUS);
9302 if (status & E1000_STATUS_LU)
9303 wufc &= ~E1000_WUFC_LNKC;
9304
9305 if (wufc) {
9306 igb_setup_rctl(adapter);
9307 igb_set_rx_mode(netdev);
9308
9309
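		/* turn on all-multi mode if wake on multicast is enabled */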
9310 if (wufc & E1000_WUFC_MC) {
9311 rctl = rd32(E1000_RCTL);
9312 rctl |= E1000_RCTL_MPE;
9313 wr32(E1000_RCTL, rctl);
9314 }
9315
9316 ctrl = rd32(E1000_CTRL);
9317 ctrl |= E1000_CTRL_ADVD3WUC;
9318 wr32(E1000_CTRL, ctrl);
9319
9320
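		/* stop new PCIe master requests and let pending ones drain
		 * before arming PME
		 */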
9321 igb_disable_pcie_master(hw);
9322
9323 wr32(E1000_WUC, E1000_WUC_PME_EN);
9324 wr32(E1000_WUFC, wufc);
9325 } else {
9326 wr32(E1000_WUC, 0);
9327 wr32(E1000_WUFC, 0);
9328 }
9329
9330 wake = wufc || adapter->en_mng_pt;
9331 if (!wake)
9332 igb_power_down_link(adapter);
9333 else
9334 igb_power_up_link(adapter);
9335
9336 if (enable_wake)
9337 *enable_wake = wake;
9338
9339
9340
9341
9342 igb_release_hw_control(adapter);
9343
9344 pci_disable_device(pdev);
9345
9346 return 0;
9347 }
9348
9349 static void igb_deliver_wake_packet(struct net_device *netdev)
9350 {
9351 struct igb_adapter *adapter = netdev_priv(netdev);
9352 struct e1000_hw *hw = &adapter->hw;
9353 struct sk_buff *skb;
9354 u32 wupl;
9355
9356 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
9357
9358
9359
9360
9361 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
9362 return;
9363
9364 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
9365 if (!skb)
9366 return;
9367
9368 skb_put(skb, wupl);
9369
9370
9371 wupl = roundup(wupl, 4);
9372
9373 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
9374
9375 skb->protocol = eth_type_trans(skb, netdev);
9376 netif_rx(skb);
9377 }
9378
9379 static int __maybe_unused igb_suspend(struct device *dev)
9380 {
9381 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
9382 }
9383
9384 static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
9385 {
9386 struct pci_dev *pdev = to_pci_dev(dev);
9387 struct net_device *netdev = pci_get_drvdata(pdev);
9388 struct igb_adapter *adapter = netdev_priv(netdev);
9389 struct e1000_hw *hw = &adapter->hw;
9390 u32 err, val;
9391
9392 pci_set_power_state(pdev, PCI_D0);
9393 pci_restore_state(pdev);
9394 pci_save_state(pdev);
9395
9396 if (!pci_device_is_present(pdev))
9397 return -ENODEV;
9398 err = pci_enable_device_mem(pdev);
9399 if (err) {
9400 dev_err(&pdev->dev,
9401 "igb: Cannot enable PCI device from suspend\n");
9402 return err;
9403 }
9404 pci_set_master(pdev);
9405
9406 pci_enable_wake(pdev, PCI_D3hot, 0);
9407 pci_enable_wake(pdev, PCI_D3cold, 0);
9408
9409 if (igb_init_interrupt_scheme(adapter, true)) {
9410 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9411 return -ENOMEM;
9412 }
9413
9414 igb_reset(adapter);
9415
9416 	/* let the f/w know that the h/w is now under the control of the
9417 	 * driver.
9418 	 */
9419 igb_get_hw_control(adapter);
9420
9421 val = rd32(E1000_WUS);
9422 if (val & WAKE_PKT_WUS)
9423 igb_deliver_wake_packet(netdev);
9424
9425 wr32(E1000_WUS, ~0);
9426
9427 if (!rpm)
9428 rtnl_lock();
9429 if (!err && netif_running(netdev))
9430 err = __igb_open(netdev, true);
9431
9432 if (!err)
9433 netif_device_attach(netdev);
9434 if (!rpm)
9435 rtnl_unlock();
9436
9437 return err;
9438 }
9439
9440 static int __maybe_unused igb_resume(struct device *dev)
9441 {
9442 return __igb_resume(dev, false);
9443 }
9444
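/* Runtime-PM idle callback: if there is no link, schedule a runtime suspend
 * in 5 seconds; returning -EBUSY tells the PM core not to suspend immediately.
 */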
9445 static int __maybe_unused igb_runtime_idle(struct device *dev)
9446 {
9447 struct net_device *netdev = dev_get_drvdata(dev);
9448 struct igb_adapter *adapter = netdev_priv(netdev);
9449
9450 if (!igb_has_link(adapter))
9451 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
9452
9453 return -EBUSY;
9454 }
9455
9456 static int __maybe_unused igb_runtime_suspend(struct device *dev)
9457 {
9458 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
9459 }
9460
9461 static int __maybe_unused igb_runtime_resume(struct device *dev)
9462 {
9463 return __igb_resume(dev, true);
9464 }
9465
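/* igb_shutdown - called on system shutdown/poweroff; arms D3 wake if
 * Wake-on-LAN or manageability requested it in __igb_shutdown().
 */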
9466 static void igb_shutdown(struct pci_dev *pdev)
9467 {
9468 bool wake;
9469
9470 __igb_shutdown(pdev, &wake, 0);
9471
9472 if (system_state == SYSTEM_POWER_OFF) {
9473 pci_wake_from_d3(pdev, wake);
9474 pci_set_power_state(pdev, PCI_D3hot);
9475 }
9476 }
9477
9478 #ifdef CONFIG_PCI_IOV
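/* igb_sriov_reinit - rebuild the queue and interrupt configuration after the
 * number of VFs has changed, restarting the interface if it was running.
 */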
9479 static int igb_sriov_reinit(struct pci_dev *dev)
9480 {
9481 struct net_device *netdev = pci_get_drvdata(dev);
9482 struct igb_adapter *adapter = netdev_priv(netdev);
9483 struct pci_dev *pdev = adapter->pdev;
9484
9485 rtnl_lock();
9486
9487 if (netif_running(netdev))
9488 igb_close(netdev);
9489 else
9490 igb_reset(adapter);
9491
9492 igb_clear_interrupt_scheme(adapter);
9493
9494 igb_init_queue_configuration(adapter);
9495
9496 if (igb_init_interrupt_scheme(adapter, true)) {
9497 rtnl_unlock();
9498 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9499 return -ENOMEM;
9500 }
9501
9502 if (netif_running(netdev))
9503 igb_open(netdev);
9504
9505 rtnl_unlock();
9506
9507 return 0;
9508 }
9509
9510 static int igb_pci_disable_sriov(struct pci_dev *dev)
9511 {
9512 int err = igb_disable_sriov(dev);
9513
9514 if (!err)
9515 err = igb_sriov_reinit(dev);
9516
9517 return err;
9518 }
9519
9520 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
9521 {
9522 int err = igb_enable_sriov(dev, num_vfs);
9523
9524 if (err)
9525 goto out;
9526
9527 err = igb_sriov_reinit(dev);
9528 if (!err)
9529 return num_vfs;
9530
9531 out:
9532 return err;
9533 }
9534
9535 #endif
9536 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9537 {
9538 #ifdef CONFIG_PCI_IOV
9539 if (num_vfs == 0)
9540 return igb_pci_disable_sriov(dev);
9541 else
9542 return igb_pci_enable_sriov(dev, num_vfs);
9543 #endif
9544 return 0;
9545 }
9546
9547 /**
9548  *  igb_io_error_detected - called when PCI error is detected
9549  *  @pdev: Pointer to PCI device
9550  *  @state: The current pci connection state
9551  *
9552  *  This function is called after a PCI bus error affecting
9553  *  this device has been detected.
9554  **/
9555 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9556 pci_channel_state_t state)
9557 {
9558 struct net_device *netdev = pci_get_drvdata(pdev);
9559 struct igb_adapter *adapter = netdev_priv(netdev);
9560
9561 netif_device_detach(netdev);
9562
9563 if (state == pci_channel_io_perm_failure)
9564 return PCI_ERS_RESULT_DISCONNECT;
9565
9566 if (netif_running(netdev))
9567 igb_down(adapter);
9568 pci_disable_device(pdev);
9569
9570 	/* Request a slot reset. */
9571 return PCI_ERS_RESULT_NEED_RESET;
9572 }
9573
9574 /**
9575  *  igb_io_slot_reset - called after the pci bus has been reset.
9576  *  @pdev: Pointer to PCI device
9577  *
9578  *  Restart the card from scratch, as if from a cold-boot. Implementation
9579  *  resembles the first half of the __igb_resume routine.
9580  **/
9581 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9582 {
9583 struct net_device *netdev = pci_get_drvdata(pdev);
9584 struct igb_adapter *adapter = netdev_priv(netdev);
9585 struct e1000_hw *hw = &adapter->hw;
9586 pci_ers_result_t result;
9587
9588 if (pci_enable_device_mem(pdev)) {
9589 dev_err(&pdev->dev,
9590 "Cannot re-enable PCI device after reset.\n");
9591 result = PCI_ERS_RESULT_DISCONNECT;
9592 } else {
9593 pci_set_master(pdev);
9594 pci_restore_state(pdev);
9595 pci_save_state(pdev);
9596
9597 pci_enable_wake(pdev, PCI_D3hot, 0);
9598 pci_enable_wake(pdev, PCI_D3cold, 0);
9599
9600 		/* In case of PCI error, the adapter loses its HW address
9601 		 * so we should re-assign it here.
9602 		 */
9603 hw->hw_addr = adapter->io_addr;
9604
9605 igb_reset(adapter);
9606 wr32(E1000_WUS, ~0);
9607 result = PCI_ERS_RESULT_RECOVERED;
9608 }
9609
9610 return result;
9611 }
9612
9613 /**
9614  *  igb_io_resume - called when traffic can start flowing again.
9615  *  @pdev: Pointer to PCI device
9616  *
9617  *  This callback is called when the error recovery driver tells us that
9618  *  it is OK to resume normal operation. Implementation resembles the
9619  *  second half of the __igb_resume routine.
9620  */
9621 static void igb_io_resume(struct pci_dev *pdev)
9622 {
9623 struct net_device *netdev = pci_get_drvdata(pdev);
9624 struct igb_adapter *adapter = netdev_priv(netdev);
9625
9626 if (netif_running(netdev)) {
9627 if (igb_up(adapter)) {
9628 dev_err(&pdev->dev, "igb_up failed after reset\n");
9629 return;
9630 }
9631 }
9632
9633 netif_device_attach(netdev);
9634
9635 	/* let the f/w know that the h/w is now under the control of the
9636 	 * driver.
9637 	 */
9638 igb_get_hw_control(adapter);
9639 }
9640
9641 /**
9642  *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
9643  *  @adapter: Pointer to adapter structure
9644  *  @index: Index of the RAR entry which needs to be synced with MAC table
9645  **/
9646 static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9647 {
9648 struct e1000_hw *hw = &adapter->hw;
9649 u32 rar_low, rar_high;
9650 u8 *addr = adapter->mac_table[index].addr;
9651
9652 	/* HW expects these to be in network order when they are plugged
9653 	 * into the registers which are little endian.  In order to guarantee
9654 	 * that ordering we need to do an leXX_to_cpup here in order to be
9655 	 * ready for the byteswap that occurs with writel
9656 	 */
9657 rar_low = le32_to_cpup((__le32 *)(addr));
9658 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9659
9660 	/* Indicate to hardware the Address is Valid. */
9661 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9662 if (is_valid_ether_addr(addr))
9663 rar_high |= E1000_RAH_AV;
9664
9665 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9666 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9667
9668 switch (hw->mac.type) {
9669 case e1000_82575:
9670 case e1000_i210:
9671 if (adapter->mac_table[index].state &
9672 IGB_MAC_STATE_QUEUE_STEERING)
9673 rar_high |= E1000_RAH_QSEL_ENABLE;
9674
9675 rar_high |= E1000_RAH_POOL_1 *
9676 adapter->mac_table[index].queue;
9677 break;
9678 default:
9679 rar_high |= E1000_RAH_POOL_1 <<
9680 adapter->mac_table[index].queue;
9681 break;
9682 }
9683 }
9684
9685 wr32(E1000_RAL(index), rar_low);
9686 wrfl();
9687 wr32(E1000_RAH(index), rar_high);
9688 wrfl();
9689 }
9690
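/* igb_set_vf_mac - program the MAC address for a VF into the matching
 * Receive Address Register (RAR) entry and the driver's MAC table.
 */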
9691 static int igb_set_vf_mac(struct igb_adapter *adapter,
9692 int vf, unsigned char *mac_addr)
9693 {
9694 struct e1000_hw *hw = &adapter->hw;
9695 	/* VF MAC addresses start at the end of the receive address registers
9696 	 * and move towards the first, so a collision should not be possible.
9697 	 */
9698 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9699 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9700
9701 ether_addr_copy(vf_mac_addr, mac_addr);
9702 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9703 adapter->mac_table[rar_entry].queue = vf;
9704 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9705 igb_rar_set_index(adapter, rar_entry);
9706
9707 return 0;
9708 }
9709
9710 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9711 {
9712 struct igb_adapter *adapter = netdev_priv(netdev);
9713
9714 if (vf >= adapter->vfs_allocated_count)
9715 return -EINVAL;
9716
9717
9718 	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
9719 	 * flag and allows the MAC to be overwritten from the VF netdev.
9720 	 * This is necessary to allow libvirt to revert the MAC address
9721 	 * after an attach/detach.
9722 	 */
9723 if (is_zero_ether_addr(mac)) {
9724 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9725 dev_info(&adapter->pdev->dev,
9726 "remove administratively set MAC on VF %d\n",
9727 vf);
9728 } else if (is_valid_ether_addr(mac)) {
9729 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9730 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9731 mac, vf);
9732 dev_info(&adapter->pdev->dev,
9733 "Reload the VF driver to make this change effective.");
9734
9735 if (test_bit(__IGB_DOWN, &adapter->state)) {
9736 dev_warn(&adapter->pdev->dev,
9737 "The VF MAC address has been set, but the PF device is not up.\n");
9738 dev_warn(&adapter->pdev->dev,
9739 "Bring the PF device up before attempting to use the VF device.\n");
9740 }
9741 } else {
9742 return -EINVAL;
9743 }
9744 return igb_set_vf_mac(adapter, vf, mac);
9745 }
9746
9747 static int igb_link_mbps(int internal_link_speed)
9748 {
9749 switch (internal_link_speed) {
9750 case SPEED_100:
9751 return 100;
9752 case SPEED_1000:
9753 return 1000;
9754 default:
9755 return 0;
9756 }
9757 }
9758
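/* igb_set_vf_rate_limit - program the per-VF Tx rate limiter (RTTBCNRC).
 * The rate factor is link_speed / tx_rate expressed as a fixed-point value:
 * rf_int holds the integer part and rf_dec the fractional part scaled by
 * 2^E1000_RTTBCNRC_RF_INT_SHIFT. A tx_rate of 0 disables the limiter.
 */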
9759 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9760 int link_speed)
9761 {
9762 int rf_dec, rf_int;
9763 u32 bcnrc_val;
9764
9765 if (tx_rate != 0) {
9766 		/* Calculate the rate factor values to set */
9767 rf_int = link_speed / tx_rate;
9768 rf_dec = (link_speed - (rf_int * tx_rate));
9769 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9770 tx_rate;
9771
9772 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9773 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9774 E1000_RTTBCNRC_RF_INT_MASK);
9775 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9776 } else {
9777 bcnrc_val = 0;
9778 }
9779
9780 	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
9781 	/* Set global transmit compensation time to the MMW_SIZE in the
9782 	 * RTTBCNRM register. MMW_SIZE=0x14 if 9728-byte jumbo is supported.
9783 	 */
9784 wr32(E1000_RTTBCNRM, 0x14);
9785 wr32(E1000_RTTBCNRC, bcnrc_val);
9786 }
9787
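/* igb_check_vf_rate_limit - re-apply (or clear) the per-VF Tx rate limits
 * after a link speed change; VF rate limiting is only supported on 82576.
 */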
9788 static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9789 {
9790 int actual_link_speed, i;
9791 bool reset_rate = false;
9792
9793 	/* VF TX rate limit was not set or not supported */
9794 if ((adapter->vf_rate_link_speed == 0) ||
9795 (adapter->hw.mac.type != e1000_82576))
9796 return;
9797
9798 actual_link_speed = igb_link_mbps(adapter->link_speed);
9799 if (actual_link_speed != adapter->vf_rate_link_speed) {
9800 reset_rate = true;
9801 adapter->vf_rate_link_speed = 0;
9802 dev_info(&adapter->pdev->dev,
9803 "Link speed has been changed. VF Transmit rate is disabled\n");
9804 }
9805
9806 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9807 if (reset_rate)
9808 adapter->vf_data[i].tx_rate = 0;
9809
9810 igb_set_vf_rate_limit(&adapter->hw, i,
9811 adapter->vf_data[i].tx_rate,
9812 actual_link_speed);
9813 }
9814 }
9815
9816 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9817 int min_tx_rate, int max_tx_rate)
9818 {
9819 struct igb_adapter *adapter = netdev_priv(netdev);
9820 struct e1000_hw *hw = &adapter->hw;
9821 int actual_link_speed;
9822
9823 if (hw->mac.type != e1000_82576)
9824 return -EOPNOTSUPP;
9825
9826 if (min_tx_rate)
9827 return -EINVAL;
9828
9829 actual_link_speed = igb_link_mbps(adapter->link_speed);
9830 if ((vf >= adapter->vfs_allocated_count) ||
9831 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9832 (max_tx_rate < 0) ||
9833 (max_tx_rate > actual_link_speed))
9834 return -EINVAL;
9835
9836 adapter->vf_rate_link_speed = actual_link_speed;
9837 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9838 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9839
9840 return 0;
9841 }
9842
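/* igb_ndo_set_vf_spoofchk - enable or disable MAC and VLAN anti-spoof
 * checking for a VF by updating the DTXSWC/TXSWC register.
 */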
9843 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9844 bool setting)
9845 {
9846 struct igb_adapter *adapter = netdev_priv(netdev);
9847 struct e1000_hw *hw = &adapter->hw;
9848 u32 reg_val, reg_offset;
9849
9850 if (!adapter->vfs_allocated_count)
9851 return -EOPNOTSUPP;
9852
9853 if (vf >= adapter->vfs_allocated_count)
9854 return -EINVAL;
9855
9856 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9857 reg_val = rd32(reg_offset);
9858 if (setting)
9859 reg_val |= (BIT(vf) |
9860 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9861 else
9862 reg_val &= ~(BIT(vf) |
9863 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9864 wr32(reg_offset, reg_val);
9865
9866 adapter->vf_data[vf].spoofchk_enabled = setting;
9867 return 0;
9868 }
9869
9870 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9871 {
9872 struct igb_adapter *adapter = netdev_priv(netdev);
9873
9874 if (vf >= adapter->vfs_allocated_count)
9875 return -EINVAL;
9876 if (adapter->vf_data[vf].trusted == setting)
9877 return 0;
9878
9879 adapter->vf_data[vf].trusted = setting;
9880
9881 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9882 vf, setting ? "" : "not ");
9883 return 0;
9884 }
9885
9886 static int igb_ndo_get_vf_config(struct net_device *netdev,
9887 int vf, struct ifla_vf_info *ivi)
9888 {
9889 struct igb_adapter *adapter = netdev_priv(netdev);
9890 if (vf >= adapter->vfs_allocated_count)
9891 return -EINVAL;
9892 ivi->vf = vf;
9893 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9894 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9895 ivi->min_tx_rate = 0;
9896 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9897 ivi->qos = adapter->vf_data[vf].pf_qos;
9898 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9899 ivi->trusted = adapter->vf_data[vf].trusted;
9900 return 0;
9901 }
9902
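/* igb_vmm_control - configure virtualization-related offloads (VLAN tag
 * handling for replicated packets) and, when VFs are allocated, enable
 * VMDq loopback, replication and anti-spoofing on the PF.
 */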
9903 static void igb_vmm_control(struct igb_adapter *adapter)
9904 {
9905 struct e1000_hw *hw = &adapter->hw;
9906 u32 reg;
9907
9908 switch (hw->mac.type) {
9909 case e1000_82575:
9910 case e1000_i210:
9911 case e1000_i211:
9912 case e1000_i354:
9913 default:
9914 		/* replication is not supported on these devices */
9915 return;
9916 case e1000_82576:
9917 		/* notify HW that the MAC is adding vlan tags */
9918 reg = rd32(E1000_DTXCTL);
9919 reg |= E1000_DTXCTL_VLAN_ADDED;
9920 wr32(E1000_DTXCTL, reg);
9921 fallthrough;
9922 case e1000_82580:
9923 		/* enable replication vlan tag stripping */
9924 reg = rd32(E1000_RPLOLR);
9925 reg |= E1000_RPLOLR_STRVLAN;
9926 wr32(E1000_RPLOLR, reg);
9927 fallthrough;
9928 case e1000_i350:
9929 		/* none of the above registers are supported by i350 */
9930 break;
9931 }
9932
9933 if (adapter->vfs_allocated_count) {
9934 igb_vmdq_set_loopback_pf(hw, true);
9935 igb_vmdq_set_replication_pf(hw, true);
9936 igb_vmdq_set_anti_spoofing_pf(hw, true,
9937 adapter->vfs_allocated_count);
9938 } else {
9939 igb_vmdq_set_loopback_pf(hw, false);
9940 igb_vmdq_set_replication_pf(hw, false);
9941 }
9942 }
9943
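/* igb_init_dmac - configure DMA coalescing (DMAC) on parts newer than the
 * 82580, using the packet buffer allocation @pba to derive the flow-control
 * and coalescing thresholds; on 82580 DMA coalescing is explicitly disabled.
 */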
9944 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9945 {
9946 struct e1000_hw *hw = &adapter->hw;
9947 u32 dmac_thr;
9948 u16 hwm;
9949 u32 reg;
9950
9951 if (hw->mac.type > e1000_82580) {
9952 if (adapter->flags & IGB_FLAG_DMAC) {
9953 			/* force threshold to 0. */
9954 wr32(E1000_DMCTXTH, 0);
9955
9956 			/* DMA Coalescing high water mark needs to be greater
9957 			 * than the Rx threshold. Set hwm to PBA - max frame
9958 			 * size in 16B units, capping it at PBA - 6KB.
9959 			 */
9960 hwm = 64 * (pba - 6);
9961 reg = rd32(E1000_FCRTC);
9962 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9963 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9964 & E1000_FCRTC_RTH_COAL_MASK);
9965 wr32(E1000_FCRTC, reg);
9966
9967 			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
9968 			 * frame size, capping it at PBA - 10KB.
9969 			 */
9970 dmac_thr = pba - 10;
9971 reg = rd32(E1000_DMACR);
9972 reg &= ~E1000_DMACR_DMACTHR_MASK;
9973 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9974 & E1000_DMACR_DMACTHR_MASK);
9975
9976 			/* transition to L0x or L1 if available */
9977 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9978
9979 			/* watchdog timer = +-1000 usec in 32 usec intervals */
9980 reg |= (1000 >> 5);
9981
9982 			/* Disable BMC-to-OS Watchdog Enable */
9983 if (hw->mac.type != e1000_i354)
9984 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9985 wr32(E1000_DMACR, reg);
9986
9987 			/* no lower threshold to disable
9988 			 * coalescing (smart FIFO), UTRESH = 0
9989 			 */
9990 wr32(E1000_DMCRTRH, 0);
9991
9992 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9993 			/* time to wait before requesting transition to low power state */
9994 wr32(E1000_DMCTLX, reg);
9995
9996 			/* free space in tx packet buffer to wake from
9997 			 * DMA coalescing
9998 			 */
9999 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
10000 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
10001 }
10002
10003 if (hw->mac.type >= e1000_i210 ||
10004 (adapter->flags & IGB_FLAG_DMAC)) {
10005 reg = rd32(E1000_PCIEMISC);
10006 reg |= E1000_PCIEMISC_LX_DECISION;
10007 wr32(E1000_PCIEMISC, reg);
10008 }
10009 } else if (hw->mac.type == e1000_82580) {
10010 u32 reg = rd32(E1000_PCIEMISC);
10011
10012 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
10013 wr32(E1000_DMACR, 0);
10014 }
10015 }
10016
10017 /**
10018  *  igb_read_i2c_byte - Reads 8 bit word over I2C
10019  *  @hw: pointer to hardware structure
10020  *  @byte_offset: byte offset to read
10021  *  @dev_addr: device address
10022  *  @data: value read
10023  *
10024  *  Performs byte read operation over I2C interface at
10025  *  a specified device address.
10026  **/
10027 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
10028 u8 dev_addr, u8 *data)
10029 {
10030 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
10031 struct i2c_client *this_client = adapter->i2c_client;
10032 s32 status;
10033 u16 swfw_mask = 0;
10034
10035 if (!this_client)
10036 return E1000_ERR_I2C;
10037
10038 swfw_mask = E1000_SWFW_PHY0_SM;
10039
10040 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
10041 return E1000_ERR_SWFW_SYNC;
10042
10043 status = i2c_smbus_read_byte_data(this_client, byte_offset);
10044 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
10045
10046 	if (status < 0)
10047 		return E1000_ERR_I2C;
10048 
10049 	*data = status;
10050 	return 0;
10052 }
10053
10054 /**
10055  *  igb_write_i2c_byte - Writes 8 bit word over I2C
10056  *  @hw: pointer to hardware structure
10057  *  @byte_offset: byte offset to write
10058  *  @dev_addr: device address
10059  *  @data: value to write
10060  *
10061  *  Performs byte write operation over I2C interface at
10062  *  a specified device address.
10063  **/
10064 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
10065 u8 dev_addr, u8 data)
10066 {
10067 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
10068 struct i2c_client *this_client = adapter->i2c_client;
10069 s32 status;
10070 u16 swfw_mask = E1000_SWFW_PHY0_SM;
10071
10072 if (!this_client)
10073 return E1000_ERR_I2C;
10074
10075 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
10076 return E1000_ERR_SWFW_SYNC;
10077 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
10078 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
10079
10080 	if (status)
10081 		return E1000_ERR_I2C;
10082 
10083 	return 0;
10085 }
10086
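/* igb_reinit_queues - tear down and rebuild the interrupt and queue
 * configuration, closing and reopening the interface if it is running.
 */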
10087 int igb_reinit_queues(struct igb_adapter *adapter)
10088 {
10089 struct net_device *netdev = adapter->netdev;
10090 struct pci_dev *pdev = adapter->pdev;
10091 int err = 0;
10092
10093 if (netif_running(netdev))
10094 igb_close(netdev);
10095
10096 igb_reset_interrupt_capability(adapter);
10097
10098 if (igb_init_interrupt_scheme(adapter, true)) {
10099 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
10100 return -ENOMEM;
10101 }
10102
10103 if (netif_running(netdev))
10104 err = igb_open(netdev);
10105
10106 return err;
10107 }
10108
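/* igb_nfc_filter_exit - remove all ethtool NFC and cls_flower filters from
 * the hardware; the software list entries themselves are not freed here.
 */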
10109 static void igb_nfc_filter_exit(struct igb_adapter *adapter)
10110 {
10111 struct igb_nfc_filter *rule;
10112
10113 spin_lock(&adapter->nfc_lock);
10114
10115 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
10116 igb_erase_filter(adapter, rule);
10117
10118 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
10119 igb_erase_filter(adapter, rule);
10120
10121 spin_unlock(&adapter->nfc_lock);
10122 }
10123
10124 static void igb_nfc_filter_restore(struct igb_adapter *adapter)
10125 {
10126 struct igb_nfc_filter *rule;
10127
10128 spin_lock(&adapter->nfc_lock);
10129
10130 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
10131 igb_add_filter(adapter, rule);
10132
10133 spin_unlock(&adapter->nfc_lock);
10134 }
10135