#ifndef _IGC_H_
#define _IGC_H_

#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>

#include "igc_hw.h"

void igc_ethtool_set_ops(struct net_device *);

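/* Transmit and receive queues */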
#define IGC_MAX_RX_QUEUES 4
#define IGC_MAX_TX_QUEUES 4

#define MAX_Q_VECTORS 8
#define MAX_STD_JUMBO_FRAME_SIZE 9216

#define MAX_ETYPE_FILTER 8
#define IGC_RETA_SIZE 128

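/* SDP support: software-definable pins used for PTP auxiliary functions */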
#define IGC_N_EXTTS 2
#define IGC_N_PEROUT 2
#define IGC_N_SDP 4

#define MAX_FLEX_FILTER 32

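/* MAC address filter match type: match on destination or source address */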
enum igc_mac_filter_type {
	IGC_MAC_FILTER_TYPE_DST = 0,
	IGC_MAC_FILTER_TYPE_SRC
};

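/* Per-queue statistics, updated under the corresponding u64_stats_sync */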
struct igc_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igc_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igc_rx_packet_stats {
	u64 ipv4_packets;
	u64 ipv4e_packets;
	u64 ipv6_packets;
	u64 ipv6e_packets;
	u64 tcp_packets;
	u64 udp_packets;
	u64 sctp_packets;
	u64 nfs_packets;
	u64 other_packets;
};

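/* Rings grouped under one q_vector direction (Rx or Tx) for interrupt moderation */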
struct igc_ring_container {
	struct igc_ring *ring;
	unsigned int total_bytes;
	unsigned int total_packets;
	u16 work_limit;
	u8 count;
	u8 itr;
};

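/* One Tx or Rx descriptor ring */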
struct igc_ring {
	struct igc_q_vector *q_vector;
	struct net_device *netdev;
	struct device *dev;
	union {
		struct igc_tx_buffer *tx_buffer_info;
		struct igc_rx_buffer *rx_buffer_info;
	};
	void *desc;
	unsigned long flags;
	void __iomem *tail;
	dma_addr_t dma;
	unsigned int size;

	u16 count;
	u8 queue_index;
	u8 reg_idx;
	bool launchtime_enable;

	u32 start_time;
	u32 end_time;

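	/* CBS (credit-based shaper) parameters */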
	bool cbs_enable;
	s32 idleslope;
	s32 sendslope;
	s32 hicredit;
	s32 locredit;

	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		struct {
			struct igc_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		struct {
			struct igc_rx_queue_stats rx_stats;
			struct igc_rx_packet_stats pkt_stats;
			struct u64_stats_sync rx_syncp;
			struct sk_buff *skb;
		};
	};

	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

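/* Board-specific private data structure */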
struct igc_adapter {
	struct net_device *netdev;

	struct ethtool_eee eee;
	u16 eee_advert;

	unsigned long state;
	unsigned int flags;
	unsigned int num_q_vectors;

	struct msix_entry *msix_entries;

	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];

	int num_rx_queues;
	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];

	struct timer_list watchdog_timer;
	struct timer_list dma_err_timer;
	struct timer_list phy_info_timer;

	u32 wol;
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;

	u8 port_num;

	u8 __iomem *io_addr;

	u32 rx_itr_setting;
	u32 tx_itr_setting;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	struct work_struct dma_err_task;
	bool fc_autoneg;

	u8 tx_timeout_factor;

	int msg_enable;
	u32 max_frame_size;
	u32 min_frame_size;

	ktime_t base_time;
	ktime_t cycle_time;

	struct pci_dev *pdev;

	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	struct igc_hw hw;
	struct igc_hw_stats stats;

	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	u16 tx_ring_count;
	u16 rx_ring_count;

	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;

	u32 rss_queues;
	u32 rss_indir_tbl_init;

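	/* Any access to elements in nfc_rule_list is protected by the nfc_rule_lock */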
	struct mutex nfc_rule_lock;
	struct list_head nfc_rule_list;
	unsigned int nfc_rule_count;

	u8 rss_indir_tbl[IGC_RETA_SIZE];

	unsigned long link_check_timeout;
	struct igc_info ei;

	u32 test_icr;

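	/* PTP (IEEE 1588) hardware clock and Tx timestamping state */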
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned int ptp_flags;

	spinlock_t tmreg_lock;
	struct cyclecounter cc;
	struct timecounter tc;
	struct timespec64 prev_ptp_time;
	ktime_t ptp_reset_start;
	struct system_time_snapshot snapshot;

	char fw_version[32];

	struct bpf_prog *xdp_prog;

	bool pps_sys_wrap_on;

	struct ptp_pin_desc sdp_config[IGC_N_SDP];
	struct {
		struct timespec64 start;
		struct timespec64 period;
	} perout[IGC_N_PEROUT];
};

void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_open(struct net_device *netdev);
int igc_close(struct net_device *netdev);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
void igc_free_rx_resources(struct igc_ring *ring);
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
void igc_update_stats(struct igc_adapter *adapter);
void igc_disable_rx_ring(struct igc_ring *ring);
void igc_enable_rx_ring(struct igc_ring *ring);
void igc_disable_tx_ring(struct igc_ring *ring);
void igc_enable_tx_ring(struct igc_ring *ring);
int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);

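/* Debug dump helpers */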
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);

extern char igc_driver_name[];

#define IGC_REGS_LEN 740

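/* Flags controlling PTP/1588 function (adapter->ptp_flags) */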
#define IGC_PTP_ENABLED BIT(0)

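/* Adapter flag bits (adapter->flags) */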
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_EEE BIT(14)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)

#define IGC_FLAG_TSN_ANY_ENABLED \
	(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED)

#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)

#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000

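/* Interrupt throttle rate (ITR) defines */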
#define IGC_START_ITR 648
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
#define IGC_70K_ITR 56

#define IGC_DEFAULT_ITR 3
#define IGC_MAX_ITR_USECS 10000
#define IGC_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_MSIX_ENTRIES 10

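/* TX/RX descriptor defines */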
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
#define IGC_MIN_TXD 80
#define IGC_MAX_TXD 4096

#define IGC_DEFAULT_RXD 256
#define IGC_MIN_RXD 80
#define IGC_MAX_RXD 4096

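/* Supported Rx buffer sizes */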
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
#define IGC_RXBUFFER_3072 3072

#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256

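/* Per-speed transmit and receive latencies, in nanoseconds, used when adjusting hardware timestamps */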
#define IGC_I225_TX_LATENCY_10 240
#define IGC_I225_TX_LATENCY_100 58
#define IGC_I225_TX_LATENCY_1000 80
#define IGC_I225_TX_LATENCY_2500 1325
#define IGC_I225_RX_LATENCY_10 6450
#define IGC_I225_RX_LATENCY_100 185
#define IGC_I225_RX_LATENCY_1000 300
#define IGC_I225_RX_LATENCY_2500 1485

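/* Rx/Tx descriptor control thresholds: PTHRESH (prefetch), HTHRESH (host), WTHRESH (write-back) */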
#define IGC_RX_PTHRESH 8
#define IGC_RX_HTHRESH 8
#define IGC_TX_PTHRESH 8
#define IGC_TX_HTHRESH 1
#define IGC_RX_WTHRESH 4
#define IGC_TX_WTHRESH 16

#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define IGC_TS_HDR_LEN 16

#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)

#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif

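/* How many Rx buffers are bundled into one write to the hardware? */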
#define IGC_RX_BUFFER_WRITE 16

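/* VLAN info */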
#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGC_TX_FLAGS_VLAN_SHIFT 16

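/* igc_test_staterr - tests bits within Rx descriptor status and error fields */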
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

enum igc_state_t {
	__IGC_TESTING,
	__IGC_RESETTING,
	__IGC_DOWN,
	__IGC_PTP_TX_IN_PROGRESS,
};

enum igc_tx_flags {
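	/* cmd_type flags */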
	IGC_TX_FLAGS_VLAN = 0x01,
	IGC_TX_FLAGS_TSO = 0x02,
	IGC_TX_FLAGS_TSTAMP = 0x04,

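	/* olinfo flags */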
	IGC_TX_FLAGS_IPV4 = 0x10,
	IGC_TX_FLAGS_CSUM = 0x20,
};

enum igc_boards {
	board_base,
};

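/* The largest size we can write to a descriptor is 65535; to maintain a power-of-two alignment we limit ourselves to 32K */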
#define IGC_MAX_TXD_PWR 15
#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR)

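/* Tx descriptors needed, worst case */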
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

enum igc_tx_buffer_type {
	IGC_TX_BUFFER_TYPE_SKB,
	IGC_TX_BUFFER_TYPE_XDP,
	IGC_TX_BUFFER_TYPE_XSK,
};

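/* Wrapper around a pointer to a socket buffer so a DMA handle can be stored along with the buffer */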
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	enum igc_tx_buffer_type type;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igc_rx_buffer {
	union {
		struct {
			dma_addr_t dma;
			struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
			__u32 page_offset;
#else
			__u16 page_offset;
#endif
			__u16 pagecnt_bias;
		};
		struct xdp_buff *xdp;
	};
};

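/* Interrupt vector context: one NAPI instance plus its Rx and Tx ring containers */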
struct igc_q_vector {
	struct igc_adapter *adapter;
	void __iomem *itr_register;
	u32 eims_value;

	u16 itr_val;
	u8 set_itr;

	struct igc_ring_container rx, tx;

	struct napi_struct napi;

	struct rcu_head rcu;
	char name[IFNAMSIZ + 9];
	struct net_device poll_dev;

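	/* for dynamic allocation of rings associated with this q_vector */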
	struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};

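/* Fields that an NFC filter rule can match on */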
enum igc_filter_match_flags {
	IGC_FILTER_FLAG_ETHER_TYPE = BIT(0),
	IGC_FILTER_FLAG_VLAN_TCI = BIT(1),
	IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2),
	IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
	IGC_FILTER_FLAG_USER_DATA = BIT(4),
	IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
};

struct igc_nfc_filter {
	u8 match_flags;
	u16 etype;
	__be16 vlan_etype;
	u16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
	u8 user_data[8];
	u8 user_mask[8];
	u8 flex_index;
	u8 rx_queue;
	u8 prio;
	u8 immediate_irq;
	u8 drop;
};

struct igc_nfc_rule {
	struct list_head list;
	struct igc_nfc_filter filter;
	u32 location;
	u16 action;
	bool flex;
};

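/* Maximum number of RX NFC filter rules supported */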
#define IGC_MAX_RXNFC_RULES 64

struct igc_flex_filter {
	u8 index;
	u8 data[128];
	u8 mask[16];
	u8 length;
	u8 rx_queue;
	u8 prio;
	u8 immediate_irq;
	u8 drop;
};

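/* igc_desc_unused - calculate the number of unused descriptors on a ring */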
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}

static inline s32 igc_reset_phy(struct igc_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}

static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

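/* igc_ring flag bits (ring->flags) */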
enum igc_ring_flags_t {
	IGC_RING_FLAG_RX_3K_BUFFER,
	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGC_RING_FLAG_RX_SCTP_CSUM,
	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGC_RING_FLAG_TX_CTX_IDX,
	IGC_RING_FLAG_TX_DETECT_HANG,
	IGC_RING_FLAG_AF_XDP_ZC,
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
	set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
	clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGC_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
	return IGC_RXBUFFER_2048;
}

static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return -EOPNOTSUPP;
}

void igc_reinit_locked(struct igc_adapter *);
struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
				      u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);

void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);

#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))

#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)

#define IGC_RX_DESC(R, i) \
	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i) \
	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i) \
	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))

#endif