0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* Copyright(c) 2007 - 2018 Intel Corporation. */
0003 
0004 /* Linux PRO/1000 Ethernet Driver main header file */
0005 
0006 #ifndef _IGB_H_
0007 #define _IGB_H_
0008 
0009 #include "e1000_mac.h"
0010 #include "e1000_82575.h"
0011 
0012 #include <linux/timecounter.h>
0013 #include <linux/net_tstamp.h>
0014 #include <linux/ptp_clock_kernel.h>
0015 #include <linux/bitops.h>
0016 #include <linux/if_vlan.h>
0017 #include <linux/i2c.h>
0018 #include <linux/i2c-algo-bit.h>
0019 #include <linux/pci.h>
0020 #include <linux/mdio.h>
0021 
0022 #include <net/xdp.h>
0023 
struct igb_adapter;

/* PCS configuration: ignore signal-detect (SerDes link) */
#define E1000_PCS_CFG_IGN_SD    1

/* Interrupt defines.
 * ITR register values; names presumably reflect the approximate
 * interrupts/sec each setting yields (see ~6000 note below) -- TODO confirm.
 */
#define IGB_START_ITR       648 /* ~6000 ints/sec */
#define IGB_4K_ITR      980
#define IGB_20K_ITR     196
#define IGB_70K_ITR     56

/* TX/RX descriptor defines (defaults and ethtool-settable bounds) */
#define IGB_DEFAULT_TXD     256
#define IGB_DEFAULT_TX_WORK 128
#define IGB_MIN_TXD     80
#define IGB_MAX_TXD     4096

#define IGB_DEFAULT_RXD     256
#define IGB_MIN_RXD     80
#define IGB_MAX_RXD     4096

#define IGB_DEFAULT_ITR     3 /* dynamic */
#define IGB_MAX_ITR_USECS   10000
#define IGB_MIN_ITR_USECS   10
#define NON_Q_VECTORS       1
#define MAX_Q_VECTORS       8
#define MAX_MSIX_ENTRIES    10

/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES   8
#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211  2
#define IGB_MAX_TX_QUEUES   8
#define IGB_MAX_VF_MC_ENTRIES   30
#define IGB_MAX_VF_FUNCTIONS    8
#define IGB_MAX_VFTA_ENTRIES    128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID  0x1520

/* NVM version defines: masks/shifts for decoding the firmware (NVM)
 * version words read from the EEPROM.
 */
#define IGB_MAJOR_MASK      0xF000
#define IGB_MINOR_MASK      0x0FF0
#define IGB_BUILD_MASK      0x000F
#define IGB_COMB_VER_MASK   0x00FF
#define IGB_MAJOR_SHIFT     12
#define IGB_MINOR_SHIFT     4
#define IGB_COMB_VER_SHFT   8
#define IGB_NVM_VER_INVALID 0xFFFF
#define IGB_ETRACK_SHIFT    16
#define NVM_ETRACK_WORD     0x0042
#define NVM_COMB_VER_OFF    0x0083
#define NVM_COMB_VER_PTR    0x003d

/* Transmit and receive latency (for PTP timestamps).
 * Per-link-speed constants; presumably in nanoseconds -- TODO confirm
 * against the i210 datasheet.
 */
#define IGB_I210_TX_LATENCY_10      9542
#define IGB_I210_TX_LATENCY_100     1024
#define IGB_I210_TX_LATENCY_1000    178
#define IGB_I210_RX_LATENCY_10      20662
#define IGB_I210_RX_LATENCY_100     2213
#define IGB_I210_RX_LATENCY_1000    448

/* XDP verdict flags returned by the XDP run path */
#define IGB_XDP_PASS        0
#define IGB_XDP_CONSUMED    BIT(0)
#define IGB_XDP_TX      BIT(1)
#define IGB_XDP_REDIR       BIT(2)
/* Per-VF state tracked by the PF for each SR-IOV virtual function */
struct vf_data_storage {
    unsigned char vf_mac_addresses[ETH_ALEN];
    u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; /* multicast hash filter list */
    u16 num_vf_mc_hashes;   /* number of valid entries in vf_mc_hashes */
    u32 flags;              /* presumably IGB_VF_FLAG_* bits -- TODO confirm */
    unsigned long last_nack; /* time of last NACK sent to this VF -- units TBD */
    u16 pf_vlan; /* When set, guest VLAN config not allowed. */
    u16 pf_qos;
    u16 tx_rate;
    bool spoofchk_enabled;
    bool trusted;
};
0102 
/* Number of unicast MAC filters reserved for the PF in the RAR registers */
#define IGB_PF_MAC_FILTERS_RESERVED 3

/* One entry in the PF-maintained list of VF unicast MAC filters */
struct vf_mac_filter {
    struct list_head l;     /* linkage in the adapter's vf_mac_list */
    int vf;                 /* owning VF index */
    bool free;              /* true when this slot is available */
    u8 vf_mac[ETH_ALEN];
};

#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC     0x00000008 /* PF has set MAC address */
0117 
/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 *
 * NOTE: these macros expand to expressions that reference local variables
 * named 'hw' and 'adapter'; both must be in scope at the point of use.
 */
#define IGB_RX_PTHRESH  ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_RX_HTHRESH  8
#define IGB_TX_PTHRESH  ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH  1
#define IGB_RX_WTHRESH  ((hw->mac.type == e1000_82576 && \
              (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH  ((hw->mac.type == e1000_82576 && \
              (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/* header + FCS + room for two (QinQ) VLAN tags */
#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256    256
#define IGB_RXBUFFER_1536   1536
#define IGB_RXBUFFER_2048   2048
#define IGB_RXBUFFER_3072   3072
#define IGB_RX_HDR_LEN      IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN      16  /* length of the prepended HW timestamp header */
0150 
/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *   up negative.  In these cases we should fall back to the 3K
 *   buffers.
 */
#if (PAGE_SIZE < 8192)
#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
/* true when a 2K buffer cannot hold a padded+timestamped 1536-byte frame */
#define IGB_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
0165 
0166 static inline int igb_compute_pad(int rx_buf_len)
0167 {
0168     int page_size, pad_size;
0169 
0170     page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
0171     pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
0172 
0173     return pad_size;
0174 }
0175 
0176 static inline int igb_skb_pad(void)
0177 {
0178     int rx_buf_len;
0179 
0180     /* If a 2K buffer cannot handle a standard Ethernet frame then
0181      * optimize padding for a 3K buffer instead of a 1.5K buffer.
0182      *
0183      * For a 3K buffer we need to add enough padding to allow for
0184      * tailroom due to NET_IP_ALIGN possibly shifting us out of
0185      * cache-line alignment.
0186      */
0187     if (IGB_2K_TOO_SMALL_WITH_PADDING)
0188         rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
0189     else
0190         rx_buf_len = IGB_RXBUFFER_1536;
0191 
0192     /* if needed make room for NET_IP_ALIGN */
0193     rx_buf_len -= NET_IP_ALIGN;
0194 
0195     return igb_compute_pad(rx_buf_len);
0196 }
0197 
#define IGB_SKB_PAD igb_skb_pad()
#else
/* On >=8K pages the simple constant headroom is always sufficient */
#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */

/* DMA mapping attributes used for Rx buffer pages */
#define IGB_RX_DMA_ATTR \
    (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define AUTO_ALL_MODES      0
#define IGB_EEPROM_APME     0x0400  /* APM-enable bit in the EEPROM init word */

#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE    e1000_ms_hw_default
#endif

#define IGB_MNG_VLAN_NONE   -1  /* no manageability VLAN configured */
0218 
/* Per-packet Tx flags carried in igb_tx_buffer.tx_flags */
enum igb_tx_flags {
    /* cmd_type flags */
    IGB_TX_FLAGS_VLAN   = 0x01,
    IGB_TX_FLAGS_TSO    = 0x02,
    IGB_TX_FLAGS_TSTAMP = 0x04,

    /* olinfo flags */
    IGB_TX_FLAGS_IPV4   = 0x10,
    IGB_TX_FLAGS_CSUM   = 0x20,
};

/* VLAN info: the VLAN tag rides in the upper 16 bits of tx_flags */
#define IGB_TX_FLAGS_VLAN_MASK  0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16

/* The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD    (1u << IGB_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

/* EEPROM byte offsets (SFF-8472 diagnostics for SFP modules) */
#define IGB_SFF_8472_SWAP       0x5C
#define IGB_SFF_8472_COMP       0x5E

/* Bitmasks */
#define IGB_SFF_ADDRESSING_MODE     0x4
#define IGB_SFF_8472_UNSUP      0x00
0251 
/* TX resources are shared between XDP and netstack
 * and we need to tag the buffer type to distinguish them
 */
enum igb_tx_buf_type {
    IGB_TYPE_SKB = 0,
    IGB_TYPE_XDP,
};

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igb_tx_buffer {
    union e1000_adv_tx_desc *next_to_watch; /* last descriptor of this packet */
    unsigned long time_stamp;   /* set at xmit; used for Tx hang detection -- TODO confirm */
    enum igb_tx_buf_type type;  /* selects which union member is valid */
    union {
        struct sk_buff *skb;    /* valid when type == IGB_TYPE_SKB */
        struct xdp_frame *xdpf; /* valid when type == IGB_TYPE_XDP */
    };
    unsigned int bytecount;
    u16 gso_segs;
    __be16 protocol;

    DEFINE_DMA_UNMAP_ADDR(dma);
    DEFINE_DMA_UNMAP_LEN(len);
    u32 tx_flags;               /* enum igb_tx_flags bits + VLAN tag */
};
0279 
/* Rx buffer bookkeeping: one page (or half page) per descriptor */
struct igb_rx_buffer {
    dma_addr_t dma;
    struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
    __u32 page_offset;
#else
    __u16 page_offset;  /* 16 bits suffice when offsets stay below 64K */
#endif
    __u16 pagecnt_bias; /* page refcount bias for page-reuse accounting */
};

/* Per-Tx-queue statistics (protected by the ring's u64_stats_sync) */
struct igb_tx_queue_stats {
    u64 packets;
    u64 bytes;
    u64 restart_queue;
    u64 restart_queue2;
};

/* Per-Rx-queue statistics (protected by the ring's u64_stats_sync) */
struct igb_rx_queue_stats {
    u64 packets;
    u64 bytes;
    u64 drops;
    u64 csum_err;
    u64 alloc_failed;
};

/* One direction (Rx or Tx) of a q_vector's ring set */
struct igb_ring_container {
    struct igb_ring *ring;      /* pointer to linked list of rings */
    unsigned int total_bytes;   /* total bytes processed this int */
    unsigned int total_packets; /* total packets processed this int */
    u16 work_limit;         /* total work allowed per interrupt */
    u8 count;           /* total number of rings in vector */
    u8 itr;             /* current ITR setting for ring */
};
0314 
/* Descriptor ring state shared by the Tx and Rx paths.  Fields are laid
 * out read-mostly first; everything past next_to_clean is written in the
 * hot path (hence the internode cacheline alignment on the struct).
 */
struct igb_ring {
    struct igb_q_vector *q_vector;  /* backlink to q_vector */
    struct net_device *netdev;  /* back pointer to net_device */
    struct bpf_prog *xdp_prog;
    struct device *dev;     /* device pointer for dma mapping */
    union {             /* array of buffer info structs */
        struct igb_tx_buffer *tx_buffer_info;
        struct igb_rx_buffer *rx_buffer_info;
    };
    void *desc;         /* descriptor ring memory */
    unsigned long flags;        /* ring specific flags */
    void __iomem *tail;     /* pointer to ring tail register */
    dma_addr_t dma;         /* phys address of the ring */
    unsigned int  size;     /* length of desc. ring in bytes */

    u16 count;          /* number of desc. in the ring */
    u8 queue_index;         /* logical index of the ring*/
    u8 reg_idx;         /* physical index of the ring */
    bool launchtime_enable;     /* true if LaunchTime is enabled */
    bool cbs_enable;        /* indicates if CBS is enabled */
    s32 idleslope;          /* idleSlope in kbps */
    s32 sendslope;          /* sendSlope in kbps */
    s32 hicredit;           /* hiCredit in bytes */
    s32 locredit;           /* loCredit in bytes */

    /* everything past this point are written often */
    u16 next_to_clean;
    u16 next_to_use;
    u16 next_to_alloc;

    union {
        /* TX */
        struct {
            struct igb_tx_queue_stats tx_stats;
            struct u64_stats_sync tx_syncp;
            struct u64_stats_sync tx_syncp2;
        };
        /* RX */
        struct {
            struct sk_buff *skb;    /* in-progress frame across polls */
            struct igb_rx_queue_stats rx_stats;
            struct u64_stats_sync rx_syncp;
        };
    };
    struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
0361 
/* Per-interrupt-vector state: NAPI context plus its Rx/Tx ring sets */
struct igb_q_vector {
    struct igb_adapter *adapter;    /* backlink */
    int cpu;            /* CPU for DCA */
    u32 eims_value;         /* EIMS mask value */

    u16 itr_val;
    u8 set_itr;         /* nonzero when itr_val must be written to HW -- TODO confirm */
    void __iomem *itr_register;

    struct igb_ring_container rx, tx;

    struct napi_struct napi;
    struct rcu_head rcu;    /* to avoid race with update stats on free */
    char name[IFNAMSIZ + 9];    /* IRQ name, e.g. "<ifname>-TxRx-<n>" -- TODO confirm */

    /* for dynamic allocation of rings associated with this q_vector */
    struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
};

/* Bit numbers for igb_ring.flags (used with set_bit/test_bit) */
enum e1000_ring_flags_t {
    IGB_RING_FLAG_RX_3K_BUFFER,
    IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
    IGB_RING_FLAG_RX_SCTP_CSUM,
    IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
    IGB_RING_FLAG_TX_CTX_IDX,
    IGB_RING_FLAG_TX_DETECT_HANG
};
0389 
/* Accessors for the 3K-buffer ring flag */
#define ring_uses_large_buffer(ring) \
    test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
    set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
    clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

/* Accessors for the build_skb ring flag */
#define ring_uses_build_skb(ring) \
    test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define set_ring_build_skb_enabled(ring) \
    set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define clear_ring_build_skb_enabled(ring) \
    clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
0403 
0404 static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
0405 {
0406 #if (PAGE_SIZE < 8192)
0407     if (ring_uses_large_buffer(ring))
0408         return IGB_RXBUFFER_3072;
0409 
0410     if (ring_uses_build_skb(ring))
0411         return IGB_MAX_FRAME_BUILD_SKB;
0412 #endif
0413     return IGB_RXBUFFER_2048;
0414 }
0415 
/* igb_rx_pg_order - page allocation order for this ring's Rx buffers.
 * Order 1 (two pages) is needed only for 3K buffers on small-page systems.
 */
static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
    return ring_uses_large_buffer(ring) ? 1 : 0;
#else
    return 0;
#endif
}
0424 
/* Total allocation size backing one Rx buffer */
#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))

/* Default descriptor command bits: end-of-packet + report-status */
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

/* Typed accessors for the i-th descriptor of a ring */
#define IGB_RX_DESC(R, i)   \
    (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i)   \
    (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i)   \
    (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
0435 
0436 /* igb_test_staterr - tests bits within Rx descriptor status and error fields */
0437 static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
0438                       const u32 stat_err_bits)
0439 {
0440     return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
0441 }
0442 
0443 /* igb_desc_unused - calculate if we have unused descriptors */
0444 static inline int igb_desc_unused(struct igb_ring *ring)
0445 {
0446     if (ring->next_to_clean > ring->next_to_use)
0447         return ring->next_to_clean - ring->next_to_use - 1;
0448 
0449     return ring->count + ring->next_to_clean - ring->next_to_use - 1;
0450 }
0451 
#ifdef CONFIG_IGB_HWMON

/* Sensor attribute types exposed through hwmon sysfs */
#define IGB_HWMON_TYPE_LOC  0
#define IGB_HWMON_TYPE_TEMP 1
#define IGB_HWMON_TYPE_CAUTION  2
#define IGB_HWMON_TYPE_MAX  3

/* One hwmon sysfs attribute bound to a thermal sensor */
struct hwmon_attr {
    struct device_attribute dev_attr;
    struct e1000_hw *hw;
    struct e1000_thermal_diode_data *sensor;
    char name[12];
    };

/* All hwmon state for one adapter: one attribute per sensor per type */
struct hwmon_buff {
    struct attribute_group group;
    const struct attribute_group *groups[2];
    struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; /* +1 for NULL terminator */
    struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
    unsigned int n_hwmon;   /* number of attributes actually registered */
    };
#endif
0474 
/* The number of L2 ether-type filter registers, Index 3 is reserved
 * for PTP 1588 timestamp
 */
#define MAX_ETYPE_FILTER    (4 - 1)
/* ETQF filter list: one static filter per filter consumer. This is
 * to avoid filter collisions later. Add new filters here!!
 *
 * Current filters:     Filter 3
 */
#define IGB_ETQF_FILTER_1588    3

/* PTP auxiliary-function resources and RSS redirection table size */
#define IGB_N_EXTTS 2
#define IGB_N_PEROUT    2
#define IGB_N_SDP   4
#define IGB_RETA_SIZE   128

/* Which fields of igb_nfc_input participate in a match */
enum igb_filter_match_flags {
    IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
    IGB_FILTER_FLAG_VLAN_TCI   = 0x2,
    IGB_FILTER_FLAG_SRC_MAC_ADDR   = 0x4,
    IGB_FILTER_FLAG_DST_MAC_ADDR   = 0x8,
};

#define IGB_MAX_RXNFC_FILTERS 16

/* RX network flow classification data structure */
struct igb_nfc_input {
    /* Byte layout in order, all values with MSB first:
     * match_flags - 1 byte
     * etype - 2 bytes
     * vlan_tci - 2 bytes
     */
    u8 match_flags;     /* enum igb_filter_match_flags bits */
    __be16 etype;
    __be16 vlan_tci;
    u8 src_addr[ETH_ALEN];
    u8 dst_addr[ETH_ALEN];
};

/* One installed RX flow-classification rule */
struct igb_nfc_filter {
    struct hlist_node nfc_node; /* linkage in adapter's nfc/cls_flower lists */
    struct igb_nfc_input filter;
    unsigned long cookie;
    u16 etype_reg_index;    /* which ETQF register holds the etype match */
    u16 sw_idx;
    u16 action;         /* target queue */
};

/* One slot of the unicast MAC filter table */
struct igb_mac_addr {
    u8 addr[ETH_ALEN];
    u8 queue;
    u8 state; /* bitmask */
};

#define IGB_MAC_STATE_DEFAULT   0x1
#define IGB_MAC_STATE_IN_USE    0x2
#define IGB_MAC_STATE_SRC_ADDR  0x4
#define IGB_MAC_STATE_QUEUE_STEERING 0x8
0533 
/* board specific private data structure */
struct igb_adapter {
    unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

    struct net_device *netdev;
    struct bpf_prog *xdp_prog;

    unsigned long state;    /* __IGB_* state bits (enum e1000_state_t) */
    unsigned int flags;     /* IGB_FLAG_* bits */

    unsigned int num_q_vectors;
    struct msix_entry msix_entries[MAX_MSIX_ENTRIES];

    /* Interrupt Throttle Rate */
    u32 rx_itr_setting;
    u32 tx_itr_setting;
    u16 tx_itr;
    u16 rx_itr;

    /* TX */
    u16 tx_work_limit;
    u32 tx_timeout_count;
    int num_tx_queues;
    struct igb_ring *tx_ring[16];   /* NOTE: 16 > IGB_MAX_TX_QUEUES (8) -- TODO confirm why */

    /* RX */
    int num_rx_queues;
    struct igb_ring *rx_ring[16];

    u32 max_frame_size;
    u32 min_frame_size;

    struct timer_list watchdog_timer;
    struct timer_list phy_info_timer;

    u16 mng_vlan_id;
    u32 bd_number;
    u32 wol;        /* Wake-on-LAN settings */
    u32 en_mng_pt;
    u16 link_speed;
    u16 link_duplex;

    u8 __iomem *io_addr; /* Mainly for iounmap use */

    struct work_struct reset_task;
    struct work_struct watchdog_task;
    bool fc_autoneg;
    u8  tx_timeout_factor;
    struct timer_list blink_timer;  /* for ethtool LED identify */
    unsigned long led_status;

    /* OS defined structs */
    struct pci_dev *pdev;

    spinlock_t stats64_lock;    /* protects stats64 */
    struct rtnl_link_stats64 stats64;

    /* structs defined in e1000_hw.h */
    struct e1000_hw hw;
    struct e1000_hw_stats stats;
    struct e1000_phy_info phy_info;

    /* ethtool self-test resources */
    u32 test_icr;
    struct igb_ring test_tx_ring;
    struct igb_ring test_rx_ring;

    int msg_enable;

    struct igb_q_vector *q_vector[MAX_Q_VECTORS];
    u32 eims_enable_mask;
    u32 eims_other;

    /* to not mess up cache alignment, always add to the bottom */
    u16 tx_ring_count;
    u16 rx_ring_count;
    unsigned int vfs_allocated_count;
    struct vf_data_storage *vf_data;    /* array of vfs_allocated_count entries */
    int vf_rate_link_speed;
    u32 rss_queues;
    u32 wvbr;
    u32 *shadow_vfta;   /* software copy of the VLAN filter table */

    /* PTP / IEEE 1588 timestamping state */
    struct ptp_clock *ptp_clock;
    struct ptp_clock_info ptp_caps;
    struct delayed_work ptp_overflow_work;
    struct work_struct ptp_tx_work;
    struct sk_buff *ptp_tx_skb;
    struct hwtstamp_config tstamp_config;
    unsigned long ptp_tx_start;
    unsigned long last_rx_ptp_check;
    unsigned long last_rx_timestamp;
    unsigned int ptp_flags;     /* IGB_PTP_* bits */
    spinlock_t tmreg_lock;      /* protects the timecounter/cyclecounter */
    struct cyclecounter cc;
    struct timecounter tc;
    u32 tx_hwtstamp_timeouts;
    u32 tx_hwtstamp_skipped;
    u32 rx_hwtstamp_cleared;
    bool pps_sys_wrap_on;

    struct ptp_pin_desc sdp_config[IGB_N_SDP];
    struct {
        struct timespec64 start;
        struct timespec64 period;
    } perout[IGB_N_PEROUT];

    char fw_version[32];
#ifdef CONFIG_IGB_HWMON
    struct hwmon_buff *igb_hwmon_buff;
    bool ets;
#endif
    /* I2C bus to the on-board SFP/thermal devices */
    struct i2c_algo_bit_data i2c_algo;
    struct i2c_adapter i2c_adap;
    struct i2c_client *i2c_client;
    u32 rss_indir_tbl_init;
    u8 rss_indir_tbl[IGB_RETA_SIZE];

    unsigned long link_check_timeout;
    int copper_tries;
    struct e1000_info ei;
    u16 eee_advert;     /* Energy-Efficient Ethernet advertisement */

    /* RX network flow classification support */
    struct hlist_head nfc_filter_list;
    struct hlist_head cls_flower_list;
    unsigned int nfc_filter_count;
    /* lock for RX network flow classification filter */
    spinlock_t nfc_lock;
    bool etype_bitmap[MAX_ETYPE_FILTER];

    struct igb_mac_addr *mac_table;
    struct vf_mac_filter vf_macs;   /* list head for vf_mac_list entries -- TODO confirm */
    struct vf_mac_filter *vf_mac_list;
    /* lock for VF resources */
    spinlock_t vfs_lock;
};
0670 
/* flags controlling PTP/1588 function */
#define IGB_PTP_ENABLED     BIT(0)
#define IGB_PTP_OVERFLOW_CHECK  BIT(1)

/* adapter->flags bits.  Note BIT(5) is unused -- presumably a retired
 * flag; keep the gap for compatibility.
 */
#define IGB_FLAG_HAS_MSI        BIT(0)
#define IGB_FLAG_DCA_ENABLED        BIT(1)
#define IGB_FLAG_QUAD_PORT_A        BIT(2)
#define IGB_FLAG_QUEUE_PAIRS        BIT(3)
#define IGB_FLAG_DMAC           BIT(4)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGB_FLAG_WOL_SUPPORTED      BIT(8)
#define IGB_FLAG_NEED_LINK_UPDATE   BIT(9)
#define IGB_FLAG_MEDIA_RESET        BIT(10)
#define IGB_FLAG_MAS_CAPABLE        BIT(11)
#define IGB_FLAG_MAS_ENABLE     BIT(12)
#define IGB_FLAG_HAS_MSIX       BIT(13)
#define IGB_FLAG_EEE            BIT(14)
#define IGB_FLAG_VLAN_PROMISC       BIT(15)
#define IGB_FLAG_RX_LEGACY      BIT(16)
#define IGB_FLAG_FQTSS          BIT(17)

/* Media Auto Sense */
#define IGB_MAS_ENABLE_0        0X0001
#define IGB_MAS_ENABLE_1        0X0002
#define IGB_MAS_ENABLE_2        0X0004
#define IGB_MAS_ENABLE_3        0X0008

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE    20408
#define IGB_TX_BUF_4096     4096
#define IGB_DMCTLX_DCFLUSH_DIS  0x80000000  /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT   19

/* Bit numbers for adapter->state (used with set_bit/test_bit) */
enum e1000_state_t {
    __IGB_TESTING,
    __IGB_RESETTING,
    __IGB_DOWN,
    __IGB_PTP_TX_IN_PROGRESS,
};

enum igb_boards {
    board_82575,
};
0715 
extern char igb_driver_name[];

/* XDP transmit entry point (igb_main.c) */
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
              struct igb_ring *ring,
              struct xdp_frame *xdpf);
/* netdev open/close and adapter bring-up/teardown (igb_main.c) */
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
void igb_down(struct igb_adapter *);
void igb_reinit_locked(struct igb_adapter *);
void igb_reset(struct igb_adapter *);
int igb_reinit_queues(struct igb_adapter *);
void igb_write_rss_indir_tbl(struct igb_adapter *);
int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
/* ring resource management and configuration */
int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
void igb_set_fw_version(struct igb_adapter *);
/* PTP/1588 support (igb_ptp.c) */
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
            ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
#endif
0763 static inline s32 igb_reset_phy(struct e1000_hw *hw)
0764 {
0765     if (hw->phy.ops.reset)
0766         return hw->phy.ops.reset(hw);
0767 
0768     return 0;
0769 }
0770 
0771 static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
0772 {
0773     if (hw->phy.ops.read_reg)
0774         return hw->phy.ops.read_reg(hw, offset, data);
0775 
0776     return 0;
0777 }
0778 
0779 static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
0780 {
0781     if (hw->phy.ops.write_reg)
0782         return hw->phy.ops.write_reg(hw, offset, data);
0783 
0784     return 0;
0785 }
0786 
0787 static inline s32 igb_get_phy_info(struct e1000_hw *hw)
0788 {
0789     if (hw->phy.ops.get_phy_info)
0790         return hw->phy.ops.get_phy_info(hw);
0791 
0792     return 0;
0793 }
0794 
0795 static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
0796 {
0797     return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
0798 }
0799 
/* RX flow-classification filter management (igb_ethtool.c) */
int igb_add_filter(struct igb_adapter *adapter,
           struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
             struct igb_nfc_filter *input);

/* MAC-based queue steering filters */
int igb_add_mac_steering_filter(struct igb_adapter *adapter,
                const u8 *addr, u8 queue, u8 flags);
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
                const u8 *addr, u8 queue, u8 flags);
0809 
0810 #endif /* _IGB_H_ */