Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* Copyright(c) 1999 - 2018 Intel Corporation. */
0003 
0004 #ifndef _IXGBE_H_
0005 #define _IXGBE_H_
0006 
0007 #include <linux/bitops.h>
0008 #include <linux/types.h>
0009 #include <linux/pci.h>
0010 #include <linux/netdevice.h>
0011 #include <linux/cpumask.h>
0012 #include <linux/aer.h>
0013 #include <linux/if_vlan.h>
0014 #include <linux/jiffies.h>
0015 #include <linux/phy.h>
0016 
0017 #include <linux/timecounter.h>
0018 #include <linux/net_tstamp.h>
0019 #include <linux/ptp_clock_kernel.h>
0020 
0021 #include "ixgbe_type.h"
0022 #include "ixgbe_common.h"
0023 #include "ixgbe_dcb.h"
0024 #if IS_ENABLED(CONFIG_FCOE)
0025 #define IXGBE_FCOE
0026 #include "ixgbe_fcoe.h"
0027 #endif /* IS_ENABLED(CONFIG_FCOE) */
0028 #ifdef CONFIG_IXGBE_DCA
0029 #include <linux/dca.h>
0030 #endif
0031 #include "ixgbe_ipsec.h"
0032 
0033 #include <net/xdp.h>
0034 
0035 /* common prefix used by pr_<> macros */
0036 #undef pr_fmt
0037 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0038 
0039 /* TX/RX descriptor defines */
0040 #define IXGBE_DEFAULT_TXD           512
0041 #define IXGBE_DEFAULT_TX_WORK           256
0042 #define IXGBE_MAX_TXD              4096
0043 #define IXGBE_MIN_TXD                64
0044 
0045 #if (PAGE_SIZE < 8192)
0046 #define IXGBE_DEFAULT_RXD           512
0047 #else
0048 #define IXGBE_DEFAULT_RXD           128
0049 #endif
0050 #define IXGBE_MAX_RXD              4096
0051 #define IXGBE_MIN_RXD                64
0052 
0053 /* flow control */
0054 #define IXGBE_MIN_FCRTL            0x40
0055 #define IXGBE_MAX_FCRTL         0x7FF80
0056 #define IXGBE_MIN_FCRTH           0x600
0057 #define IXGBE_MAX_FCRTH         0x7FFF0
0058 #define IXGBE_DEFAULT_FCPAUSE        0xFFFF
0059 #define IXGBE_MIN_FCPAUSE             0
0060 #define IXGBE_MAX_FCPAUSE        0xFFFF
0061 
0062 /* Supported Rx Buffer Sizes */
0063 #define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
0064 #define IXGBE_RXBUFFER_1536  1536
0065 #define IXGBE_RXBUFFER_2K    2048
0066 #define IXGBE_RXBUFFER_3K    3072
0067 #define IXGBE_RXBUFFER_4K    4096
0068 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
0069 
0070 /* Attempt to maximize the headroom available for incoming frames.  We
0071  * use a 2K buffer for receives and need 1536/1534 to store the data for
0072  * the frame.  This leaves us with 512 bytes of room.  From that we need
0073  * to deduct the space needed for the shared info and the padding needed
0074  * to IP align the frame.
0075  *
0076  * Note: For cache line sizes 256 or larger this value is going to end
0077  *   up negative.  In these cases we should fall back to the 3K
0078  *   buffers.
0079  */
0080 #if (PAGE_SIZE < 8192)
0081 #define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
0082 #define IXGBE_2K_TOO_SMALL_WITH_PADDING \
0083 ((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
0084 
0085 static inline int ixgbe_compute_pad(int rx_buf_len)
0086 {
0087     int page_size, pad_size;
0088 
0089     page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
0090     pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
0091 
0092     return pad_size;
0093 }
0094 
0095 static inline int ixgbe_skb_pad(void)
0096 {
0097     int rx_buf_len;
0098 
0099     /* If a 2K buffer cannot handle a standard Ethernet frame then
0100      * optimize padding for a 3K buffer instead of a 1.5K buffer.
0101      *
0102      * For a 3K buffer we need to add enough padding to allow for
0103      * tailroom due to NET_IP_ALIGN possibly shifting us out of
0104      * cache-line alignment.
0105      */
0106     if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
0107         rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
0108     else
0109         rx_buf_len = IXGBE_RXBUFFER_1536;
0110 
0111     /* if needed make room for NET_IP_ALIGN */
0112     rx_buf_len -= NET_IP_ALIGN;
0113 
0114     return ixgbe_compute_pad(rx_buf_len);
0115 }
0116 
0117 #define IXGBE_SKB_PAD   ixgbe_skb_pad()
0118 #else
0119 #define IXGBE_SKB_PAD   (NET_SKB_PAD + NET_IP_ALIGN)
0120 #endif
0121 
0122 /*
0123  * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
0124  * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
0125  * this adds up to 448 bytes of extra data.
0126  *
0127  * Since netdev_alloc_skb now allocates a page fragment we can use a value
0128  * of 256 and the resultant skb will have a truesize of 960 or less.
0129  */
0130 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
0131 
0132 /* How many Rx Buffers do we bundle into one write to the hardware ? */
0133 #define IXGBE_RX_BUFFER_WRITE   16  /* Must be power of 2 */
0134 
0135 #define IXGBE_RX_DMA_ATTR \
0136     (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
0137 
/* Per-packet transmit flag bits carried in ixgbe_tx_buffer::tx_flags.
 * Grouped by which part of the advanced Tx descriptor they influence.
 */
enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,	/* hardware VLAN insertion */
	IXGBE_TX_FLAGS_TSO	= 0x02,	/* TCP segmentation offload */
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,	/* hardware Tx timestamp requested */

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,
	IXGBE_TX_FLAGS_IPSEC	= 0x40,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,	/* VLAN tag inserted in software */
	IXGBE_TX_FLAGS_FCOE	= 0x100,
};
0154 
0155 /* VLAN info */
0156 #define IXGBE_TX_FLAGS_VLAN_MASK    0xffff0000
0157 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
0158 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
0159 #define IXGBE_TX_FLAGS_VLAN_SHIFT   16
0160 
0161 #define IXGBE_MAX_VF_MC_ENTRIES         30
0162 #define IXGBE_MAX_VF_FUNCTIONS          64
0163 #define IXGBE_MAX_VFTA_ENTRIES          128
0164 #define MAX_EMULATION_MAC_ADDRS         16
0165 #define IXGBE_MAX_PF_MACVLANS           15
0166 #define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
0167 #define IXGBE_82599_VF_DEVICE_ID        0x10ED
0168 #define IXGBE_X540_VF_DEVICE_ID         0x1515
0169 
/* Fold a 32-bit rolling hardware counter into a 64-bit software counter.
 * A read lower than the previous one means the register wrapped, so bump
 * the upper 32 bits.  Expects "hw" (struct ixgbe_hw *) in scope at the
 * call site.  Wrapped in do { } while (0) so the macro behaves as a
 * single statement inside unbraced if/else bodies.
 */
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
	do {							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	} while (0)
0179 
/* Fold a 36-bit counter split across two registers (lsb + msb) into a
 * 64-bit software counter, handling wrap the same way as the 32-bit
 * variant.  Expects "hw" in scope at the call site.  Wrapped in
 * do { } while (0) so the macro behaves as a single statement.
 */
#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	do {								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	} while (0)
0192 
/* Per-VF traffic counters mirrored from hardware statistics registers. */
struct vf_stats {
	u64 gprc;	/* good packets received */
	u64 gorc;	/* good octets received */
	u64 gptc;	/* good packets transmitted */
	u64 gotc;	/* good octets transmitted */
	u64 mprc;	/* multicast packets received */
};
0200 
/* PF-side bookkeeping for a single SR-IOV virtual function. */
struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	bool clear_to_send;		/* VF mailbox handshake state */
	struct vf_stats vfstats;	/* running totals */
	struct vf_stats last_vfstats;	/* snapshot from the previous update */
	struct vf_stats saved_rst_vfstats; /* totals preserved across VF reset */
	bool pf_set_mac;		/* MAC was administratively assigned */
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;			/* Tx rate limit; presumably Mb/s -- TODO confirm */
	int link_enable;
	int link_state;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;			/* one of enum ixgbevf_xcast_modes */
	unsigned int vf_api;		/* mailbox API version negotiated with VF */
	u8 primary_abort_count;
};
0223 
/* Multicast/promiscuous receive modes a VF may request from the PF. */
enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};
0230 
/* One entry in the PF's list of MAC/VLAN filters assigned to VFs. */
struct vf_macvlans {
	struct list_head l;		/* linkage in adapter's mv_list */
	int vf;				/* owning VF index */
	bool free;			/* entry available for reuse */
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];	/* filtered MAC address */
};
0238 
0239 #define IXGBE_MAX_TXD_PWR   14
0240 #define IXGBE_MAX_DATA_PER_TXD  (1u << IXGBE_MAX_TXD_PWR)
0241 
0242 /* Tx Descriptors needed, worst case */
0243 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
0244 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
0245 
0246 /* wrapper around a pointer to a socket buffer,
0247  * so a DMA handle can be stored along with the buffer */
/* Software context tracked for each Tx descriptor so completion can
 * unmap DMA and free the skb/XDP frame.
 */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;	/* last descriptor of this packet */
	unsigned long time_stamp;	/* jiffies at enqueue, for hang detection */
	union {
		struct sk_buff *skb;	/* regular stack transmit */
		struct xdp_frame *xdpf;	/* XDP transmit */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;			/* enum ixgbe_tx_flags bits */
};
0262 
/* Software context for each Rx descriptor.  The two union arms are for
 * the page-based receive path and the xdp_buff-based path respectively;
 * the latter presumably serves AF_XDP zero copy (the ring carries an
 * xsk_pool) -- TODO confirm.
 */
struct ixgbe_rx_buffer {
	union {
		struct {
			struct sk_buff *skb;
			dma_addr_t dma;
			struct page *page;
			__u32 page_offset;
			__u16 pagecnt_bias;	/* page refcount bias for reuse */
		};
		struct {
			bool discard;
			struct xdp_buff *xdp;
		};
	};
};
0278 
/* Generic per-queue packet/byte counters (shared by Tx and Rx rings). */
struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};
0283 
/* Tx-ring specific counters. */
struct ixgbe_tx_queue_stats {
	u64 restart_queue;	/* times the queue was restarted after stall */
	u64 tx_busy;
	u64 tx_done_old;	/* completion progress marker for hang check */
};
0289 
/* Rx-ring specific counters. */
struct ixgbe_rx_queue_stats {
	u64 rsc_count;		/* receive side coalescing events */
	u64 rsc_flush;
	u64 non_eop_descs;	/* descriptors without end-of-packet set */
	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};
0299 
0300 #define IXGBE_TS_HDR_LEN 8
0301 
/* Bit numbers for ixgbe_ring::state, manipulated with {test,set,clear}_bit
 * via the helper macros below.
 */
enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,		/* ring uses 3K Rx buffers */
	__IXGBE_RX_BUILD_SKB_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_TX_XDP_RING,		/* ring is dedicated to XDP transmit */
	__IXGBE_TX_DISABLED,
};
0315 
/* True when the ring builds skbs directly in the Rx buffer headroom */
#define ring_uses_build_skb(ring) \
	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
0318 
/* State for a macvlan offload (L2 forwarding) accelerated device. */
struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;	/* the offloaded macvlan netdev */
	unsigned int tx_base_queue;	/* first hardware Tx queue of the pool */
	unsigned int rx_base_queue;	/* first hardware Rx queue of the pool */
	int pool;			/* VMDq pool index backing this device */
};
0326 
/* Helpers to test/set/clear per-ring state bits (enum ixgbe_ring_state_t) */
#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) \
	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
/* One descriptor ring (Tx, Rx or XDP Tx).  Node-aligned because rings are
 * touched from the hot path of a single queue vector.
 */
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;	/* attached XDP program, if any */
	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;		/* enum ixgbe_ring_state_t bits */
	u8 __iomem *tail;		/* mapped tail register for this ring */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index; /* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;	/* Rx: next buffer slot to refill */
		struct {		/* Tx: flow director sampling state */
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;			/* DCB traffic class of this ring */
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;	/* protects stats on 32-bit readers */
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
	u16 rx_offset;			/* headroom offset within Rx buffer */
	struct xdp_rxq_info xdp_rxq;
	spinlock_t tx_lock;	/* used in XDP mode */
	struct xsk_buff_pool *xsk_pool;	/* AF_XDP zero-copy pool, if enabled */
	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
	u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
0396 
/* Indices into ixgbe_adapter::ring_feature[], one per ring feature. */
enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE      /* must be last in enum set */
};
0408 
0409 #define IXGBE_MAX_RSS_INDICES       16
0410 #define IXGBE_MAX_RSS_INDICES_X550  63
0411 #define IXGBE_MAX_VMDQ_INDICES      64
0412 #define IXGBE_MAX_FDIR_INDICES      63  /* based on q_vector limit */
0413 #define IXGBE_MAX_FCOE_INDICES      8
0414 #define MAX_RX_QUEUES           (IXGBE_MAX_FDIR_INDICES + 1)
0415 #define MAX_TX_QUEUES           (IXGBE_MAX_FDIR_INDICES + 1)
0416 #define IXGBE_MAX_XDP_QS        (IXGBE_MAX_FDIR_INDICES + 1)
0417 #define IXGBE_MAX_L2A_QUEUES        4
0418 #define IXGBE_BAD_L2A_QUEUE     3
0419 #define IXGBE_MAX_MACVLANS      63
0420 
0421 DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
0422 
/* Per-feature queue accounting; one slot per enum ixgbe_ring_f_enum. */
struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
0429 
0430 #define IXGBE_82599_VMDQ_8Q_MASK 0x78
0431 #define IXGBE_82599_VMDQ_4Q_MASK 0x7C
0432 #define IXGBE_82599_VMDQ_2Q_MASK 0x7E
0433 
0434 /*
0435  * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
0436  * this is twice the size of a half page we need to double the page order
0437  * for FCoE enabled Rx queues.
0438  */
0439 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
0440 {
0441     if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
0442         return IXGBE_RXBUFFER_3K;
0443 #if (PAGE_SIZE < 8192)
0444     if (ring_uses_build_skb(ring))
0445         return IXGBE_MAX_2K_FRAME_BUILD_SKB;
0446 #endif
0447     return IXGBE_RXBUFFER_2K;
0448 }
0449 
0450 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
0451 {
0452 #if (PAGE_SIZE < 8192)
0453     if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
0454         return 1;
0455 #endif
0456     return 0;
0457 }
0458 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
0459 
0460 #define IXGBE_ITR_ADAPTIVE_MIN_INC  2
0461 #define IXGBE_ITR_ADAPTIVE_MIN_USECS    10
0462 #define IXGBE_ITR_ADAPTIVE_MAX_USECS    126
0463 #define IXGBE_ITR_ADAPTIVE_LATENCY  0x80
0464 #define IXGBE_ITR_ADAPTIVE_BULK     0x00
0465 
/* Group of rings (all Tx or all Rx) serviced by one interrupt vector,
 * plus the accounting used for adaptive interrupt moderation.
 */
struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
0475 
0476 /* iterator for handling rings in ring container */
0477 #define ixgbe_for_each_ring(pos, head) \
0478     for (pos = (head).ring; pos != NULL; pos = pos->next)
0479 
0480 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
0481                   ? 8 : 1)
0482 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
0483 
0484 /* MAX_Q_VECTORS of these are allocated,
0485  * but we only use one per queue-specific vector.
0486  */
/* One MSI-X interrupt vector and its NAPI context.  MAX_Q_VECTORS of
 * these are allocated, but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	    /* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;	/* preferred CPUs for IRQ affinity */
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];	/* IRQ name, e.g. "<ifname>-TxRx-<n>" */

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};
0507 
0508 #ifdef CONFIG_IXGBE_HWMON
0509 
0510 #define IXGBE_HWMON_TYPE_LOC        0
0511 #define IXGBE_HWMON_TYPE_TEMP       1
0512 #define IXGBE_HWMON_TYPE_CAUTION    2
0513 #define IXGBE_HWMON_TYPE_MAX        3
0514 
/* One hwmon sysfs attribute bound to a thermal sensor of this device. */
struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];		/* sysfs attribute name backing store */
};
0521 
/* Container for all hwmon attributes registered for one adapter; four
 * attribute slots (loc/temp/caution/max) per sensor.
 */
struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];	/* NULL terminated */
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;		/* number of attributes in use */
};
0529 #endif /* CONFIG_IXGBE_HWMON */
0530 
0531 /*
0532  * microsecond values for various ITR rates shifted by 2 to fit itr register
0533  * with the first 3 bits reserved 0
0534  */
0535 #define IXGBE_MIN_RSC_ITR   24
0536 #define IXGBE_100K_ITR      40
0537 #define IXGBE_20K_ITR       200
0538 #define IXGBE_12K_ITR       336
0539 
0540 /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
0541 static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
0542                     const u32 stat_err_bits)
0543 {
0544     return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
0545 }
0546 
0547 static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
0548 {
0549     u16 ntc = ring->next_to_clean;
0550     u16 ntu = ring->next_to_use;
0551 
0552     return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
0553 }
0554 
/* Typed accessors for the i-th descriptor within a ring's raw
 * descriptor memory (R)->desc.
 */
#define IXGBE_RX_DESC(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
0561 
0562 #define IXGBE_MAX_JUMBO_FRAME_SIZE  9728 /* Maximum Supported Size 9.5KB */
0563 #ifdef IXGBE_FCOE
0564 /* Use 3K as the baby jumbo frame size for FCoE */
0565 #define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
0566 #endif /* IXGBE_FCOE */
0567 
0568 #define OTHER_VECTOR 1
0569 #define NON_Q_VECTORS (OTHER_VECTOR)
0570 
0571 #define MAX_MSIX_VECTORS_82599 64
0572 #define MAX_Q_VECTORS_82599 64
0573 #define MAX_MSIX_VECTORS_82598 18
0574 #define MAX_Q_VECTORS_82598 16
0575 
/* One entry of the software MAC filter table (adapter->mac_table). */
struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;		/* VMDq pool the address is assigned to */
	u16 state; /* bitmask */	/* IXGBE_MAC_STATE_* flags */
};
0581 
0582 #define IXGBE_MAC_STATE_DEFAULT     0x1
0583 #define IXGBE_MAC_STATE_MODIFIED    0x2
0584 #define IXGBE_MAC_STATE_IN_USE      0x4
0585 
0586 #define MAX_Q_VECTORS MAX_Q_VECTORS_82599
0587 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
0588 
0589 #define MIN_MSIX_Q_VECTORS 1
0590 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
0591 
0592 /* default to trying for four seconds */
0593 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
0594 #define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
0595 
0596 #define IXGBE_PRIMARY_ABORT_LIMIT   5
0597 
0598 /* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;

	unsigned long state;		/* enum ixgbe_state_t bits */

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED		BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF		BIT(19)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;
	u64 tx_ipsec;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;
	u64 rx_ipsec;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
	unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;			/* link status change interrupts */
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 hw_tcs;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	/* flow director (ATR/perfect filter) state */
	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;

	u16 bridge_mode;

	char eeprom_id[NVM_VER_SIZE];
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	/* PTP / hardware timestamping state */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;		/* protects the cycle/time counters */
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/

	u8 default_up;
	/* Bitmask indicating in use pools */
	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);

#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

/* maximum number of RETA entries among all devices supported by ixgbe
 * driver: currently it's x550 device in non-SRIOV mode
 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
	u32 *rss_key;

#ifdef CONFIG_IXGBE_IPSEC
	struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
	spinlock_t vfs_lock;		/* protects VF teardown vs. mailbox */
};
0818 
0819 static inline int ixgbe_determine_xdp_q_idx(int cpu)
0820 {
0821     if (static_key_enabled(&ixgbe_xdp_locking_key))
0822         return cpu % IXGBE_MAX_XDP_QS;
0823     else
0824         return cpu;
0825 }
0826 
0827 static inline
0828 struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
0829 {
0830     int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
0831 
0832     return adapter->xdp_ring[index];
0833 }
0834 
0835 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
0836 {
0837     switch (adapter->hw.mac.type) {
0838     case ixgbe_mac_82598EB:
0839     case ixgbe_mac_82599EB:
0840     case ixgbe_mac_X540:
0841         return IXGBE_MAX_RSS_INDICES;
0842     case ixgbe_mac_X550:
0843     case ixgbe_mac_X550EM_x:
0844     case ixgbe_mac_x550em_a:
0845         return IXGBE_MAX_RSS_INDICES_X550;
0846     default:
0847         return 0;
0848     }
0849 }
0850 
/* One software-tracked flow director perfect filter. */
struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;	/* linkage in adapter's fdir_filter_list */
	union ixgbe_atr_input filter;	/* match fields programmed to hardware */
	u16 sw_idx;			/* software filter index */
	u64 action;			/* target queue / drop action */
};
0857 
/* Bit numbers for ixgbe_adapter::state (atomic bitops). */
enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,		/* service task already queued */
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,	/* a Tx timestamp is outstanding */
	__IXGBE_RESET_REQUESTED,
};
0871 
/* Driver-private data stored in skb->cb, accessed via IXGBE_CB(). */
struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;		/* number of coalesced fragments appended */
	bool page_released;
};
0881 #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
0882 
/* Board identifiers; index into the driver's board info table, matched
 * by the PCI device table at probe time.
 */
enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
	board_x550em_x_fw,
	board_x550em_a,
	board_x550em_a_fw,
};
0893 
0894 extern const struct ixgbe_info ixgbe_82598_info;
0895 extern const struct ixgbe_info ixgbe_82599_info;
0896 extern const struct ixgbe_info ixgbe_X540_info;
0897 extern const struct ixgbe_info ixgbe_X550_info;
0898 extern const struct ixgbe_info ixgbe_X550EM_x_info;
0899 extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
0900 extern const struct ixgbe_info ixgbe_x550em_a_info;
0901 extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
0902 #ifdef CONFIG_IXGBE_DCB
0903 extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
0904 #endif
0905 
0906 extern char ixgbe_driver_name[];
0907 #ifdef IXGBE_FCOE
0908 extern char ixgbe_default_device_descr[];
0909 #endif /* IXGBE_FCOE */
0910 
/* Core driver entry points: device open/close, bring-up/teardown, ring
 * resource management, MAC filtering and the Tx/Rx datapath helpers.
 */
int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
             u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
             const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
             const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
                  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
                      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
/* Flow Director (fdir) support for 82599-class hardware: signature and
 * perfect-filter table setup, filter add/erase, and the software hash
 * used to mirror the hardware's perfect-filter hashing.
 */
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                      union ixgbe_atr_hash_dword input,
                      union ixgbe_atr_hash_dword common,
                      u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
                    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                      union ixgbe_atr_input *input,
                      u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
                      union ixgbe_atr_input *input,
                      u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                      union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                    struct ixgbe_fdir_filter *input,
                    u16 sw_idx);
/* Rx address/promiscuity handling, traffic-class setup and optional
 * hwmon sysfs hooks.
 */
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
/* FCoE offload entry points; only built when IXGBE_FCOE is defined (see
 * the CONFIG_FCOE test at the top of this header).
 */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
          u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
           union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
               struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
              struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
               struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
/* debugfs support; compiles away to empty inline stubs when
 * CONFIG_DEBUG_FS is off so callers need no conditionals.
 */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
1013 static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
1014 {
1015     return netdev_get_tx_queue(ring->netdev, ring->queue_index);
1016 }
1017 
/* PTP (IEEE 1588) hardware timestamping: clock lifecycle, hang/overflow
 * watchdog checks and the two Rx timestamp retrieval paths.
 */
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
/**
 * ixgbe_ptp_rx_hwtstamp - attach a hardware Rx timestamp to an skb, if any
 * @rx_ring: ring the packet arrived on
 * @rx_desc: advanced Rx descriptor whose status bits are inspected
 * @skb: packet to receive the timestamp
 *
 * Two delivery mechanisms exist: TSIP means the timestamp was placed in
 * the packet buffer itself (handled by ixgbe_ptp_rx_pktstamp()); otherwise
 * a set RXDADV_STAT_TS bit means the time was latched in device registers
 * and must be read out via ixgbe_ptp_rx_rgtstamp().  Both paths are
 * unlikely() since most traffic is not timestamped.
 */
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
                     union ixgbe_adv_rx_desc *rx_desc,
                     struct sk_buff *skb)
{
    /* Timestamp delivered in the packet buffer — no register access needed */
    if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
        ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
        return;
    }

    /* No register-latched timestamp for this packet either */
    if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
        return;

    ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

    /* Update the last_rx_timestamp timer in order to enable watchdog check
     * for error case of latched timestamp on a dropped packet.
     */
    rx_ring->last_rx_timestamp = jiffies;
}
1045 
/* PTP ioctl configuration, cyclecounter/reset handling and PPS events. */
int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif
1054 
1055 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
1056                   struct ixgbe_adapter *adapter,
1057                   struct ixgbe_ring *tx_ring);
1058 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
1059 void ixgbe_store_key(struct ixgbe_adapter *adapter);
1060 void ixgbe_store_reta(struct ixgbe_adapter *adapter);
1061 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1062                u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
/* IPsec hardware offload.  When CONFIG_IXGBE_IPSEC is off, the hooks
 * collapse to inline no-ops; the VF SA add/del stubs return -EACCES so a
 * VF's request gets a definite error (presumably to signal "offload not
 * available" — verify against the VF mailbox handler).
 */
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
            union ixgbe_adv_rx_desc *rx_desc,
            struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
           struct ixgbe_ipsec_tx_data *itd);
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
                  union ixgbe_adv_rx_desc *rx_desc,
                  struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
                 struct ixgbe_tx_buffer *first,
                 struct ixgbe_ipsec_tx_data *itd) { return 0; }
static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
                    u32 vf) { }
static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
                    u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
                    u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */
1092 
1093 static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
1094 {
1095     return !!adapter->xdp_prog;
1096 }
1097 
1098 #endif /* _IXGBE_H_ */