Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
0002 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
0003 
0004 /*
0005  * nfp_net.h
0006  * Declarations for Netronome network device driver.
0007  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
0008  *          Jason McMullan <jason.mcmullan@netronome.com>
0009  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
0010  */
0011 
0012 #ifndef _NFP_NET_H_
0013 #define _NFP_NET_H_
0014 
0015 #include <linux/atomic.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/list.h>
0018 #include <linux/netdevice.h>
0019 #include <linux/pci.h>
0020 #include <linux/dim.h>
0021 #include <linux/io-64-nonatomic-hi-lo.h>
0022 #include <linux/semaphore.h>
0023 #include <linux/workqueue.h>
0024 #include <net/xdp.h>
0025 
0026 #include "nfp_net_ctrl.h"
0027 
/* Logging helpers: route the message to the netdev when one exists (data
 * vNICs) or to the underlying struct device with a "ctrl: " prefix for
 * netdev-less control vNICs.
 */
#define nn_pr(nn, lvl, fmt, args...)					\
	({								\
		struct nfp_net *__nn = (nn);				\
									\
		if (__nn->dp.netdev)					\
			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
		else							\
			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
	})

#define nn_err(nn, fmt, args...)	nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...)	nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...)	nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...)	nn_pr(nn, KERN_DEBUG, fmt, ## args)
0042 
/* Rate-limited warning for datapath code; picks netdev_warn() or dev_warn()
 * depending on whether this datapath is bound to a netdev (ctrl vNICs are
 * not).  Rate limiting keeps hot-path error conditions from flooding logs.
 */
#define nn_dp_warn(dp, fmt, args...)					\
	({								\
		struct nfp_net_dp *__dp = (dp);				\
									\
		if (unlikely(net_ratelimit())) {			\
			if (__dp->netdev)				\
				netdev_warn(__dp->netdev, fmt, ## args); \
			else						\
				dev_warn(__dp->dev, fmt, ## args);	\
		}							\
	})
0054 
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT	5

/* Interval for reading offloaded filter stats */
#define NFP_NET_STAT_POLL_IVL	msecs_to_jiffies(100)

/* Bar allocation */
#define NFP_NET_CTRL_BAR	0
#define NFP_NET_Q0_BAR		2
#define NFP_NET_Q1_BAR		4	/* OBSOLETE */

/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU	1500U

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND	64

/* Interrupt definitions: two non-queue vectors (link state change and
 * exception) come ahead of the per-ring vectors.
 */
#define NFP_NET_NON_Q_VECTORS	2
#define NFP_NET_IRQ_LSC_IDX	0
#define NFP_NET_IRQ_EXN_IDX	1
#define NFP_NET_MIN_VNIC_IRQS	(NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH	16	/* Add freelist buffers in this batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048	/* XDP bufs to reclaim in NAPI poll */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

/* RX buffer layout: headroom reserved ahead of packet data plus the
 * skb_shared_info placed at the end of the buffer.
 */
#define NFP_NET_RX_BUF_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA	(NFP_NET_RX_BUF_HEADROOM +		\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
0097 
/* Forward declarations */
struct nfp_cpp;
struct nfp_dev_info;
struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
struct xsk_buff_pool;

struct nfp_nfd3_tx_desc;
struct nfp_nfd3_tx_buf;

struct nfp_nfdk_tx_desc;
struct nfp_nfdk_tx_buf;

/* Convenience macro for wrapping descriptor index on ring size.
 * Assumes (ring)->cnt is a power of two, so (cnt - 1) is an all-ones mask.
 */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))
0116 
/* Convenience macros for writing a DMA address into RX/TX descriptors.
 * The 40-bit variant keeps only the low byte of the upper 32 bits and does
 * no endian conversion on it (assumes dma_addr_hi is a single-byte field
 * there -- confirm against the NFD3 descriptor layout); the 48-bit variant
 * stores a little-endian 16-bit high part.
 */
#define nfp_desc_set_dma_addr_40b(desc, dma_addr)			\
	do {								\
		__typeof__(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)

#define nfp_desc_set_dma_addr_48b(desc, dma_addr)			\
	do {								\
		__typeof__(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr));	\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
	} while (0)
0135 
/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @data_pending: number of bytes added to current block (NFDK only)
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @txrwb:	TX pointer write back area
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add:	Accumulated number of buffers to add to QCP write pointer
 *		(used for .xmit_more delayed kick)
 * @txbufs:	Array of transmitted TX buffers, to free on transmit (NFD3)
 * @ktxbufs:	Array of transmitted TX buffers, to free on transmit (NFDK)
 * @txds:	Virtual address of TX ring in host memory (NFD3)
 * @ktxds:	Virtual address of TX ring in host memory (NFDK)
 *
 * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 * @is_xdp:	Is this a XDP TX ring?
 *
 * The txbufs/txds unions are discriminated by the datapath version in use
 * (NFD3 vs NFDK); only one member of each union is ever valid for a ring.
 */
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u16 idx;
	u16 data_pending;
	u8 __iomem *qcp_q;
	u64 *txrwb;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	union {
		struct nfp_nfd3_tx_buf *txbufs;
		struct nfp_nfdk_tx_buf *ktxbufs;
	};
	union {
		struct nfp_nfd3_tx_desc *txds;
		struct nfp_nfdk_tx_desc *ktxds;
	};

	/* Cold data follows */
	int qcidx;

	dma_addr_t dma;
	size_t size;
	bool is_xdp;
} ____cacheline_aligned;
0190 
/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor (little-endian, as written by the device) */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM	cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM	cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_DECRYPTED		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

#define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
					 PCIE_DESC_RX_TCP_CSUM |	\
					 PCIE_DESC_RX_UDP_CSUM |	\
					 PCIE_DESC_RX_I_IP4_CSUM |	\
					 PCIE_DESC_RX_I_TCP_CSUM |	\
					 PCIE_DESC_RX_I_UDP_CSUM)
/* The *_OK bit for each checksum type sits one position below the
 * corresponding "checksummed" bit, hence the shift-by-one variants
 * below (host-endian, note the le16_to_cpu conversion).
 */
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)
0224 
/* RX descriptor: @fld is the host-written freelist layout (buffer address),
 * @rxd is the device-written completion layout.  Both views share the same
 * 8 bytes; @vals allows raw word access.
 */
struct nfp_net_rx_desc {
	union {
		struct {
			__le16 dma_addr_hi; /* High bits of the buf address */
			u8 reserved; /* Must be zero */
			u8 meta_len_dd; /* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buffer address */
		} __packed fld;

		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd; /* Length of meta data prepended +
					 * descriptor done flag.
					 */

			__le16 flags;	/* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan;	/* VLAN if stripped */
		} __packed rxd;

		__le32 vals[2];
	};
};
0249 
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
#define NFP_NET_VLAN_CTAG	0
#define NFP_NET_VLAN_STAG	1

/**
 * struct nfp_meta_parsed - results of parsing the packet metadata prepend
 * @hash_type:	type of the RX hash as reported by the device
 * @csum_type:	type of checksum as reported by the device
 * @hash:	RX hash value
 * @mark:	packet mark value
 * @portid:	port id value
 * @csum:	checksum of the packet
 * @vlan:	stripped VLAN tag info
 * @vlan.stripped:	true when the device stripped a VLAN tag
 * @vlan.tpid:	VLAN tag protocol id
 * @vlan.tci:	VLAN tag control info
 */
struct nfp_meta_parsed {
	u8 hash_type;
	u8 csum_type;
	u32 hash;
	u32 mark;
	u32 portid;
	__wsum csum;
	struct {
		bool stripped;
		u8 tpid;
		u16 tci;
	} vlan;
};

/* RX hash type and value pair; big-endian as read from packet metadata
 * (presumably the device-prepended format -- confirm against datapath code).
 */
struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};
0272 
/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:	page fragment buffer
 * @dma_addr:	DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};

/**
 * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
 * @dma_addr:	DMA mapping address of the buffer
 * @xdp:	XSK buffer pool handle (for AF_XDP)
 */
struct nfp_net_xsk_rx_buf {
	dma_addr_t dma_addr;
	struct xdp_buff *xdp;
};
0292 
/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of transmitted FL/RX buffers
 * @xsk_rxbufs: Array of transmitted FL/RX buffers (for AF_XDP)
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @xdp_rxq:    RX-ring info avail for XDP
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 *
 * Only one of @rxbufs / @xsk_rxbufs is used for a given ring, depending on
 * whether an AF_XDP socket buffer pool is attached.
 */
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u32 idx;

	int fl_qcidx;
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_xsk_rx_buf *xsk_rxbufs;
	struct nfp_net_rx_desc *rxds;

	struct xdp_rxq_info xdp_rxq;

	dma_addr_t dma;
	size_t size;
} ____cacheline_aligned;
0330 
/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec
 * @tasklet:        ctrl vNIC, tasklet for servicing the r_vec
 * @queue:          ctrl vNIC, send queue
 * @lock:           ctrl vNIC, r_vec lock protects @queue
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @xdp_ring:       Pointer to an extra TX ring for XDP
 * @xsk_pool:       XSK buffer pool active on vector queue pair (for AF_XDP)
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @event_ctr:      Number of interrupt
 * @rx_dim:	    Dynamic interrupt moderation structure for RX
 * @tx_dim:	    Dynamic interrupt moderation structure for TX
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
 * @hw_csum_rx_error:	 Counter of packets with bad checksums
 * @hw_tls_rx:	    Number of packets with TLS decrypted by hardware
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of Transmitted packets
 * @tx_bytes:       Number of Transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:	 Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with Gather DMA
 * @tx_lso:	    Counter of LSO packets sent
 * @hw_tls_tx:	    Counter of TLS packets sent with crypto offloaded to HW
 * @tls_tx_fallback:	Counter of TLS packets sent which had to be encrypted
 *			by the fallback path because packets came out of order
 * @tls_tx_no_fallback:	Counter of TLS packets not sent because the fallback
 *			path could not encrypt them
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @rx_replace_buf_alloc_fail:	Counter of RX buffer allocation failures
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context. This currently only supports one RX and TX ring per
 * interrupt vector but might be extended in the future to allow
 * association of multiple rings per vector.
 *
 * The anonymous union reflects the two kinds of vNICs: data vNICs are
 * serviced by NAPI, netdev-less ctrl vNICs by a tasklet + queue + lock.
 */
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	union {
		struct napi_struct napi;
		struct {
			struct tasklet_struct tasklet;
			struct sk_buff_head queue;
			spinlock_t lock;
		};
	};

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	u16 event_ctr;
	struct dim rx_dim;
	struct dim tx_dim;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_complete;
	u64 hw_tls_rx;

	u64 hw_csum_rx_error;
	u64 rx_replace_buf_alloc_fail;

	struct nfp_net_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;

	u64 ____cacheline_aligned_in_smp hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;
	u64 hw_tls_tx;

	u64 tls_tx_fallback;
	u64 tls_tx_no_fallback;
	u64 tx_errors;
	u64 tx_busy;

	/* Cold data follows */

	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;
0437 
/* Firmware version as it is written in the 32bit value in the BAR.
 * Note the minor/major byte order matches the register layout, not the
 * conventional major-first reading.
 */
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;

	/* This byte can be exploited for more use, currently,
	 * BIT0: dp type, BIT[7:1]: reserved
	 */
	u8 extend;
} __packed;
0449 
0450 static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
0451                      u8 extend, u8 class, u8 major, u8 minor)
0452 {
0453     return fw_ver->extend == extend &&
0454            fw_ver->class == class &&
0455            fw_ver->major == major &&
0456            fw_ver->minor == minor;
0457 }
0458 
/* Simple packet/byte counter pair used for stats reporting */
struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};
0463 
/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev:		Backpointer to struct device
 * @netdev:		Backpointer to net_device structure
 * @is_vf:		Is the driver attached to a VF?
 * @chained_metadata_format:  Firmware will use new metadata format
 * @ktls_tx:		Is kTLS TX enabled?
 * @rx_dma_dir:		Mapping direction for RX buffers
 * @rx_dma_off:		Offset at which DMA of RX packet data starts
 *			(for XDP headroom)
 * @rx_offset:		Offset in the RX buffers where packet data starts
 * @ctrl:		Local copy of the control register/word.
 * @fl_bufsz:		Currently configured size of the freelist buffers
 * @xdp_prog:		Installed XDP program
 * @tx_rings:		Array of pre-allocated TX ring structures
 * @rx_rings:		Array of pre-allocated RX ring structures
 * @ctrl_bar:		Pointer to mapped control BAR
 *
 * @ops:		Callbacks and parameters for this vNIC's NFD version
 * @txrwb:		TX pointer write back area (indexed by queue id)
 * @txrwb_dma:		TX pointer write back area DMA address
 * @txd_cnt:		Size of the TX ring in number of min size packets
 * @rxd_cnt:		Size of the RX ring in number of min size packets
 * @num_r_vecs:		Number of used ring vectors
 * @num_tx_rings:	Currently configured number of TX rings
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 * @xsk_pools:		XSK buffer pools, @max_r_vecs in size (for AF_XDP).
 */
struct nfp_net_dp {
	struct device *dev;
	struct net_device *netdev;

	u8 is_vf:1;
	u8 chained_metadata_format:1;
	u8 ktls_tx:1;

	u8 rx_dma_dir;
	u8 rx_offset;

	u32 rx_dma_off;

	u32 ctrl;
	u32 fl_bufsz;

	struct bpf_prog *xdp_prog;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	u8 __iomem *ctrl_bar;

	/* Cold data follows */

	const struct nfp_dp_ops *ops;

	u64 *txrwb;
	dma_addr_t txrwb_dma;

	unsigned int txd_cnt;
	unsigned int rxd_cnt;

	unsigned int num_r_vecs;

	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_rx_rings;

	unsigned int mtu;

	struct xsk_buff_pool **xsk_pools;
};
0536 
/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @dev_info:		NFP ASIC params
 * @id:			vNIC id within the PF (0 for VFs)
 * @fw_ver:		Firmware version
 * @cap:                Capabilities advertised by the Firmware
 * @max_mtu:            Maximum supported MTU advertised by the Firmware
 * @rss_hfunc:		RSS selected hash function
 * @rss_cfg:            RSS configuration
 * @rss_key:            RSS secret key
 * @rss_itbl:           RSS indirection table
 * @xdp:		Information about the driver XDP program
 * @xdp_hw:		Information about the HW XDP program
 * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
 * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
 * @stride_rx:		Queue controller RX queue spacing
 * @stride_tx:		Queue controller TX queue spacing
 * @r_vecs:             Pre-allocated array of ring vectors
 * @irq_entries:        Pre-allocated array of MSI-X entries
 * @lsc_handler:        Handler for Link State Change interrupt
 * @lsc_name:           Name for Link State Change interrupt
 * @exn_handler:        Handler for Exception interrupt
 * @exn_name:           Name for Exception interrupt
 * @shared_handler:     Handler for shared interrupts
 * @shared_name:        Name for shared interrupt
 * @reconfig_lock:	Protects @reconfig_posted, @reconfig_timer_active,
 *			@reconfig_sync_present and HW reconfiguration request
 *			regs/machinery from async requests (sync must take
 *			@bar_lock)
 * @reconfig_posted:	Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:	Timer for async reading of reconfig results
 * @reconfig_in_progress_update:	Update FW is processing now (debug only)
 * @bar_lock:		vNIC config BAR access lock, protects: update,
 *			mailbox area, crypto TLV
 * @link_up:            Is the link up?
 * @link_status_lock:	Protects @link_* and ensures atomicity with BAR reading
 * @rx_coalesce_adapt_on:   Is RX interrupt moderation adaptive?
 * @tx_coalesce_adapt_on:   Is TX interrupt moderation adaptive?
 * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @qcp_cfg:            Pointer to QCP queue used for configuration notification
 * @tx_bar:             Pointer to mapped TX queues
 * @rx_bar:             Pointer to mapped FL/RX queues
 * @tlv_caps:		Parsed TLV capabilities
 * @ktls_tx_conn_cnt:	Number of offloaded kTLS TX connections
 * @ktls_rx_conn_cnt:	Number of offloaded kTLS RX connections
 * @ktls_conn_id_gen:	Trivial generator for kTLS connection ids (for TX)
 * @ktls_no_space:	Counter of firmware rejecting kTLS connection due to
 *			lack of space
 * @ktls_rx_resync_req:	Counter of TLS RX resync requested
 * @ktls_rx_resync_ign:	Counter of TLS RX resync requests ignored
 * @ktls_rx_resync_sent:    Counter of TLS RX resync completed
 * @mbox_cmsg:		Common Control Message via vNIC mailbox state
 * @mbox_cmsg.queue:	CCM mbox queue of pending messages
 * @mbox_cmsg.wq:	CCM mbox wait queue of waiting processes
 * @mbox_cmsg.workq:	CCM mbox work queue for @wait_work and @runq_work
 * @mbox_cmsg.wait_work:    CCM mbox posted msg reconfig wait work
 * @mbox_cmsg.runq_work:    CCM mbox posted msg queue runner work
 * @mbox_cmsg.tag:	CCM mbox message tag allocator
 * @debugfs_dir:	Device directory in debugfs
 * @vnic_list:		Entry on device vNIC list
 * @pdev:		Backpointer to PCI device
 * @app:		APP handle if available
 * @vnic_no_name:	For non-port PF vNIC make ndo_get_phys_port_name return
 *			-EOPNOTSUPP to keep backwards compatibility (set by app)
 * @port:		Pointer to nfp_port structure if vNIC is a port
 * @app_priv:		APP private data for this vNIC
 */
struct nfp_net {
	struct nfp_net_dp dp;

	const struct nfp_dev_info *dev_info;
	struct nfp_net_fw_version fw_ver;

	u32 id;

	u32 cap;
	u32 max_mtu;

	u8 rss_hfunc;
	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	struct xdp_attachment_info xdp;
	struct xdp_attachment_info xdp_hw;

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	int stride_tx;
	int stride_rx;

	unsigned int max_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;
	u32 reconfig_in_progress_update;

	struct semaphore bar_lock;

	bool rx_coalesce_adapt_on;
	bool tx_coalesce_adapt_on;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	u8 __iomem *qcp_cfg;

	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

	struct nfp_net_tlv_caps tlv_caps;

	unsigned int ktls_tx_conn_cnt;
	unsigned int ktls_rx_conn_cnt;

	atomic64_t ktls_conn_id_gen;

	atomic_t ktls_no_space;
	atomic_t ktls_rx_resync_req;
	atomic_t ktls_rx_resync_ign;
	atomic_t ktls_rx_resync_sent;

	struct {
		struct sk_buff_head queue;
		wait_queue_head_t wq;
		struct workqueue_struct *workq;
		struct work_struct wait_work;
		struct work_struct runq_work;
		u16 tag;
	} mbox_cmsg;

	struct dentry *debugfs_dir;

	struct list_head vnic_list;

	struct pci_dev *pdev;
	struct nfp_app *app;

	bool vnic_no_name;

	struct nfp_port *port;

	void *app_priv;
};
0707 
0708 /* Functions to read/write from/to a BAR
0709  * Performs any endian conversion necessary.
0710  */
0711 static inline u16 nn_readb(struct nfp_net *nn, int off)
0712 {
0713     return readb(nn->dp.ctrl_bar + off);
0714 }
0715 
/* Write one byte at @off into the control BAR */
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
	writeb(val, nn->dp.ctrl_bar + off);
}

/* Read a 16-bit word from the control BAR */
static inline u16 nn_readw(struct nfp_net *nn, int off)
{
	return readw(nn->dp.ctrl_bar + off);
}

/* Write a 16-bit word at @off into the control BAR */
static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
	writew(val, nn->dp.ctrl_bar + off);
}
0730 
/* Read a 32-bit word from the control BAR */
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
	return readl(nn->dp.ctrl_bar + off);
}

/* Write a 32-bit word at @off into the control BAR */
static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
	writel(val, nn->dp.ctrl_bar + off);
}

/* Read a 64-bit word from the control BAR (hi-lo on 32-bit hosts,
 * via io-64-nonatomic-hi-lo.h)
 */
static inline u64 nn_readq(struct nfp_net *nn, int off)
{
	return readq(nn->dp.ctrl_bar + off);
}

/* Write a 64-bit word at @off into the control BAR */
static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
	writeq(val, nn->dp.ctrl_bar + off);
}
0750 
/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	nn_readl(nn, NFP_NET_CFG_VERSION);
}
0756 
/* Queue Controller Peripheral access functions and definitions.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral (QCP) address space on the NFP.  A QCP queue
 * has a read and a write pointer (as well as a size and flags,
 * indicating overflow etc).  The QCP offers a number of different
 * operation on queue pointers, but here we only offer function to
 * either add to a pointer or to read the pointer value.
 */
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};
0780 
/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 *
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 *
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
}
0802 
0803 static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
0804 {
0805     u32 off;
0806     u32 val;
0807 
0808     if (ptr == NFP_QCP_READ_PTR)
0809         off = NFP_QCP_QUEUE_STS_LO;
0810     else
0811         off = NFP_QCP_QUEUE_STS_HI;
0812 
0813     val = readl(q + off);
0814 
0815     if (ptr == NFP_QCP_READ_PTR)
0816         return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
0817     else
0818         return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
0819 }
0820 
/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
0842 
0843 u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);
0844 
0845 static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
0846 {
0847     WARN_ON_ONCE(!nn->dp.netdev && nn->port);
0848     return !!nn->dp.netdev;
0849 }
0850 
0851 static inline bool nfp_net_running(struct nfp_net *nn)
0852 {
0853     return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
0854 }
0855 
0856 static inline const char *nfp_net_name(struct nfp_net *nn)
0857 {
0858     return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
0859 }
0860 
/* Serialize control vNIC access by taking the first ring vector's lock
 * (BH-disabling, as the lock is also taken from softirq context).
 */
static inline void nfp_ctrl_lock(struct nfp_net *nn)
    __acquires(&nn->r_vecs[0].lock)
{
    spin_lock_bh(&nn->r_vecs[0].lock);
}
0866 
/* Release the control vNIC lock taken by nfp_ctrl_lock(). */
static inline void nfp_ctrl_unlock(struct nfp_net *nn)
    __releases(&nn->r_vecs[0].lock)
{
    spin_unlock_bh(&nn->r_vecs[0].lock);
}
0872 
/* Acquire the control BAR semaphore; may sleep until it is available. */
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
    down(&nn->bar_lock);
}
0877 
0878 static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
0879 {
0880     return !down_trylock(&nn->bar_lock);
0881 }
0882 
/* Release the control BAR semaphore taken by nn_ctrl_bar_lock(). */
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
    up(&nn->bar_lock);
}
0887 
0888 /* Globals */
0889 extern const char nfp_driver_version[];
0890 
0891 extern const struct net_device_ops nfp_nfd3_netdev_ops;
0892 extern const struct net_device_ops nfp_nfdk_netdev_ops;
0893 
0894 static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
0895 {
0896     return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
0897            netdev->netdev_ops == &nfp_nfdk_netdev_ops;
0898 }
0899 
0900 static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
0901 {
0902     if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
0903         return -EINVAL;
0904 
0905     return 0;
0906 }
0907 
0908 /* Prototypes */
0909 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
0910                 void __iomem *ctrl_bar);
0911 
0912 struct nfp_net *
0913 nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
0914           void __iomem *ctrl_bar, bool needs_netdev,
0915           unsigned int max_tx_rings, unsigned int max_rx_rings);
0916 void nfp_net_free(struct nfp_net *nn);
0917 
0918 int nfp_net_init(struct nfp_net *nn);
0919 void nfp_net_clean(struct nfp_net *nn);
0920 
0921 int nfp_ctrl_open(struct nfp_net *nn);
0922 void nfp_ctrl_close(struct nfp_net *nn);
0923 
0924 void nfp_net_set_ethtool_ops(struct net_device *netdev);
0925 void nfp_net_info(struct nfp_net *nn);
0926 int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
0927 int nfp_net_reconfig(struct nfp_net *nn, u32 update);
0928 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
0929 void nfp_net_rss_write_itbl(struct nfp_net *nn);
0930 void nfp_net_rss_write_key(struct nfp_net *nn);
0931 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
0932 int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
0933 int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
0934 int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
0935 void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
0936 int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
0937 
0938 unsigned int
0939 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
0940            unsigned int min_irqs, unsigned int want_irqs);
0941 void nfp_net_irqs_disable(struct pci_dev *pdev);
0942 void
0943 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
0944             unsigned int n);
0945 struct sk_buff *
0946 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
0947            struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
0948 void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
0949 
0950 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
0951 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
0952               struct netlink_ext_ack *extack);
0953 
/* Debugfs support - real implementations when CONFIG_NFP_DEBUG is set,
 * no-op stubs otherwise so callers need no #ifdefs of their own.
 */
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
    /* No debugfs: callers get a NULL directory handle. */
    return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif /* CONFIG_NFP_DEBUG */
0983 
0984 #endif /* _NFP_NET_H_ */