0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef _NFP_NET_H_
0013 #define _NFP_NET_H_
0014
0015 #include <linux/atomic.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/list.h>
0018 #include <linux/netdevice.h>
0019 #include <linux/pci.h>
0020 #include <linux/dim.h>
0021 #include <linux/io-64-nonatomic-hi-lo.h>
0022 #include <linux/semaphore.h>
0023 #include <linux/workqueue.h>
0024 #include <net/xdp.h>
0025
0026 #include "nfp_net_ctrl.h"
0027
/* nn_pr() - log via the netdev when one exists; the control vNIC has no
 * netdev, so fall back to the underlying struct device with a "ctrl: "
 * prefix.  Statement-expression macro; evaluates @nn exactly once.
 */
#define nn_pr(nn, lvl, fmt, args...)					\
	({								\
		struct nfp_net *__nn = (nn);				\
									\
		if (__nn->dp.netdev)					\
			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
		else							\
			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
	})

/* Convenience wrappers around nn_pr() for the common log levels */
#define nn_err(nn, fmt, args...)	nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...)	nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...)	nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...)	nn_pr(nn, KERN_DEBUG, fmt, ## args)
0042
/* nn_dp_warn() - rate-limited warning; picks netdev_warn() when the
 * datapath has a netdev, dev_warn() otherwise (control vNIC).
 * Evaluates @dp exactly once.
 */
#define nn_dp_warn(dp, fmt, args...)					\
	({								\
		struct nfp_net_dp *__dp = (dp);				\
									\
		if (unlikely(net_ratelimit())) {			\
			if (__dp->netdev)				\
				netdev_warn(__dp->netdev, fmt, ## args); \
			else						\
				dev_warn(__dp->dev, fmt, ## args);	\
		}							\
	})
0054
0055
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT	5

/* Interval for reading offloaded filter stats */
#define NFP_NET_STAT_POLL_IVL	msecs_to_jiffies(100)

/* Bar allocation */
#define NFP_NET_CTRL_BAR	0
#define NFP_NET_Q0_BAR		2
#define NFP_NET_Q1_BAR		4

/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU	1500U

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND	64

/* Interrupt definitions: vectors not used for rings (LSC + EXN) */
#define NFP_NET_NON_Q_VECTORS	2
#define NFP_NET_IRQ_LSC_IDX	0
#define NFP_NET_IRQ_EXN_IDX	1
#define NFP_NET_MIN_VNIC_IRQS	(NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH	16	/* Add freelist in this Batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048	/* XDP bufs to reclaim in NAPI poll */

/* Number of VXLAN port entries held in the firmware VXLAN table */
#define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

/* Bytes of an RX buffer not available for packet data */
#define NFP_NET_RX_BUF_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA	(NFP_NET_RX_BUF_HEADROOM +		\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
0097
0098
/* Forward declarations */
struct nfp_cpp;
struct nfp_dev_info;
struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
struct xsk_buff_pool;

/* NFD3 datapath descriptor/buffer types (defined in nfd3/) */
struct nfp_nfd3_tx_desc;
struct nfp_nfd3_tx_buf;

/* NFDK datapath descriptor/buffer types (defined in nfdk/) */
struct nfp_nfdk_tx_desc;
struct nfp_nfdk_tx_buf;

/* Convert a free-running ring index into a masked descriptor index.
 * Relies on the ring count being a power of two.
 */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))
0116
0117
/* nfp_desc_set_dma_addr_40b() - write a 40-bit DMA address into a
 * descriptor with @dma_addr_lo (__le32) and a byte-sized @dma_addr_hi.
 * Only the low 8 bits of the upper word are kept; note dma_addr_hi is
 * stored without endian conversion (single byte).
 */
#define nfp_desc_set_dma_addr_40b(desc, dma_addr)			\
	do {								\
		__typeof__(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)

/* nfp_desc_set_dma_addr_48b() - write a 48-bit DMA address into a
 * descriptor with @dma_addr_lo (__le32) and @dma_addr_hi (__le16).
 */
#define nfp_desc_set_dma_addr_48b(desc, dma_addr)			\
	do {								\
		__typeof__(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr));	\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
	} while (0)
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:        Back pointer to ring vector structure
 * @idx:          Ring index from Linux's perspective
 * @data_pending: number of bytes added to current block (NFDK only)
 * @qcp_q:        Pointer to base of the QCP TX queue
 * @txrwb:        TX pointer write back area
 * @cnt:          Size of the queue in number of descriptors
 * @wr_p:         TX ring write pointer (free running)
 * @rd_p:         TX ring read pointer (free running)
 * @qcp_rd_p:     Local copy of QCP TX queue read pointer
 * @wr_ptr_add:   Accumulated number of buffers to add to QCP write pointer
 *                (used for TX batching)
 * @txbufs:       Array of transmitted TX buffers, to free on transmit (NFD3)
 * @ktxbufs:      Array of transmitted TX buffers, to free on transmit (NFDK)
 * @txds:         Virtual address of TX ring in host memory (NFD3)
 * @ktxds:        Virtual address of TX ring in host memory (NFDK)
 * @qcidx:        Queue Controller Peripheral (QCP) queue index for the TX queue
 * @dma:          DMA address of the TX ring
 * @size:         Size, in bytes, of the TX ring (needed to free)
 * @is_xdp:       Is this a XDP TX ring?
 */
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u16 idx;
	u16 data_pending;
	u8 __iomem *qcp_q;
	u64 *txrwb;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	union {
		struct nfp_nfd3_tx_buf *txbufs;
		struct nfp_nfdk_tx_buf *ktxbufs;
	};
	union {
		struct nfp_nfd3_tx_desc *txds;
		struct nfp_nfdk_tx_desc *ktxds;
	};

	/* Cold data follows */
	int qcidx;

	dma_addr_t dma;
	size_t size;
	bool is_xdp;
} ____cacheline_aligned;
0190
0191
0192
/* RX and freelist descriptor format */

/* Flags in the meta_len_dd byte of the RX descriptor */
#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor (little-endian on-the-wire values) */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM	cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM	cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_DECRYPTED		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

/* All "checksum present" bits; the matching "checksum OK" bits sit one
 * position below each (see PCIE_DESC_RX_CSUM_OK_SHIFT).
 */
#define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
					 PCIE_DESC_RX_TCP_CSUM |	\
					 PCIE_DESC_RX_UDP_CSUM |	\
					 PCIE_DESC_RX_I_IP4_CSUM |	\
					 PCIE_DESC_RX_I_TCP_CSUM |	\
					 PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)
0224
struct nfp_net_rx_desc {
	union {
		/* Freelist descriptor (host -> device) */
		struct {
			__le16 dma_addr_hi; /* High bits of the buf address */
			u8 reserved;
			u8 meta_len_dd;	/* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buf address */
		} __packed fld;

		/* RX descriptor (device -> host) */
		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd;	/* Length of meta data prepended +
					 * descriptor done flag (PCIE_DESC_RX_DD)
					 */

			__le16 flags;	/* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan;	/* Offloaded VLAN, if stripped */
		} __packed rxd;

		__le32 vals[2];
	};
};
0249
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
/* VLAN tag type identifiers used in parsed metadata */
#define NFP_NET_VLAN_CTAG	0
#define NFP_NET_VLAN_STAG	1

/**
 * struct nfp_meta_parsed - packet metadata parsed from the buffer prepend
 * @hash_type: type of the hash reported in @hash
 * @csum_type: type of checksum reported in @csum
 * @hash:      hash value (e.g. for RSS) reported by the device
 * @mark:      packet mark
 * @portid:    source port id
 * @csum:      checksum computed by hardware
 * @vlan:      stripped-VLAN info
 * @vlan.stripped: was a VLAN tag stripped by the device?
 * @vlan.tpid: NFP_NET_VLAN_CTAG or NFP_NET_VLAN_STAG
 * @vlan.tci:  TCI of the stripped tag
 */
struct nfp_meta_parsed {
	u8 hash_type;
	u8 csum_type;
	u32 hash;
	u32 mark;
	u32 portid;
	__wsum csum;
	struct {
		bool stripped;
		u8 tpid;
		u16 tci;
	} vlan;
};
0267
/* Hash record as laid out in chained metadata (big endian on the wire) */
struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};

/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:     page fragment buffer
 * @dma_addr: DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};

/**
 * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
 * @dma_addr: DMA mapping address of the buffer
 * @xdp:      XSK buffer pool handle (for AF_XDP zero-copy)
 */
struct nfp_net_xsk_rx_buf {
	dma_addr_t dma_addr;
	struct xdp_buff *xdp;
};
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of transmitted FL/RX buffers
 * @xsk_rxbufs: Array of transmitted FL/RX buffers (AF_XDP)
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @xdp_rxq:    RX-ring info available for XDP
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 */
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u32 idx;

	int fl_qcidx;
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_xsk_rx_buf *xsk_rxbufs;
	struct nfp_net_rx_desc *rxds;

	struct xdp_rxq_info xdp_rxq;

	dma_addr_t dma;
	size_t size;
} ____cacheline_aligned;
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec (data vNICs)
 * @tasklet:        ctrl vNIC, tasklet for servicing the r_vec
 * @queue:          ctrl vNIC, send queue
 * @lock:           ctrl vNIC, r_vec lock protects @queue
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @event_ctr:      Number of interrupt events (fed to DIM)
 * @rx_dim:         Dynamic interrupt moderation structure for RX
 * @tx_dim:         Dynamic interrupt moderation structure for TX
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
 * @hw_tls_rx:      Number of packets with TLS decrypted by hardware
 * @hw_csum_rx_error:	 Counter of packets with bad checksums
 * @rx_replace_buf_alloc_fail:	Counter of RX buffer allocation failures
 * @xdp_ring:       Pointer to an extra TX ring for XDP
 * @xsk_pool:       XSK buffer pool active on vector queue pair (or NULL)
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of Transmitted packets
 * @tx_bytes:       Number of Transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:	 Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with Gather DMA
 * @tx_lso:         Counter of LSO packets sent
 * @hw_tls_tx:      Counter of TLS packets sent with crypto offloaded to HW
 * @tls_tx_fallback:	Counter of TLS packets sent which had to be encrypted
 *			by the fallback path
 * @tls_tx_no_fallback:	Counter of TLS packets not sent because the fallback
 *			path could not encrypt them
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context (or, for the control vNIC, a tasklet).
 */
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	union {
		struct napi_struct napi;
		struct {
			struct tasklet_struct tasklet;
			struct sk_buff_head queue;
			spinlock_t lock;
		};
	};

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	u16 event_ctr;
	struct dim rx_dim;
	struct dim tx_dim;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_complete;
	u64 hw_tls_rx;

	u64 hw_csum_rx_error;
	u64 rx_replace_buf_alloc_fail;

	struct nfp_net_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;

	u64 ____cacheline_aligned_in_smp hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;
	u64 hw_tls_tx;

	u64 tls_tx_fallback;
	u64 tls_tx_no_fallback;
	u64 tx_errors;
	u64 tx_busy;

	/* Cold data follows */
	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;
0437
0438
/* Firmware version as it is written in the 32bit value in the BAR */
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;

	/* This byte can be exploited for more use, currently,
	 * BIT0: dp type, BIT[7:1]: reserved
	 */
	u8 extend;
} __packed;
0449
0450 static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
0451 u8 extend, u8 class, u8 major, u8 minor)
0452 {
0453 return fw_ver->extend == extend &&
0454 fw_ver->class == class &&
0455 fw_ver->major == major &&
0456 fw_ver->minor == minor;
0457 }
0458
/**
 * struct nfp_stat_pair - basic stats pair
 * @pkts:  Packet count
 * @bytes: Byte count
 */
struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev:		Backpointer to struct device
 * @netdev:		Backpointer to net_device structure
 * @is_vf:		Is the driver attached to a VF?
 * @chained_metadata_format:  Firmware will use new metadata format
 * @ktls_tx:		Is kTLS TX enabled?
 * @rx_dma_dir:		Mapping direction for RX buffers
 * @rx_offset:		Offset in the RX buffers where packet data starts
 * @rx_dma_off:		Offset at which DMA packets (for XDP headroom)
 * @ctrl:		Local copy of the control register/word.
 * @fl_bufsz:		Currently configured size of the freelist buffers
 * @xdp_prog:		Installed XDP program
 * @tx_rings:		Array of pre-allocated TX ring structures
 * @rx_rings:		Array of pre-allocated RX ring structures
 * @ctrl_bar:		Pointer to mapped control BAR
 * @ops:		Callbacks and parameters for this vNIC's NFD version
 * @txrwb:		TX pointer write back area (vaddr)
 * @txrwb_dma:		TX pointer write back area (DMA address)
 * @txd_cnt:		Size of the TX ring in number of descriptors
 * @rxd_cnt:		Size of the RX ring in number of descriptors
 * @num_r_vecs:		Number of used ring vectors
 * @num_tx_rings:	Currently configured number of TX rings
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 * @xsk_pools:		XSK buffer pools (for AF_XDP zero-copy)
 *
 * This structure groups together the state that may be swapped out
 * atomically during ring reconfiguration (see nfp_net_clone_dp() and
 * nfp_net_ring_reconfig()).
 */
struct nfp_net_dp {
	struct device *dev;
	struct net_device *netdev;

	u8 is_vf:1;
	u8 chained_metadata_format:1;
	u8 ktls_tx:1;

	u8 rx_dma_dir;
	u8 rx_offset;

	u32 rx_dma_off;

	u32 ctrl;
	u32 fl_bufsz;

	struct bpf_prog *xdp_prog;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	u8 __iomem *ctrl_bar;

	/* Cold data follows */

	const struct nfp_dp_ops *ops;

	u64 *txrwb;
	dma_addr_t txrwb_dma;

	unsigned int txd_cnt;
	unsigned int rxd_cnt;

	unsigned int num_r_vecs;

	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_rx_rings;

	unsigned int mtu;

	struct xsk_buff_pool **xsk_pools;
};
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @dev_info:		NFP ASIC params
 * @fw_ver:		Firmware version
 * @id:			vNIC id within the PF (0 for VFs)
 * @cap:		Capabilities advertised by the Firmware
 * @max_mtu:		Maximum supported MTU advertised by the Firmware
 * @rss_hfunc:		RSS selected hash function
 * @rss_cfg:		RSS configuration
 * @rss_key:		RSS secret key
 * @rss_itbl:		RSS indirection table
 * @xdp:		Information about the driver XDP program
 * @xdp_hw:		Information about the HW XDP program
 * @max_tx_rings:	Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:	Maximum number of RX rings supported by the Firmware
 * @stride_tx:		Queue controller TX queue spacing
 * @stride_rx:		Queue controller RX queue spacing
 * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
 * @r_vecs:		Pre-allocated array of ring vectors
 * @irq_entries:	Pre-allocated array of MSI-X entries
 * @lsc_handler:	Handler for Link State Change interrupt
 * @lsc_name:		Name for Link State Change interrupt
 * @exn_handler:	Handler for Exception interrupt
 * @exn_name:		Name for Exception interrupt
 * @shared_handler:	Handler for shared interrupts
 * @shared_name:	Name for shared interrupt
 * @link_up:		Is the link up?
 * @link_status_lock:	Protects @link_up and ensures atomicity with BAR reading
 * @reconfig_lock:	Protects @reconfig_posted, @reconfig_timer_active,
 *			@reconfig_sync_present and HW reconfiguration request
 *			regs/machinery from async requests (sync must take
 *			@bar_lock)
 * @reconfig_posted:	Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:	Timer for async reading of reconfig results
 * @reconfig_in_progress_update:	Update FW is processing now (debug only)
 * @bar_lock:		vNIC config BAR access lock, protects: update,
 *			mailbox area, crypto TLV
 * @rx_coalesce_adapt_on:   Is RX interrupt moderation adaptive?
 * @tx_coalesce_adapt_on:   Is TX interrupt moderation adaptive?
 * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @qcp_cfg:		Pointer to QCP queue used for configuration notification
 * @tx_bar:		Pointer to mapped TX queues
 * @rx_bar:		Pointer to mapped FL/RX queues
 * @tlv_caps:		Parsed TLV capabilities
 * @ktls_tx_conn_cnt:	Number of offloaded kTLS TX connections
 * @ktls_rx_conn_cnt:	Number of offloaded kTLS RX connections
 * @ktls_conn_id_gen:	Trivial generator for kTLS connection ids (for TX)
 * @ktls_no_space:	Counter of firmware rejecting kTLS connections due to
 *			lack of space
 * @ktls_rx_resync_req:	Counter of TLS RX resync requested
 * @ktls_rx_resync_ign:	Counter of TLS RX resync requests ignored
 * @ktls_rx_resync_sent:    Counter of TLS RX resync completed
 * @mbox_cmsg:		Control message via vNIC mailbox state
 * @mbox_cmsg.queue:	mailbox queue of pending messages
 * @mbox_cmsg.wq:	mailbox wait queue of waiting processes
 * @mbox_cmsg.workq:	mailbox work queue for @wait_work and @runq_work
 * @mbox_cmsg.wait_work:    mailbox posted msg reconfig wait work
 * @mbox_cmsg.runq_work:    mailbox posted msg queue runner work
 * @mbox_cmsg.tag:	mailbox message tag allocator
 * @debugfs_dir:	Device directory in debugfs
 * @vnic_list:		Entry on device vNIC list
 * @pdev:		Backpointer to PCI device
 * @app:		APP handle if available
 * @vnic_no_name:	For non-port PF vNIC make ndo_get_phys_port_name return
 *			-EOPNOTSUPP to keep backwards compatibility (set by app)
 * @port:		Pointer to nfp_port structure if vNIC is a port
 * @app_priv:		APP private data for this vNIC
 */
struct nfp_net {
	struct nfp_net_dp dp;

	const struct nfp_dev_info *dev_info;
	struct nfp_net_fw_version fw_ver;

	u32 id;

	u32 cap;
	u32 max_mtu;

	u8 rss_hfunc;
	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	struct xdp_attachment_info xdp;
	struct xdp_attachment_info xdp_hw;

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	int stride_tx;
	int stride_rx;

	unsigned int max_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;
	u32 reconfig_in_progress_update;

	struct semaphore bar_lock;

	bool rx_coalesce_adapt_on;
	bool tx_coalesce_adapt_on;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	u8 __iomem *qcp_cfg;

	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

	struct nfp_net_tlv_caps tlv_caps;

	unsigned int ktls_tx_conn_cnt;
	unsigned int ktls_rx_conn_cnt;

	atomic64_t ktls_conn_id_gen;

	atomic_t ktls_no_space;
	atomic_t ktls_rx_resync_req;
	atomic_t ktls_rx_resync_ign;
	atomic_t ktls_rx_resync_sent;

	struct {
		struct sk_buff_head queue;
		wait_queue_head_t wq;
		struct workqueue_struct *workq;
		struct work_struct wait_work;
		struct work_struct runq_work;
		u16 tag;
	} mbox_cmsg;

	struct dentry *debugfs_dir;

	struct list_head vnic_list;

	struct pci_dev *pdev;
	struct nfp_app *app;

	bool vnic_no_name;

	struct nfp_port *port;

	void *app_priv;
};
0707
0708
0709
0710
0711 static inline u16 nn_readb(struct nfp_net *nn, int off)
0712 {
0713 return readb(nn->dp.ctrl_bar + off);
0714 }
0715
/* Functions to read/write from/to the vNIC control BAR.
 * The io accessors perform any endian conversion necessary.
 */
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
	writeb(val, nn->dp.ctrl_bar + off);
}

static inline u16 nn_readw(struct nfp_net *nn, int off)
{
	return readw(nn->dp.ctrl_bar + off);
}

static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
	writew(val, nn->dp.ctrl_bar + off);
}

static inline u32 nn_readl(struct nfp_net *nn, int off)
{
	return readl(nn->dp.ctrl_bar + off);
}

static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
	writel(val, nn->dp.ctrl_bar + off);
}

static inline u64 nn_readq(struct nfp_net *nn, int off)
{
	return readq(nn->dp.ctrl_bar + off);
}

static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
	writeq(val, nn->dp.ctrl_bar + off);
}

/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	nn_readl(nn, NFP_NET_CFG_VERSION);
}
0756
0757
0758
0759
0760
0761
0762
0763
0764
0765
/* Queue Controller Peripheral (QCP) access functions and definitions.
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral address space on the NFP.  Each queue occupies
 * NFP_QCP_QUEUE_ADDR_SZ bytes: write-to-add pointer registers followed
 * by status registers holding the current read/write pointer values.
 */
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* Which pointer of a queue to access */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};
0780
0781
0782
0783
0784
0785
0786
/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
}
0802
0803 static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
0804 {
0805 u32 off;
0806 u32 val;
0807
0808 if (ptr == NFP_QCP_READ_PTR)
0809 off = NFP_QCP_QUEUE_STS_LO;
0810 else
0811 off = NFP_QCP_QUEUE_STS_HI;
0812
0813 val = readl(q + off);
0814
0815 if (ptr == NFP_QCP_READ_PTR)
0816 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
0817 else
0818 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
0819 }
0820
0821
0822
0823
0824
0825
0826
/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}

u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);

/* Data vNICs have a netdev; the control vNIC does not.  A netdev-less
 * vNIC is never expected to have a port (hence the WARN).
 */
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
	WARN_ON_ONCE(!nn->dp.netdev && nn->port);
	return !!nn->dp.netdev;
}
0850
/* Is the vNIC enabled (ENABLE bit set in the cached control word)? */
static inline bool nfp_net_running(struct nfp_net *nn)
{
	return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}

/* Name to use in log messages: netdev name when present, "ctrl" for the
 * control vNIC.
 */
static inline const char *nfp_net_name(struct nfp_net *nn)
{
	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}

/* Control message processing is serialized on r_vecs[0]'s lock, taken
 * with BHs disabled.
 */
static inline void nfp_ctrl_lock(struct nfp_net *nn)
	__acquires(&nn->r_vecs[0].lock)
{
	spin_lock_bh(&nn->r_vecs[0].lock);
}

static inline void nfp_ctrl_unlock(struct nfp_net *nn)
	__releases(&nn->r_vecs[0].lock)
{
	spin_unlock_bh(&nn->r_vecs[0].lock);
}

/* @bar_lock is a semaphore, so holders may sleep (e.g. while waiting
 * for firmware to process an update).
 */
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
	down(&nn->bar_lock);
}

/* Return: true when the BAR lock was acquired */
static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
{
	return !down_trylock(&nn->bar_lock);
}

static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
	up(&nn->bar_lock);
}
0887
0888
/* Globals */
extern const char nfp_driver_version[];

extern const struct net_device_ops nfp_nfd3_netdev_ops;
extern const struct net_device_ops nfp_nfdk_netdev_ops;

/* Does this netdev belong to the NFP driver (NFD3 or NFDK datapath)? */
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
	return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
	       netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
0899
0900 static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
0901 {
0902 if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
0903 return -EINVAL;
0904
0905 return 0;
0906 }
0907
0908
/* Read the firmware version word out of a mapped control BAR */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar);

/* vNIC lifetime: alloc/free, init/clean */
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
	      void __iomem *ctrl_bar, bool needs_netdev,
	      unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);

int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn);

/* Control vNIC open/close */
int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);

/* Configuration and reconfiguration helpers */
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);

/* MSI-X vector management */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n);

/* kTLS TX helpers */
struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);

/* Datapath swap for live ring reconfiguration */
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
			  struct netlink_ext_ack *extack);
0953
#ifdef CONFIG_NFP_DEBUG
/* debugfs support */
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
/* Stubs used when debugfs support is compiled out */
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
	return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif
0983
0984 #endif