/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */

#ifndef _NFP_NET_DP_
#define _NFP_NET_DP_

#include "nfp_net.h"

static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
    return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
                    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
                    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
    dma_sync_single_for_device(dp->dev, dma_addr,
                   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
                   dp->rx_dma_dir);
}

static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
                    dma_addr_t dma_addr)
{
    dma_unmap_single_attrs(dp->dev, dma_addr,
                   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
                   dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
                       dma_addr_t dma_addr,
                       unsigned int len)
{
    dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
                len, dp->rx_dma_dir);
}
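
/*
 * Illustrative RX buffer DMA lifecycle (a sketch only, not driver code;
 * "rxbuf", "pkt_off" and "pkt_len" are hypothetical locals).  The helpers
 * above map just the data portion of a fragment (the headroom is excluded)
 * and pass DMA_ATTR_SKIP_CPU_SYNC, so only the bytes the device actually
 * wrote need to be synced back to the CPU on receive:
 *
 *	frag = nfp_net_rx_alloc_one(dp, &rxbuf->dma_addr);
 *	nfp_net_dma_sync_dev_rx(dp, rxbuf->dma_addr);
 *	... device DMAs a packet into the buffer ...
 *	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + pkt_off, pkt_len);
 *	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
 */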

/**
 * nfp_net_tx_full() - check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
    return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
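
/*
 * Typical use in a transmit path (an illustrative sketch only; "nd_q" and
 * "needed" are hypothetical locals).  Since the check relies on the host
 * copies of the pointers it may be pessimistic, so a full ring usually stops
 * the queue rather than dropping the packet:
 *
 *	if (unlikely(nfp_net_tx_full(tx_ring, needed))) {
 *		netif_tx_stop_queue(nd_q);
 *		return NETDEV_TX_BUSY;
 *	}
 */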

static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
    wmb(); /* drain writebuffer */
    nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
    tx_ring->wr_ptr_add = 0;
}
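
/*
 * Illustrative doorbell coalescing pattern (a sketch, not driver code;
 * "nd_q" and "nr_descs" are hypothetical locals).  Descriptor counts are
 * accumulated in tx_ring->wr_ptr_add and the QCP write pointer is only
 * kicked once the stack stops batching packets:
 *
 *	tx_ring->wr_ptr_add += nr_descs;
 *	if (!netdev_xmit_more() || netif_xmit_stopped(nd_q))
 *		nfp_net_tx_xmit_more_flush(tx_ring);
 */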

static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
{
    if (tx_ring->txrwb)
        return *tx_ring->txrwb;
    return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
}
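
/*
 * When the TX ring has a completion write-back area (tx_ring->txrwb), the
 * completion index is read from host memory instead of from the QCP queue
 * pointer, avoiding a read across PCIe on the completion path.
 */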

static inline void nfp_net_free_frag(void *frag, bool xdp)
{
    if (!xdp)
        skb_free_frag(frag);
    else
        __free_page(virt_to_page(frag));
}

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
    nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
    nn_pci_flush(nn);
}
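
/*
 * Typically called at the end of a NAPI poll once the budget was not
 * exhausted (illustrative sketch only; field names as in struct
 * nfp_net_r_vector):
 *
 *	if (napi_complete_done(napi, pkts_polled))
 *		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
 */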

struct seq_file;

/* Common */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
                 struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
                 struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
            const struct nfp_meta_parsed *meta);

enum nfp_nfd_version {
    NFP_NFD_VER_NFD3,
    NFP_NFD_VER_NFDK,
};

/**
 * struct nfp_dp_ops - Hooks wrapping the different datapath implementations
 * @version:            Datapath type (NFD3 or NFDK)
 * @tx_min_desc_per_pkt:    Minimum number of TX descriptors needed per packet
 * @cap_mask:           Mask of supported features
 * @dma_mask:           DMA addressing capability
 * @poll:           NAPI poll for normal RX/TX
 * @xsk_poll:           NAPI poll when AF_XDP (XSK) is enabled
 * @ctrl_poll:          Tasklet poll for control path RX/TX
 * @xmit:           Transmit on the normal data path
 * @ctrl_tx_one:        Transmit one control message
 * @rx_ring_fill_freelist:  Give buffers from the ring to the FW
 * @tx_ring_alloc:      Allocate resources for a TX ring
 * @tx_ring_reset:      Free any untransmitted buffers and reset pointers
 * @tx_ring_free:       Free resources allocated to a TX ring
 * @tx_ring_bufs_alloc:     Allocate resources for each TX buffer
 * @tx_ring_bufs_free:      Free resources allocated to each TX buffer
 * @print_tx_descs:     Show a TX ring's descriptors for debug purposes
 */
struct nfp_dp_ops {
    enum nfp_nfd_version version;
    unsigned int tx_min_desc_per_pkt;
    u32 cap_mask;
    u64 dma_mask;

    int (*poll)(struct napi_struct *napi, int budget);
    int (*xsk_poll)(struct napi_struct *napi, int budget);
    void (*ctrl_poll)(struct tasklet_struct *t);
    netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
    bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                struct sk_buff *skb, bool old);
    void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
                      struct nfp_net_rx_ring *rx_ring);
    int (*tx_ring_alloc)(struct nfp_net_dp *dp,
                 struct nfp_net_tx_ring *tx_ring);
    void (*tx_ring_reset)(struct nfp_net_dp *dp,
                  struct nfp_net_tx_ring *tx_ring);
    void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
    int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
                  struct nfp_net_tx_ring *tx_ring);
    void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
                  struct nfp_net_tx_ring *tx_ring);

    void (*print_tx_descs)(struct seq_file *file,
                   struct nfp_net_r_vector *r_vec,
                   struct nfp_net_tx_ring *tx_ring,
                   u32 d_rd_p, u32 d_wr_p);
};
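
/*
 * A hypothetical initializer, to illustrate how a datapath registers its
 * hooks (field values and function names below are placeholders, not the
 * real NFD3 or NFDK definitions):
 *
 *	const struct nfp_dp_ops example_dp_ops = {
 *		.version		= NFP_NFD_VER_NFD3,
 *		.tx_min_desc_per_pkt	= 1,
 *		.cap_mask		= EXAMPLE_SUPPORTED_CAPS,
 *		.dma_mask		= DMA_BIT_MASK(40),
 *		.poll			= example_poll,
 *		.xmit			= example_xmit,
 *		.tx_ring_alloc		= example_tx_ring_alloc,
 *		...
 *	};
 */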

static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
    dp->ops->tx_ring_reset(dp, tx_ring);
}

static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
                  struct nfp_net_rx_ring *rx_ring)
{
    dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}

static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
    return dp->ops->tx_ring_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
    dp->ops->tx_ring_free(tx_ring);
}

static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
               struct nfp_net_tx_ring *tx_ring)
{
    return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}

static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
              struct nfp_net_tx_ring *tx_ring)
{
    dp->ops->tx_ring_bufs_free(dp, tx_ring);
}

static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
                   struct nfp_net_r_vector *r_vec,
                   struct nfp_net_tx_ring *tx_ring,
                   u32 d_rd_p, u32 d_wr_p)
{
    dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}

extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;
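
/*
 * The driver core picks one of these ops tables at probe time based on the
 * NFD version advertised by the firmware, roughly (a simplified sketch;
 * "fw_uses_nfdk" is a placeholder for the real version check):
 *
 *	nn->dp.ops = fw_uses_nfdk ? &nfp_nfdk_ops : &nfp_nfd3_ops;
 */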

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);

#endif /* _NFP_NET_DP_ */