/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
};
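
/* Illustrative sketch (not part of this header): a zero-copy driver that
 * holds a struct xdp_buff * handed out by this pool can recover the
 * enclosing xdp_buff_xsk with container_of(), since xdp is the first
 * member:
 *
 *	struct xdp_buff_xsk *xskb =
 *		container_of(xdp, struct xdp_buff_xsk, xdp);
 */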

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list;
	u32 dma_pages_cnt;
	bool dma_need_sync;
};
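
/* Hedged sketch of how xsk_dma_map is meant to be shared: pools created on
 * the same umem and device reuse one mapping by taking a reference instead
 * of remapping all the pages. The lookup helper below is hypothetical; the
 * real logic lives in net/xdp/xsk_buff_pool.c:
 *
 *	map = find_existing_dma_map(umem, dev);		// hypothetical
 *	if (map)
 *		refcount_inc(&map->users);
 */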

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of
	 * dma_pages even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases
	 * to protect: NAPI TX thread and sendmsg error paths in the SKB
	 * destructor callback and when sockets share a single cq when the
	 * same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
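
/* Worked example (illustrative values): a dma_pages[] entry is page
 * aligned, so bit 0 is free to mark that the *next* umem page is also
 * physically contiguous in DMA space:
 *
 *	dma_pages[i] = page_dma | XSK_NEXT_PG_CONTIG_MASK;	// next page contiguous
 *	page_dma = dma_pages[i] & ~XSK_NEXT_PG_CONTIG_MASK;	// recover the address
 */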

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
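
/* Hedged sketch of the control-path lifetime (error handling omitted;
 * "xs" is an already set up XDP socket, and the teardown claim follows
 * xp_put_pool()'s bool return value):
 *
 *	pool = xp_create_and_assign_umem(xs, umem);
 *	err = xp_assign_dev(pool, netdev, queue_id, flags);
 *	...
 *	if (xp_put_pool(pool))
 *		;	// last user gone, teardown has been scheduled
 */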

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}
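
/* Worked example with illustrative numbers: PAGE_SIZE 4096, addr 0x1804,
 * dma_pages[1] == 0x70000000 (contig flag clear), pool->headroom 0 and
 * XDP_PACKET_HEADROOM 256:
 *
 *	frame_dma = 0x70000000 + 0x804 = 0x70000804
 *	dma = 0x70000804 + 0 + 256 = 0x70000904
 */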

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
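
/* Hedged sketch of a driver RX refill loop built on xp_alloc_batch()
 * (drivers normally go through the xsk_buff_* wrappers in
 * <net/xdp_sock_drv.h>; the ring helper below is hypothetical):
 *
 *	struct xdp_buff *bufs[64];
 *	u32 i, n = xp_alloc_batch(pool, bufs, 64);
 *
 *	for (i = 0; i < n; i++) {
 *		struct xdp_buff_xsk *xskb =
 *			container_of(bufs[i], struct xdp_buff_xsk, xdp);
 *
 *		post_rx_desc(ring, xp_get_dma(xskb));	// hypothetical
 *	}
 */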
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	if (!xskb->pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
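
/* Hedged usage sketch: sync for the CPU on RX completion before the XDP
 * program touches the data, and for the device on TX before posting the
 * descriptor (the length field is illustrative):
 *
 *	xp_dma_sync_for_cpu(xskb);			// RX path
 *	xp_dma_sync_for_device(pool, dma, desc->len);	// TX path
 */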
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}
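
/* Worked example (illustrative values, PAGE_SIZE 4096): a descriptor with
 * addr 4000 and len 200 spills 104 bytes into the next page. It is still
 * usable for zero-copy if dma_pages[0] has XSK_NEXT_PG_CONTIG_MASK set,
 * i.e. page 1 is DMA-contiguous with page 0.
 */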

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}
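
/* Sketch of the unaligned encoding these helpers assume: per
 * <linux/if_xdp.h>, XSK_UNALIGNED_BUF_OFFSET_SHIFT is 48, so the low 48
 * bits carry the base address and the high 16 bits the offset:
 *
 *	addr = (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | base;
 *	xp_unaligned_extract_addr(addr);	// == base
 *	xp_unaligned_extract_offset(addr);	// == offset
 *	xp_unaligned_add_offset_to_addr(addr);	// == base + offset
 */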

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
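
/* Hedged example: with orig_addr 0x2000, pool->headroom 0 and data
 * starting 256 bytes past data_hard_start, the aligned-mode handle is
 * 0x2000 + 256 = 0x2100, while unaligned mode packs the same offset into
 * the upper bits: 0x2000 | (256ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT).
 */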

#endif /* XSK_BUFF_POOL_H_ */