#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues.  It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure.  This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context.  Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog.  A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring.  If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory.  In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload).  The register API is
 * also mandatory during RX-ring setup.
 */

enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
	u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
						   * pressure
						   */
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}

static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
	xdp->flags = 0;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}
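
/* Illustrative usage (not part of the API): a driver RX path typically
 * calls xdp_init_buff() once per NAPI poll and xdp_prepare_buff() once
 * per packet.  The names rxring, page and pkt_len are placeholders,
 * assuming a page-per-packet layout with XDP_PACKET_HEADROOM reserved:
 *
 *	struct xdp_buff xdp;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &rxring->xdp_rxq);
 *	xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
 *			 pkt_len, false);
 */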

/* Reserve memory area at end-of frame area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end.  Notice same area (and size)
 * is used for XDP_PASS, when constructing an SKB via build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}

static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
{
	unsigned int len = xdp->data_end - xdp->data;
	struct skb_shared_info *sinfo;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zero'ed when bq->xa gets updated */
	bq->xa = NULL;
}

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	skb_shinfo(skb)->nr_frags = nr_frags;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
	skb->pfmemalloc |= pfmemalloc;
}
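
/* Illustrative sketch (placeholder names, assumed multi-buffer RX path):
 * after the frags of an xdp_buff end up in the skb built around it, a
 * driver can sync len/truesize/pfmemalloc in one call:
 *
 *	sinfo = xdp_get_shared_info_from_buff(&xdp);
 *	xdp_update_skb_shared_info(skb, sinfo->nr_frags,
 *				   sinfo->xdp_frags_size,
 *				   sinfo->nr_frags * frag_truesize,
 *				   xdp_buff_is_frag_pfmemalloc(&xdp));
 */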

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
	xdp->flags = frame->flags;
}

static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}
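
/* Illustrative XDP_TX handling (placeholder names): convert the buff to
 * a frame before queueing it on the driver's own TX ring, falling back
 * to a drop when the conversion fails:
 *
 *	xdpf = xdp_convert_buff_to_frame(&xdp);
 *	if (unlikely(!xdpf))
 *		goto drop;
 *	if (my_driver_xmit_xdp_frame(txring, xdpf))
 *		xdp_return_frame_rx_napi(xdpf);
 */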

void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);
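
/* Illustrative bulk-free pattern (assumed TX-completion context; the
 * final xdp_flush_frame_bulk() releases frames still queued in @bq):
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();	// xdp_return_frame_bulk() needs RCU read lock
 *	for (... each completed xdpf ...)
 *		xdp_return_frame_bulk(xdpf, &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */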

/* When sending xdp_frame into the network stack, then there is no
 * return point callback, which is needed to release e.g. DMA-mapping
 * resources with page_pool.  Thus, have explicit function to release
 * frame resources.
 */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct skb_shared_info *sinfo;
	int i;

	/* Supports only page_pool mode */
	if (mem->type != MEM_TYPE_PAGE_POOL)
		return;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_release_frame(page_address(page), mem);
	}
out:
	__xdp_release_frame(xdpf->data, mem);
}

static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	unsigned int len = xdpf->len;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size);
static inline int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		 struct net_device *dev, u32 queue_index,
		 unsigned int napi_id)
{
	return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
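
/* Illustrative RX-ring setup order (placeholder names): register the
 * rxq info first, then its memory model; unregister in driver teardown:
 *
 *	err = xdp_rxq_info_reg(&rxring->xdp_rxq, netdev, rxring->idx,
 *			       napi->napi_id);
 *	if (err)
 *		return err;
 *	err = xdp_rxq_info_reg_mem_model(&rxring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, rxring->pp);
 *	if (err)
 *		xdp_rxq_info_unreg(&rxring->xdp_rxq);
 */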

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	/* Metadata length must be a multiple of 4 and at most 32 bytes */
	return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

#endif /* __LINUX_NET_XDP_H__ */