/*
 * AF_XDP (XDP socket) user-space access library: single-producer/
 * single-consumer ring accessors plus UMEM and socket setup helpers.
 */

#ifndef __XSK_H
#define __XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include <bpf/libbpf.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Load-acquire/store-release barriers used by the ring accessors
 * below. These macros are library internals, not part of the public
 * xsk.h API, and may change at any time.
 */

#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_store_release(p, v)					\
	do {								\
		asm volatile("" : : : "memory");			\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
# define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1 = __XSK_READ_ONCE(*p);			\
		asm volatile("" : : : "memory");			\
		___p1;							\
	})
#elif defined(__aarch64__)
# define libbpf_smp_store_release(p, v)					\
		asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1;					\
		asm volatile ("ldar %w0, %1"				\
			      : "=r" (___p1) : "Q" (*p) : "memory");	\
		___p1;							\
	})
#elif defined(__riscv)
# define libbpf_smp_store_release(p, v)					\
	do {								\
		asm volatile ("fence rw,w" : : : "memory");		\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
# define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1 = __XSK_READ_ONCE(*p);			\
		asm volatile ("fence r,rw" : : : "memory");		\
		___p1;							\
	})
#endif

#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v)					\
	do {								\
		__sync_synchronize();					\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1 = __XSK_READ_ONCE(*p);			\
		__sync_synchronize();					\
		___p1;							\
	})
#endif
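
/* The pairing is: a store-release of a producer/consumer index in one
 * domain (user space or kernel) is matched by a load-acquire of that
 * same index in the other, so descriptor contents written before the
 * release are guaranteed visible after the acquire.
 */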

/* Do not access these ring members directly; use the accessor
 * functions below. cached_prod/cached_cons are local copies that cut
 * down on the number of reads of the shared producer/consumer indexes.
 */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);
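
/* Illustrative sketch only (assumes an AF_XDP socket fd 'fd' and a
 * struct xdp_mmap_offsets 'off' obtained via the XDP_MMAP_OFFSETS
 * getsockopt): the pointer members above point into the kernel-mapped
 * ring region, e.g. for the RX ring:
 *
 *	void *map = mmap(NULL, off.rx.desc + size * sizeof(struct xdp_desc),
 *			 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			 fd, XDP_PGOFF_RX_RING);
 *	rx.producer = map + off.rx.producer;
 *	rx.consumer = map + off.rx.consumer;
 *	rx.flags    = map + off.rx.flags;
 *	rx.ring     = map + off.rx.desc;
 *	rx.mask     = size - 1;		(size is a power of two)
 *
 * xsk_socket__create() performs this mapping for you.
 */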

/* For a detailed explanation of the memory barriers associated with
 * the ring, see the kernel's net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}
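
/* Usage sketch (assumes the socket was bound with XDP_USE_NEED_WAKEUP
 * and 'fd' is the socket file descriptor; both are assumptions, not
 * part of this header): when the kernel sets XDP_RING_NEED_WAKEUP the
 * driver needs an explicit kick, e.g. for the TX ring:
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * For the fill ring, a poll()/recvmsg() on the socket serves the same
 * purpose.
 */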

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer
	 * so that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = libbpf_smp_load_acquire(r->consumer);
	r->cached_cons += r->size;

	return r->cached_cons - r->cached_prod;
}

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = libbpf_smp_load_acquire(r->producer);
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
	/* Make sure everything has been written to the ring before
	 * indicating this to the kernel by writing the producer pointer.
	 */
	libbpf_smp_store_release(prod->producer, *prod->producer + nb);
}
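
/* TX path sketch (illustrative; 'tx' is a bound TX ring and
 * 'frame_addr'/'frame_len' are hypothetical UMEM frame coordinates):
 *
 *	__u32 idx;
 *
 *	if (xsk_ring_prod__reserve(&tx, 1, &idx) == 1) {
 *		struct xdp_desc *desc = xsk_ring_prod__tx_desc(&tx, idx);
 *
 *		desc->addr = frame_addr;
 *		desc->len = frame_len;
 *		xsk_ring_prod__submit(&tx, 1);
 *	}
 */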

static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
	__u32 entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
}
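
/* RX path sketch (illustrative; 'rx' is a bound RX ring, 'umem_area'
 * the UMEM base address and process() a hypothetical handler): peek a
 * batch, process it, then release. xsk_ring_cons__cancel() hands back
 * entries that were peeked but not consumed.
 *
 *	__u32 idx, i, n = xsk_ring_cons__peek(&rx, 64, &idx);
 *
 *	for (i = 0; i < n; i++) {
 *		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i);
 *		void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *
 *		process(pkt, desc->len);
 *	}
 *	xsk_ring_cons__release(&rx, n);
 */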

static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}

/* In unaligned-chunk mode, a descriptor address packs an offset into
 * its upper bits; the helpers below split and recombine the two parts.
 */
static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
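
/* Example (unaligned UMEM mode, i.e. the UMEM was registered with
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG; 'desc' and 'umem_area' are
 * assumptions): locating the packet data behind a descriptor:
 *
 *	__u64 addr = xsk_umem__add_offset_to_addr(desc->addr);
 *	void *pkt = xsk_umem__get_data(umem_area, addr);
 */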

int xsk_umem__fd(const struct xsk_umem *umem);
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS          0

struct xsk_umem_config {
	__u32 fill_size;	/* number of fill ring entries */
	__u32 comp_size;	/* number of completion ring entries */
	__u32 frame_size;	/* UMEM frame (chunk) size in bytes */
	__u32 frame_headroom;	/* bytes reserved before packet data */
	__u32 flags;		/* XDP_UMEM_* flags */
};
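
/* A configuration equivalent to the defaults (illustrative; passing
 * NULL as the config to xsk_umem__create() selects the same values):
 *
 *	struct xsk_umem_config cfg = {
 *		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *		.flags = XSK_UMEM__DEFAULT_FLAGS,
 *	};
 */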

int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd);
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
	__u32 rx_size;		/* number of RX ring entries */
	__u32 tx_size;		/* number of TX ring entries */
	__u32 libbpf_flags;	/* XSK_LIBBPF_FLAGS__* */
	__u32 xdp_flags;	/* XDP_FLAGS_* attach flags */
	__u16 bind_flags;	/* XDP_COPY, XDP_ZEROCOPY, XDP_USE_NEED_WAKEUP */
};

/* Set config to NULL to get the default configuration. */
int xsk_umem__create(struct xsk_umem **umem,
		     void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
		       const char *ifname, __u32 queue_id,
		       struct xsk_umem *umem,
		       struct xsk_ring_cons *rx,
		       struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      const char *ifname,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *config);
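
/* End-to-end setup sketch (illustrative only; error handling omitted,
 * and NUM_FRAMES, "eth0" and queue 0 are assumptions):
 *
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	void *area = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
 *	xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 *	...
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 */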

/* Returns 0 for success and -errno for failure. */
int xsk_umem__delete(struct xsk_umem *umem);
void xsk_socket__delete(struct xsk_socket *xsk);

#ifdef __cplusplus
}
#endif

#endif /* __XSK_H */