/* include/net/xdp_sock.h — LXR code-browser navigation chrome removed. */
/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

0021 struct xdp_umem {
0022     void *addrs;
0023     u64 size;
0024     u32 headroom;
0025     u32 chunk_size;
0026     u32 chunks;
0027     u32 npgs;
0028     struct user_struct *user;
0029     refcount_t users;
0030     u8 flags;
0031     bool zc;
0032     struct page **pgs;
0033     int id;
0034     struct list_head xsk_dma_list;
0035     struct work_struct work;
0036 };
0038 struct xsk_map {
0039     struct bpf_map map;
0040     spinlock_t lock; /* Synchronize map updates */
0041     struct xdp_sock __rcu *xsk_map[];
0042 };
0044 struct xdp_sock {
0045     /* struct sock must be the first member of struct xdp_sock */
0046     struct sock sk;
0047     struct xsk_queue *rx ____cacheline_aligned_in_smp;
0048     struct net_device *dev;
0049     struct xdp_umem *umem;
0050     struct list_head flush_node;
0051     struct xsk_buff_pool *pool;
0052     u16 queue_id;
0053     bool zc;
0054     enum {
0055         XSK_READY = 0,
0056         XSK_BOUND,
0057         XSK_UNBOUND,
0058     } state;
0059 
0060     struct xsk_queue *tx ____cacheline_aligned_in_smp;
0061     struct list_head tx_list;
0062     /* Protects generic receive. */
0063     spinlock_t rx_lock;
0064 
0065     /* Statistics */
0066     u64 rx_dropped;
0067     u64 rx_queue_full;
0068 
0069     struct list_head map_list;
0070     /* Protects map_list */
0071     spinlock_t map_list_lock;
0072     /* Protects multiple processes in the control path */
0073     struct mutex mutex;
0074     struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
0075     struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
0076 };

#ifdef CONFIG_XDP_SOCKETS

0080 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
0081 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
0082 void __xsk_map_flush(void);

#else

0086 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
0087 {
0088     return -ENOTSUPP;
0089 }
0091 static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
0092 {
0093     return -EOPNOTSUPP;
0094 }
0096 static inline void __xsk_map_flush(void)
0097 {
0098 }

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */