/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>

/*      0 - Reserved to indicate value not set
 *     1..NR_CPUS - Reserved for sender_cpu
 *  NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
    return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
    return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
            bool (*loop_end)(void *, unsigned long),
            void *loop_end_arg, bool prefer_busy_poll, u16 budget);

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
    return false;
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
    return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    /* local_clock() is in ns; shifting by 10 gives roughly 1 usec resolution */
    return (unsigned long)(local_clock() >> 10);
#else
    return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

    if (bp_usec) {
        unsigned long end_time = start_time + bp_usec;
        unsigned long now = busy_loop_current_time();

        return time_after(now, end_time);
    }
#endif
    return true;
}

static inline bool sk_busy_loop_timeout(struct sock *sk,
                    unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

    if (bp_usec) {
        unsigned long end_time = start_time + bp_usec;
        unsigned long now = busy_loop_current_time();

        return time_after(now, end_time);
    }
#endif
    return true;
}

/* busy poll the NAPI context recorded in sk->sk_napi_id, if it is valid */
static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

    if (napi_id >= MIN_NAPI_ID)
        napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
                   READ_ONCE(sk->sk_prefer_busy_poll),
                   READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
                    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    /* If the skb was already marked with a valid NAPI ID, avoid overwriting
     * it.
     */
    if (skb->napi_id < MIN_NAPI_ID)
        skb->napi_id = napi->napi_id;
#endif
}

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
        WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
    sk_rx_queue_update(sk, skb);
}

/* Variant of sk_mark_napi_id() for passive flow setup,
 * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
 * needs to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
                       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
    sk_rx_queue_set(sk, skb);
}

static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    if (!READ_ONCE(sk->sk_napi_id))
        WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
                    const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    __sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}

static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
                        const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
    __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}

#endif /* _LINUX_NET_BUSY_POLL_H */
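
For context, the sketch below shows one plausible way these helpers fit together when CONFIG_NET_RX_BUSY_POLL is enabled: a driver's NAPI poll routine tags received skbs via skb_mark_napi_id(), and a socket receive path busy-polls via sk_busy_loop() before sleeping. The example_* names are hypothetical; only the busy_poll.h helpers and the core NAPI/skb calls (napi_gro_receive, napi_complete_done, skb_queue_empty_lockless) come from the kernel.

/* Hedged usage sketch, not part of busy_poll.h. example_fetch_rx_skb() is a
 * hypothetical driver helper standing in for real descriptor-ring handling.
 */
#include <linux/skbuff.h>
#include <net/busy_poll.h>
#include <net/sock.h>

static struct sk_buff *example_fetch_rx_skb(struct napi_struct *napi); /* hypothetical */

/* Driver NAPI poll: tag each received skb with the NAPI ID so the stack can
 * later find the right queue to busy-poll for the owning socket.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
    struct sk_buff *skb;
    int work_done = 0;

    while (work_done < budget && (skb = example_fetch_rx_skb(napi))) {
        skb_mark_napi_id(skb, napi);    /* records napi->napi_id in the skb */
        napi_gro_receive(napi, skb);
        work_done++;
    }
    if (work_done < budget)
        napi_complete_done(napi, work_done);
    return work_done;
}

/* Socket receive path: if busy polling is allowed for this socket and no data
 * has arrived yet, spin on the NAPI context stored in sk->sk_napi_id before
 * falling back to the usual sleep/wake path.
 */
static void example_wait_for_data(struct sock *sk, int nonblock)
{
    if (sk_can_busy_loop(sk) &&
        skb_queue_empty_lockless(&sk->sk_receive_queue))
        sk_busy_loop(sk, nonblock);
    /* ...then block in the normal wait queue if still empty... */
}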