/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

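/* Lifecycle note (inferred from the enqueue helpers later in this header):
 * a packet's cb-resident state starts as PACKET_STATE_UNCRYPTED when it
 * enters the per-device parallel queue, and the per-peer serial consumer
 * waits for it to flip to PACKET_STATE_CRYPTED (success) or
 * PACKET_STATE_DEAD (failure) before transmitting or freeing it in order.
 */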
enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
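
/* PACKET_CB() overlays struct packet_cb on the 48-byte skb->cb scratch
 * area, so per-packet crypto state travels with the skb itself, and
 * PACKET_PEER() recovers the owning peer through the keypair's index
 * table entry.
 */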

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}
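
/* Sanity check: ip_tunnel_parse_protocol() derives the protocol from the
 * bytes actually in the packet (the IP version nibble), so this confirms
 * that skb->protocol agrees with the on-wire header before later code
 * trusts it.
 */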

static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}
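
/* Note: the encapsulating case deliberately preserves the flow hash
 * across the scrub, presumably so the outer UDP flow inherits the inner
 * flow's steering; everything else is reset so that no stale metadata
 * leaks from one side of the tunnel to the other.
 */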

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}
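
/* A minimal usage sketch, mirroring the per-peer call further down in
 * this header:
 *
 *	int cpu = wg_cpumask_choose_online(&peer->serial_work_cpu,
 *					   peer->internal_id);
 *
 * The chosen CPU is cached in *stored_cpu, so a peer's serialized work
 * stays on one CPU for as long as that CPU remains online, while
 * internal_id spreads different peers across the online mask.
 */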

/* This function is racy, in the sense that next is unlocked, so it could
 * return the same CPU twice. A race-free version would instead store an
 * atomic sequence number, do an increment-and-return, and then iterate
 * through every possible CPU until reaching that index, as
 * wg_cpumask_choose_online() above does. However, that's a bit slower, and
 * the consequences of this race are harmless, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}
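
/* A sketch of the intended single-consumer loop, modeled on the API
 * above (`queue`, `skb`, and `state` are illustrative locals, not names
 * this header defines):
 *
 *	struct sk_buff *skb;
 *	int state;
 *
 *	while ((skb = wg_prev_queue_peek(queue)) != NULL &&
 *	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 *			PACKET_STATE_UNCRYPTED) {
 *		wg_prev_queue_drop_peeked(queue);
 *		... transmit or free skb, depending on state ...
 *	}
 *
 * Peeking without dropping lets the consumer stop at the first packet
 * that is still being encrypted or decrypted, preserving ordering.
 */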

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * consuming it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
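
/* A hypothetical call from the decrypt path, to illustrate how a device
 * queue is paired with a peer queue (the field names here are
 * assumptions for illustration, not taken from this header):
 *
 *	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
 *			&peer->rx_queue, skb, wg->packet_crypt_wq,
 *			&wg->decrypt_queue.last_cpu);
 */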

static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}
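
/* The tx and rx variants above differ only in how the serial consumer is
 * kicked: tx queues peer->transmit_packet_work on the peer's chosen CPU,
 * while rx lets NAPI polling drain the peer queue via napi_schedule().
 */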

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */