// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

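/* Allocates one worker per possible CPU, each initialized with the same
 * work function and the same context pointer, so that callers can later
 * queue work on whichever CPU is appropriate.
 */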
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
    int cpu;
    struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

    if (!worker)
        return NULL;

    for_each_possible_cpu(cpu) {
        per_cpu_ptr(worker, cpu)->ptr = ptr;
        INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
    }
    return worker;
}

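/* Sets up a crypt_queue: a ptr_ring of capacity len together with the
 * per-CPU workers that service it. If the worker allocation fails, the
 * ring is torn down again before returning -ENOMEM.
 */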
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
             unsigned int len)
{
    int ret;

    memset(queue, 0, sizeof(*queue));
    ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
    if (ret)
        return ret;
    queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
    if (!queue->worker) {
        ptr_ring_cleanup(&queue->ring, NULL);
        return -ENOMEM;
    }
    return 0;
}

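/* Releases the per-CPU workers and the ring. With purge set, any skbs
 * still queued are freed via __skb_array_destroy_skb; without it, the
 * ring is expected to be empty already, and a leftover entry triggers
 * the WARN_ON.
 */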
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
    free_percpu(queue->worker);
    WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
    ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

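/* The prev_queue below is an intrusive, Vyukov-style multi-producer,
 * single-consumer queue. It links skbs through their ->prev pointer
 * (aliased as NEXT), so no per-element allocation is needed, and it
 * embeds a stub element (queue->empty) so that head always points at
 * a valid node for producers to link onto.
 */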
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

void wg_prev_queue_init(struct prev_queue *queue)
{
    NEXT(STUB(queue)) = NULL;
    queue->head = queue->tail = STUB(queue);
    queue->peeked = NULL;
    atomic_set(&queue->count, 0);
    /* STUB() casts &queue->empty to an skb, so the next/prev fields of
     * the two structures must be laid out identically.
     */
    BUILD_BUG_ON(
        offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
                            offsetof(struct prev_queue, empty) ||
        offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
                             offsetof(struct prev_queue, empty));
}

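/* Publishes skb at the head of the list. The release semantics of the
 * xchg guarantee that skb's cleared NEXT pointer is visible before the
 * old head is linked to it, pairing with the consumer's acquire loads.
 */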
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
    WRITE_ONCE(NEXT(skb), NULL);
    WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

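/* Multi-producer entry point: reserves a slot via the atomic counter,
 * refusing the packet once MAX_QUEUED_PACKETS is reached, and only then
 * links the skb into the list.
 */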
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
    if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
        return false;
    __wg_prev_queue_enqueue(queue, skb);
    return true;
}

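/* Single-consumer dequeue. A producer that has swapped the head but not
 * yet written the link leaves a transiently broken chain; in that case
 * this returns NULL and the caller simply tries again later. When only
 * one real element remains, the stub is re-enqueued behind it so that
 * the element can be handed out without racing against later enqueues.
 */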
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
    struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

    if (tail == STUB(queue)) {
        /* Skip past the stub, or report empty if nothing follows it. */
        if (!next)
            return NULL;
        queue->tail = next;
        tail = next;
        next = smp_load_acquire(&NEXT(next));
    }
    if (next) {
        queue->tail = next;
        atomic_dec(&queue->count);
        return tail;
    }
    /* tail has no successor yet. If it is not also the head, a producer
     * is still mid-enqueue, so behave as empty and let the caller retry.
     */
    if (tail != READ_ONCE(queue->head))
        return NULL;
    /* tail is the single remaining element: enqueue the stub so that
     * tail gains a successor and can be returned safely.
     */
    __wg_prev_queue_enqueue(queue, STUB(queue));
    next = smp_load_acquire(&NEXT(tail));
    if (next) {
        queue->tail = next;
        atomic_dec(&queue->count);
        return tail;
    }
    return NULL;
}

#undef NEXT
#undef STUB