// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include "queueing.h"
#include <linux/skb_array.h>
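
/* Allocates a per-CPU array of workers, with each CPU's work item initialized
 * to run @function and carrying @ptr as its context pointer.
 */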
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}
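
/* Initializes a crypt_queue: a ptr_ring of capacity @len plus the per-CPU
 * workers that will process it. If worker allocation fails, the ring is
 * cleaned up and -ENOMEM is returned.
 */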
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}
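
/* Frees the per-CPU workers and the ring. When @purge is false the ring is
 * expected to be empty already; when it is true, any packets still queued are
 * dropped via __skb_array_destroy_skb().
 */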
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
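
/* The prev_queue below is an intrusive MPSC (multi-producer, single-consumer)
 * queue in the style of Dmitry Vyukov's design: packets are linked through the
 * skb->prev pointer (aliased as NEXT), and the queue's embedded empty element
 * serves as the stub node (STUB), so enqueueing never allocates.
 */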
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)
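
/* Sets up an empty queue: both head and tail point at the stub. The
 * BUILD_BUG_ON checks that the embedded empty element's layout matches the
 * first two pointers of struct sk_buff, so STUB() may safely be cast to one.
 */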
void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
						  offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
						  offsetof(struct prev_queue, empty));
}
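
/* Lock-free push: swing head to the new skb with release semantics, then link
 * the previous head to it. Safe for concurrent producers.
 */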
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}
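
/* Bounded enqueue for producers: fails once MAX_QUEUED_PACKETS packets are
 * already queued, otherwise pushes the skb.
 */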
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}
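
/* Single-consumer dequeue. Skips over the stub, and returns NULL either when
 * the queue is empty or when a producer has swung head but not yet linked its
 * skb, in which case the caller simply tries again later. The stub is
 * re-enqueued when the last real element is about to be removed.
 */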
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB