// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include "bpf_experimental.h"
#include "bpf_qdisc_common.h"
/* GPL license declaration: required to use GPL-only kfuncs below. */
char _license[] SEC("license") = "GPL";
/*
 * One FIFO element: holds exclusive ownership of an skb (via the __kptr
 * field) while the packet waits in the list, plus the list linkage used
 * by q_fifo's __contains annotation.
 */
struct skb_node {
struct sk_buff __kptr * skb;
struct bpf_list_node node;
};
/* Spin lock guarding q_fifo. private(A) places both objects in the same
 * section so the verifier associates this lock with that list. */
private(A) struct bpf_spin_lock q_fifo_lock;
private(A) struct bpf_list_head q_fifo __contains(skb_node, node);
/* Set to true by bpf_fifo_init; presumably read by the userspace test
 * harness to confirm the struct_ops attached — verify against caller. */
bool init_called;
/*
 * .enqueue: append one skb to the tail of the FIFO.
 *
 * Drops the packet (via bpf_qdisc_skb_drop into @to_free) and returns
 * NET_XMIT_DROP when the queue already holds sch->limit packets or when
 * node allocation fails; otherwise queues it and returns NET_XMIT_SUCCESS.
 */
SEC("struct_ops/bpf_fifo_enqueue")
int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
struct bpf_sk_buff_ptr *to_free)
{
struct skb_node *skbn;
u32 pkt_len;
/* Hard FIFO capacity check: refuse new packets at sch->limit. */
if (sch->q.qlen == sch->limit)
goto drop;
skbn = bpf_obj_new(typeof(*skbn));
if (!skbn)
goto drop;
/* Capture the length before ownership of skb moves into the node. */
pkt_len = qdisc_pkt_len(skb);
sch->q.qlen++;
/* Move skb into the node's kptr slot; the xchg returns the previous
 * value. A fresh node holds NULL, but the verifier still requires any
 * returned pointer to be released, hence the defensive drop. */
skb = bpf_kptr_xchg(&skbn->skb, skb);
if (skb)
bpf_qdisc_skb_drop(skb, to_free);
/* Only the list operation happens inside the lock window. */
bpf_spin_lock(&q_fifo_lock);
bpf_list_push_back(&q_fifo, &skbn->node);
bpf_spin_unlock(&q_fifo_lock);
sch->qstats.backlog += pkt_len;
return NET_XMIT_SUCCESS;
drop:
bpf_qdisc_skb_drop(skb, to_free);
return NET_XMIT_DROP;
}
/*
 * .dequeue: pop the oldest node, take its skb out, free the node, and
 * update qdisc stats. Returns NULL when the queue is empty (or, defensively,
 * if a popped node carried no skb).
 */
SEC("struct_ops/bpf_fifo_dequeue")
struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
{
struct bpf_list_node *node;
struct sk_buff *skb = NULL;
struct skb_node *skbn;
/* Keep the critical section minimal: only the list pop under the lock. */
bpf_spin_lock(&q_fifo_lock);
node = bpf_list_pop_front(&q_fifo);
bpf_spin_unlock(&q_fifo_lock);
if (!node)
return NULL;
skbn = container_of(node, struct skb_node, node);
/* Swap NULL in, taking ownership of the stored skb out of the node
 * before the node itself is released. */
skb = bpf_kptr_xchg(&skbn->skb, skb);
bpf_obj_drop(skbn);
if (!skb)
return NULL;
/* Stats are only adjusted on the success path; enqueue always stores
 * an skb, so the empty-slot early return above should not occur. */
sch->qstats.backlog -= qdisc_pkt_len(skb);
bpf_qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
/*
 * .init: fix the queue capacity at 1000 packets and record that init ran.
 * The netlink configuration (@opt/@extack) is ignored; always succeeds.
 */
SEC("struct_ops/bpf_fifo_init")
int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
sch->limit = 1000;
init_called = true;
return 0;
}
/*
 * .reset: drain the list, freeing every queued skb and its node, then
 * zero qlen. The loop is bounded by the current qlen via bpf_for so the
 * BPF verifier can prove termination; it breaks early if the list
 * empties first.
 */
SEC("struct_ops/bpf_fifo_reset")
void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch)
{
struct bpf_list_node *node;
struct skb_node *skbn;
int i;
bpf_for(i, 0, sch->q.qlen) {
struct sk_buff *skb = NULL;
bpf_spin_lock(&q_fifo_lock);
node = bpf_list_pop_front(&q_fifo);
bpf_spin_unlock(&q_fifo_lock);
if (!node)
break;
skbn = container_of(node, struct skb_node, node);
/* Take ownership of the stored skb (if any) and free both pieces. */
skb = bpf_kptr_xchg(&skbn->skb, skb);
if (skb)
bpf_kfree_skb(skb);
bpf_obj_drop(skbn);
}
/* NOTE(review): qstats.backlog is not reset here — presumably the
 * caller handles it; confirm against the kernel qdisc reset path. */
sch->q.qlen = 0;
}
/* .destroy: intentionally empty — no per-qdisc state to release here. */
SEC("struct_ops")
void BPF_PROG(bpf_fifo_destroy, struct Qdisc *sch)
{
}
/*
 * struct_ops map registering this program set as Qdisc_ops "bpf_fifo".
 * The kernel calls these callbacks in place of a built-in qdisc
 * implementation once the map is attached.
 */
SEC(".struct_ops")
struct Qdisc_ops fifo = {
.enqueue = (void *)bpf_fifo_enqueue,
.dequeue = (void *)bpf_fifo_dequeue,
.init = (void *)bpf_fifo_init,
.reset = (void *)bpf_fifo_reset,
.destroy = (void *)bpf_fifo_destroy,
.id = "bpf_fifo",
};