// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
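
/* Record format pushed to user space through perfmap; each *_size field
 * holds the bpf_get_stack() return value for the buffer next to it.
 */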
struct stack_trace_t {
int pid;
int kern_stack_size;
int user_stack_size;
int user_stack_buildid_size;
__u64 kern_stack[MAX_STACK_RAWTP];
__u64 user_stack[MAX_STACK_RAWTP];
struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
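
/* perf event array for streaming both records built below to user space */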
struct bpf_map_def SEC("maps") perfmap = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(__u32),
.max_entries = 2,
};
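
/* Single-entry per-cpu array used as scratch space: struct stack_trace_t
 * is several kilobytes, far over the 512-byte BPF program stack limit.
 */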
struct bpf_map_def SEC("maps") stackdata_map = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct stack_trace_t),
.max_entries = 1,
};
/* Allocate per-cpu space twice the size actually needed. For the code below:
* usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
* if (usize < 0)
* return 0;
* ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
*
 * If we had value_size = MAX_STACK_RAWTP * sizeof(__u64), the verifier
 * would complain that the access "raw_data + usize" with size
 * "max_len - usize" may be out of bounds: the maximum of
 * "raw_data + usize" is "raw_data + max_len" and the maximum of
 * "max_len - usize" is "max_len", so the verifier concludes that the
 * buffer may be accessed anywhere in "raw_data[0 ... max_len * 2 - 1]"
 * and rejects the program.
*
 * Doubling the buffer size fixes this verifier issue and avoids
 * contorting the C code to prove the bound. This is an acceptable
 * workaround since the map has only a single entry.
*/
struct bpf_map_def SEC("maps") rawdata_map = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
.max_entries = 1,
};
SEC("tracepoint/raw_syscalls/sys_enter")
int bpf_prog1(void *ctx)
{
int max_len, max_buildid_len, usize, ksize, total_size;
struct stack_trace_t *data;
void *raw_data;
__u32 key = 0;
data = bpf_map_lookup_elem(&stackdata_map, &key);
if (!data)
return 0;
max_len = MAX_STACK_RAWTP * sizeof(__u64);
max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
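	/* bpf_get_current_pid_tgid() packs the tgid in the upper 32 bits
	 * and the thread pid in the lower 32; storing into an int keeps
	 * the pid.
	 */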
data->pid = bpf_get_current_pid_tgid();
data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
max_len, 0);
data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
BPF_F_USER_STACK);
data->user_stack_buildid_size = bpf_get_stack(
ctx, data->user_stack_buildid, max_buildid_len,
BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
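	/* Ship the whole record; any *_size field left negative is an
	 * error code from bpf_get_stack() for user space to check.
	 */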
bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));
/* write both kernel and user stacks to the same buffer */
raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
if (!raw_data)
return 0;
usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
if (usize < 0)
return 0;
ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
if (ksize < 0)
return 0;
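	/* usize + ksize cannot exceed max_len by construction, but the
	 * explicit bound below gives the verifier a clean range for the
	 * variable-size output.
	 */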
total_size = usize + ksize;
if (total_size > 0 && total_size <= max_len)
bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);
return 0;
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
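
/*
 * For context, a minimal user-space consumer sketch (illustrative only,
 * not part of this program). It assumes this file is compiled to
 * test_get_stack_rawtp.o and uses the libbpf v1.x perf_buffer API; note
 * that libbpf >= 1.0 dropped legacy bpf_map_def maps, so a loader that
 * new would also want the maps above converted to BTF-defined form.
 * Error handling is elided.
 *
 *	#include <stdio.h>
 *	#include <bpf/libbpf.h>
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		printf("cpu %d: %u bytes\n", cpu, size);
 *	}
 *
 *	int main(void)
 *	{
 *		struct bpf_object *obj;
 *		struct perf_buffer *pb;
 *		int map_fd;
 *
 *		obj = bpf_object__open_file("test_get_stack_rawtp.o", NULL);
 *		bpf_object__load(obj);
 *		bpf_program__attach(bpf_object__next_program(obj, NULL));
 *		map_fd = bpf_object__find_map_fd_by_name(obj, "perfmap");
 *		pb = perf_buffer__new(map_fd, 8, on_sample, NULL, NULL, NULL);
 *		while (perf_buffer__poll(pb, 100) >= 0)
 *			;
 *		return 0;
 *	}
 */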