diff options
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--  kernel/trace/bpf_trace.c  27
1 files changed, 20 insertions, 7 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 27d1f4ffa3de..40207c2a4113 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {  	.arg4_type	= ARG_CONST_SIZE,  }; -static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); +static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);  static __always_inline u64  __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, -			u64 flags, struct perf_raw_record *raw) +			u64 flags, struct perf_sample_data *sd)  {  	struct bpf_array *array = container_of(map, struct bpf_array, map); -	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);  	unsigned int cpu = smp_processor_id();  	u64 index = flags & BPF_F_INDEX_MASK;  	struct bpf_event_entry *ee; @@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,  	if (unlikely(event->oncpu != cpu))  		return -EOPNOTSUPP; -	perf_sample_data_init(sd, 0, 0); -	sd->raw = raw;  	perf_event_output(event, sd, regs);  	return 0;  } @@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,  BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,  	   u64, flags, void *, data, u64, size)  { +	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);  	struct perf_raw_record raw = {  		.frag = {  			.size = size, @@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,  	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))  		return -EINVAL; -	return __bpf_perf_event_output(regs, map, flags, &raw); +	perf_sample_data_init(sd, 0, 0); +	sd->raw = &raw; + +	return __bpf_perf_event_output(regs, map, flags, sd);  }  static const struct bpf_func_proto bpf_perf_event_output_proto = { @@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {  };  static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); +static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);  u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,  		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)  { +	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);  	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);  	struct perf_raw_frag frag = {  		.copy		= ctx_copy, @@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,  	};  	perf_fetch_caller_regs(regs); +	perf_sample_data_init(sd, 0, 0); +	sd->raw = &raw; -	return __bpf_perf_event_output(regs, map, flags, &raw); +	return __bpf_perf_event_output(regs, map, flags, sd);  }  BPF_CALL_0(bpf_get_current_task) @@ -759,6 +764,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {  static DEFINE_MUTEX(bpf_event_mutex); +#define BPF_TRACE_MAX_PROGS 64 +  int perf_event_attach_bpf_prog(struct perf_event *event,  			       struct bpf_prog *prog)  { @@ -772,6 +779,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,  		goto unlock;  	old_array = event->tp_event->prog_array; +	if (old_array && +	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { +		ret = -E2BIG; +		goto unlock; +	} +  	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);  	if (ret < 0)  		goto unlock;  | 
