Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h | 73
1 file changed, 69 insertions(+), 4 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cc700925b802..a98c83346134 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -7,6 +7,7 @@
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/filter.h>
+#include <crypto/sha2.h>
 
 #include <linux/workqueue.h>
 #include <linux/file.h>
 #include <linux/percpu.h>
@@ -109,6 +110,7 @@ struct bpf_map_ops {
 	long (*map_pop_elem)(struct bpf_map *map, void *value);
 	long (*map_peek_elem)(struct bpf_map *map, void *value);
 	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+	int (*map_get_hash)(struct bpf_map *map, u32 hash_buf_size, void *hash_buf);
 
 	/* funcs called by prog_array and perf_event_array map */
 	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -206,6 +208,7 @@ enum btf_field_type {
 	BPF_WORKQUEUE  = (1 << 10),
 	BPF_UPTR       = (1 << 11),
 	BPF_RES_SPIN_LOCK = (1 << 12),
+	BPF_TASK_WORK  = (1 << 13),
 };
 
 enum bpf_cgroup_storage_type {
@@ -259,6 +262,7 @@ struct btf_record {
 	int timer_off;
 	int wq_off;
 	int refcount_off;
+	int task_work_off;
 	struct btf_field fields[];
 };
 
@@ -285,9 +289,11 @@ struct bpf_map_owner {
 	bool xdp_has_frags;
 	u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
 	const struct btf_type *attach_func_proto;
+	enum bpf_attach_type expected_attach_type;
 };
 
 struct bpf_map {
+	u8 sha[SHA256_DIGEST_SIZE];
 	const struct bpf_map_ops *ops;
 	struct bpf_map *inner_map_meta;
 #ifdef CONFIG_SECURITY
@@ -328,6 +334,7 @@ struct bpf_map {
 	atomic64_t sleepable_refcnt;
 	s64 __percpu *elem_count;
 	u64 cookie; /* write-once */
+	char *excl_prog_sha;
 };
 
 static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -358,6 +365,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
 		return "bpf_rb_node";
 	case BPF_REFCOUNT:
 		return "bpf_refcount";
+	case BPF_TASK_WORK:
+		return "bpf_task_work";
 	default:
 		WARN_ON_ONCE(1);
 		return "unknown";
@@ -396,6 +405,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
 		return sizeof(struct bpf_rb_node);
 	case BPF_REFCOUNT:
 		return sizeof(struct bpf_refcount);
+	case BPF_TASK_WORK:
+		return sizeof(struct bpf_task_work);
 	default:
 		WARN_ON_ONCE(1);
 		return 0;
@@ -428,6 +439,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
 		return __alignof__(struct bpf_rb_node);
 	case BPF_REFCOUNT:
 		return __alignof__(struct bpf_refcount);
+	case BPF_TASK_WORK:
+		return __alignof__(struct bpf_task_work);
 	default:
 		WARN_ON_ONCE(1);
 		return 0;
@@ -459,6 +472,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
 	case BPF_KPTR_REF:
 	case BPF_KPTR_PERCPU:
 	case BPF_UPTR:
+	case BPF_TASK_WORK:
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -595,6 +609,7 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
 			   bool lock_src);
 void bpf_timer_cancel_and_free(void *timer);
 void bpf_wq_cancel_and_free(void *timer);
+void bpf_task_work_cancel_and_free(void *timer);
 void bpf_list_head_free(const struct btf_field *field, void *list_head,
 			struct bpf_spin_lock *spin_lock);
 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
@@ -767,12 +782,15 @@ enum bpf_type_flag {
 	 */
 	MEM_WRITE		= BIT(18 + BPF_BASE_TYPE_BITS),
 
+	/* DYNPTR points to skb_metadata_end()-skb_metadata_len() */
+	DYNPTR_TYPE_SKB_META	= BIT(19 + BPF_BASE_TYPE_BITS),
+
 	__BPF_TYPE_FLAG_MAX,
 	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
 };
 
 #define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
-				 | DYNPTR_TYPE_XDP)
+				 | DYNPTR_TYPE_XDP | DYNPTR_TYPE_SKB_META)
 
 /* Max number of base types. */
 #define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)
@@ -1110,7 +1128,7 @@ struct bpf_prog_offload {
  */
 #define MAX_BPF_FUNC_REG_ARGS 5
 
-/* The argument is a structure. */
+/* The argument is a structure or a union. */
 #define BTF_FMODEL_STRUCT_ARG		BIT(0)
 
 /* The argument is signed. */
@@ -1358,6 +1376,8 @@ enum bpf_dynptr_type {
 	BPF_DYNPTR_TYPE_SKB,
 	/* Underlying data is a xdp_buff */
 	BPF_DYNPTR_TYPE_XDP,
+	/* Points to skb_metadata_end()-skb_metadata_len() */
+	BPF_DYNPTR_TYPE_SKB_META,
 };
 
 int bpf_dynptr_check_size(u32 size);
@@ -1619,6 +1639,7 @@ struct bpf_prog_aux {
 	bool priv_stack_requested;
 	bool changes_pkt_data;
 	bool might_sleep;
+	bool kprobe_write_ctx;
 	u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
 	struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
 	struct bpf_arena *arena;
@@ -1628,6 +1649,7 @@ struct bpf_prog_aux {
 	/* function name for valid attach_btf_id */
 	const char *attach_func_name;
 	struct bpf_prog **func;
+	struct bpf_prog_aux *main_prog_aux;
 	void *jit_data; /* JIT specific data. arch dependent */
 	struct bpf_jit_poke_descriptor *poke_tab;
 	struct bpf_kfunc_desc_tab *kfunc_tab;
@@ -1711,7 +1733,10 @@ struct bpf_prog {
 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
 	u32			len;		/* Number of filter blocks */
 	u32			jited_len;	/* Size of jited insns in bytes */
-	u8			tag[BPF_TAG_SIZE];
+	union {
+		u8 digest[SHA256_DIGEST_SIZE];
+		u8 tag[BPF_TAG_SIZE];
+	};
 	struct bpf_prog_stats __percpu *stats;
 	int __percpu		*active;
 	unsigned int		(*bpf_func)(const void *ctx,
@@ -1985,6 +2010,7 @@ static inline void bpf_module_put(const void *data, struct module *owner)
 		module_put(owner);
 }
 int bpf_struct_ops_link_create(union bpf_attr *attr);
+u32 bpf_struct_ops_id(const void *kdata);
 
 #ifdef CONFIG_NET
 /* Define it here to avoid the use of forward declaration */
@@ -2411,6 +2437,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
+void bpf_obj_free_task_work(const struct btf_record *rec, void *obj);
 void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
 
@@ -2697,7 +2724,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 			    u64 flags);
 
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
 
 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
 				 void *key, void *value, u64 map_flags);
@@ -2874,6 +2901,7 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
 		     enum bpf_dynptr_type type, u32 offset, u32 size);
 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+void bpf_prog_report_arena_violation(bool write, unsigned long addr, unsigned long fault_ip);
 
 #else /* !CONFIG_BPF_SYSCALL */
 
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -3161,6 +3189,11 @@ static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
 static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
 {
 }
+
+static inline void bpf_prog_report_arena_violation(bool write, unsigned long addr,
+						   unsigned long fault_ip)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 static __always_inline int
@@ -3403,6 +3436,38 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
 #endif /* CONFIG_BPF_SYSCALL */
 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
 
+#if defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL)
+
+struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags);
+struct bpf_key *bpf_lookup_system_key(u64 id);
+void bpf_key_put(struct bpf_key *bkey);
+int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+			       struct bpf_dynptr *sig_p,
+			       struct bpf_key *trusted_keyring);
+
+#else
+static inline struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
+{
+	return NULL;
+}
+
+static inline struct bpf_key *bpf_lookup_system_key(u64 id)
+{
+	return NULL;
+}
+
+static inline void bpf_key_put(struct bpf_key *bkey)
+{
+}
+
+static inline int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+					     struct bpf_dynptr *sig_p,
+					     struct bpf_key *trusted_keyring)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL) */
+
 /* verifier prototypes for helper functions called from eBPF programs */
 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
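
Among the changes above, struct bpf_map gains a u8 sha[SHA256_DIGEST_SIZE] field and struct bpf_map_ops gains an optional map_get_hash callback that copies a map hash into a caller-supplied buffer. The sketch below shows one way an individual map type could back that callback, assuming map->sha has already been populated at map creation time; the function name, the size check, and the 0-on-success return convention are illustrative assumptions, not taken from this diff.

/* Illustrative sketch only: a possible backing for the new ->map_get_hash op.
 * Assumes map->sha already holds the map's SHA-256 (computed elsewhere) and
 * assumes 0-on-success as the return convention.
 */
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <crypto/sha2.h>

static int example_map_get_hash(struct bpf_map *map, u32 hash_buf_size,
				void *hash_buf)
{
	/* Reject buffers that cannot hold a full SHA-256 digest. */
	if (hash_buf_size < SHA256_DIGEST_SIZE)
		return -EINVAL;

	memcpy(hash_buf, map->sha, SHA256_DIGEST_SIZE);
	return 0;
}

A map type would then hook such a function into its const struct bpf_map_ops initializer, e.g. .map_get_hash = example_map_get_hash.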
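
The replacement of u8 tag[BPF_TAG_SIZE] in struct bpf_prog with a union of digest[SHA256_DIGEST_SIZE] and tag[BPF_TAG_SIZE] works because BPF_TAG_SIZE (8) is smaller than SHA256_DIGEST_SIZE (32): existing readers of prog->tag see the first eight bytes of the full digest. A minimal userspace illustration of that aliasing, using local stand-in constants and a stand-in struct rather than the kernel's definitions:

/* Illustration only: an 8-byte tag aliasing the leading bytes of a 32-byte
 * SHA-256 digest via an anonymous union, mirroring the bpf_prog change above.
 * The constants and struct here are local stand-ins, not kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define BPF_TAG_SIZE		8
#define SHA256_DIGEST_SIZE	32

struct prog_id {
	union {
		uint8_t digest[SHA256_DIGEST_SIZE];
		uint8_t tag[BPF_TAG_SIZE];
	};
};

int main(void)
{
	struct prog_id p = { .digest = { 0xde, 0xad, 0xbe, 0xef } };

	/* tag[] reads back the leading bytes of digest[] */
	for (int i = 0; i < BPF_TAG_SIZE; i++)
		printf("%02x", p.tag[i]);
	printf("\n");
	return 0;
}

This prints deadbeef00000000, i.e. the first eight bytes of digest[].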
