From b5964b968ac64c2ec2debee7518499113b27c34e Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 1 Mar 2023 07:49:50 -0800 Subject: bpf: Add skb dynptrs Add skb dynptrs, which are dynptrs whose underlying pointer points to a skb. The dynptr acts on skb data. skb dynptrs have two main benefits. One is that they allow operations on sizes that are not statically known at compile-time (eg variable-sized accesses). Another is that parsing the packet data through dynptrs (instead of through direct access of skb->data and skb->data_end) can be more ergonomic and less brittle (eg does not need manual if checking for being within bounds of data_end). For bpf prog types that don't support writes on skb data, the dynptr is read-only (bpf_dynptr_write() will return an error) For reads and writes through the bpf_dynptr_read() and bpf_dynptr_write() interfaces, reading and writing from/to data in the head as well as from/to non-linear paged buffers is supported. Data slices through the bpf_dynptr_data API are not supported; instead bpf_dynptr_slice() and bpf_dynptr_slice_rdwr() (added in subsequent commit) should be used. For examples of how skb dynptrs can be used, please see the attached selftests. Signed-off-by: Joanne Koong Link: https://lore.kernel.org/r/20230301154953.641654-8-joannelkoong@gmail.com Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 62ce1f5d1b1d..d0351d30e551 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5325,11 +5325,17 @@ union bpf_attr { * Description * Write *len* bytes from *src* into *dst*, starting from *offset* * into *dst*. - * *flags* is currently unused. + * + * *flags* must be 0 except for skb-type dynptrs. + * + * For skb-type dynptrs: + * * For *flags*, please see the flags accepted by + * **bpf_skb_store_bytes**\ (). * Return * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - * is a read-only dynptr or if *flags* is not 0. + * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, + * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). * * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len) * Description @@ -5337,6 +5343,9 @@ union bpf_attr { * * *len* must be a statically known value. The returned data slice * is invalidated whenever the dynptr is invalidated. + * + * skb type dynptrs may not use bpf_dynptr_data. They should + * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. * Return * Pointer to the underlying dynptr data, NULL if the dynptr is * read-only, if the dynptr is invalid, or if the offset and length -- cgit v1.2.3 From 05421aecd4ed65da0dc17b0c3c13779ef334e9e5 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 1 Mar 2023 07:49:51 -0800 Subject: bpf: Add xdp dynptrs Add xdp dynptrs, which are dynptrs whose underlying pointer points to a xdp_buff. The dynptr acts on xdp data. xdp dynptrs have two main benefits. One is that they allow operations on sizes that are not statically known at compile-time (eg variable-sized accesses). Another is that parsing the packet data through dynptrs (instead of through direct access of xdp->data and xdp->data_end) can be more ergonomic and less brittle (eg does not need manual if checking for being within bounds of data_end). 
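As a rough illustration of the ergonomics point above, here is a minimal sketch of an XDP program that parses the Ethernet header through an xdp dynptr instead of hand-written xdp->data/xdp->data_end bounds checks. The extern __ksym declaration mirrors how the selftests declare the kfunc and is an assumption of this sketch, not part of the patch itself:

  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  /* assumed BPF-side declaration of the kfunc added below */
  extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
                                 struct bpf_dynptr *ptr__uninit) __ksym;

  SEC("xdp")
  int parse_eth(struct xdp_md *ctx)
  {
          struct bpf_dynptr ptr;
          struct ethhdr eth;

          if (bpf_dynptr_from_xdp(ctx, 0, &ptr))
                  return XDP_PASS;

          /* no manual data/data_end checks; the read also works across frags */
          if (bpf_dynptr_read(&eth, sizeof(eth), &ptr, 0, 0))
                  return XDP_PASS;

          if (eth.h_proto == bpf_htons(ETH_P_IP))
                  bpf_printk("IPv4 packet");

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";
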
For reads and writes on the dynptr, this includes reading/writing from/to and across fragments. Data slices through the bpf_dynptr_data API are not supported; instead bpf_dynptr_slice() and bpf_dynptr_slice_rdwr() should be used. For examples of how xdp dynptrs can be used, please see the attached selftests. Signed-off-by: Joanne Koong Link: https://lore.kernel.org/r/20230301154953.641654-9-joannelkoong@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 8 +++++++- include/linux/filter.h | 14 ++++++++++++++ include/uapi/linux/bpf.h | 2 +- kernel/bpf/helpers.c | 9 ++++++++- kernel/bpf/verifier.c | 10 ++++++++++ net/core/filter.c | 37 +++++++++++++++++++++++++++++++++++-- tools/include/uapi/linux/bpf.h | 2 +- 7 files changed, 76 insertions(+), 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e7436d7615b0..23ec684e660d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -610,11 +610,15 @@ enum bpf_type_flag { /* DYNPTR points to sk_buff */ DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS), + /* DYNPTR points to xdp_buff */ + DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; -#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB) +#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \ + | DYNPTR_TYPE_XDP) /* Max number of base types. */ #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) @@ -1151,6 +1155,8 @@ enum bpf_dynptr_type { BPF_DYNPTR_TYPE_RINGBUF, /* Underlying data is a sk_buff */ BPF_DYNPTR_TYPE_SKB, + /* Underlying data is a xdp_buff */ + BPF_DYNPTR_TYPE_XDP, }; int bpf_dynptr_check_size(u32 size); diff --git a/include/linux/filter.h b/include/linux/filter.h index de18e844d15e..3f6992261ec5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1546,6 +1546,8 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u64 index int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len); int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags); +int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); +int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); #else /* CONFIG_NET */ static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) @@ -1558,6 +1560,18 @@ static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, { return -EOPNOTSUPP; } + +static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, + void *buf, u32 len) +{ + return -EOPNOTSUPP; +} + +static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, + void *buf, u32 len) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_NET */ #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d0351d30e551..faa304c926cf 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5344,7 +5344,7 @@ union bpf_attr { * *len* must be a statically known value. The returned data slice * is invalidated whenever the dynptr is invalidated. * - * skb type dynptrs may not use bpf_dynptr_data. They should + * skb and xdp type dynptrs may not use bpf_dynptr_data. They should * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. 
* Return * Pointer to the underlying dynptr data, NULL if the dynptr is diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e8e2414d1587..114a875a05b1 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1530,6 +1530,8 @@ BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern return 0; case BPF_DYNPTR_TYPE_SKB: return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); + case BPF_DYNPTR_TYPE_XDP: + return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); default: WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); return -EFAULT; @@ -1576,6 +1578,10 @@ BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, v case BPF_DYNPTR_TYPE_SKB: return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, flags); + case BPF_DYNPTR_TYPE_XDP: + if (flags) + return -EINVAL; + return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); default: WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); return -EFAULT; @@ -1615,7 +1621,8 @@ BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u3 case BPF_DYNPTR_TYPE_RINGBUF: return (unsigned long)(ptr->data + ptr->offset + offset); case BPF_DYNPTR_TYPE_SKB: - /* skb dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ + case BPF_DYNPTR_TYPE_XDP: + /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ return 0; default: WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4f5fce16543b..5e42946e53ab 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -752,6 +752,8 @@ static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) return BPF_DYNPTR_TYPE_RINGBUF; case DYNPTR_TYPE_SKB: return BPF_DYNPTR_TYPE_SKB; + case DYNPTR_TYPE_XDP: + return BPF_DYNPTR_TYPE_XDP; default: return BPF_DYNPTR_TYPE_INVALID; } @@ -6300,6 +6302,9 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn case DYNPTR_TYPE_SKB: err_extra = "skb "; break; + case DYNPTR_TYPE_XDP: + err_extra = "xdp "; + break; default: err_extra = ""; break; @@ -8943,6 +8948,7 @@ enum special_kfunc_type { KF_bpf_rbtree_add, KF_bpf_rbtree_first, KF_bpf_dynptr_from_skb, + KF_bpf_dynptr_from_xdp, }; BTF_SET_START(special_kfunc_set) @@ -8958,6 +8964,7 @@ BTF_ID(func, bpf_rbtree_remove) BTF_ID(func, bpf_rbtree_add) BTF_ID(func, bpf_rbtree_first) BTF_ID(func, bpf_dynptr_from_skb) +BTF_ID(func, bpf_dynptr_from_xdp) BTF_SET_END(special_kfunc_set) BTF_ID_LIST(special_kfunc_list) @@ -8975,6 +8982,7 @@ BTF_ID(func, bpf_rbtree_remove) BTF_ID(func, bpf_rbtree_add) BTF_ID(func, bpf_rbtree_first) BTF_ID(func, bpf_dynptr_from_skb) +BTF_ID(func, bpf_dynptr_from_xdp) static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta) { @@ -9731,6 +9739,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) dynptr_arg_type |= DYNPTR_TYPE_SKB; + else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) + dynptr_arg_type |= DYNPTR_TYPE_XDP; ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type); if (ret < 0) diff --git a/net/core/filter.c b/net/core/filter.c index f3afa31a9b10..c692046fa7f6 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3839,7 +3839,7 @@ static const struct bpf_func_proto sk_skb_change_head_proto = { .arg3_type = ARG_ANYTHING, }; 
-BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) +BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) { return xdp_get_buff_len(xdp); } @@ -3999,6 +3999,11 @@ static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { .arg4_type = ARG_CONST_SIZE, }; +int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len) +{ + return ____bpf_xdp_load_bytes(xdp, offset, buf, len); +} + BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, void *, buf, u32, len) { @@ -4026,6 +4031,11 @@ static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { .arg4_type = ARG_CONST_SIZE, }; +int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len) +{ + return ____bpf_xdp_store_bytes(xdp, offset, buf, len); +} + static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); @@ -11648,6 +11658,19 @@ __bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags, return 0; } + +__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags, + struct bpf_dynptr_kern *ptr__uninit) +{ + if (flags) { + bpf_dynptr_set_null(ptr__uninit); + return -EINVAL; + } + + bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp)); + + return 0; +} __diag_pop(); int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, @@ -11668,11 +11691,20 @@ BTF_SET8_START(bpf_kfunc_check_set_skb) BTF_ID_FLAGS(func, bpf_dynptr_from_skb) BTF_SET8_END(bpf_kfunc_check_set_skb) +BTF_SET8_START(bpf_kfunc_check_set_xdp) +BTF_ID_FLAGS(func, bpf_dynptr_from_xdp) +BTF_SET8_END(bpf_kfunc_check_set_xdp) + static const struct btf_kfunc_id_set bpf_kfunc_set_skb = { .owner = THIS_MODULE, .set = &bpf_kfunc_check_set_skb, }; +static const struct btf_kfunc_id_set bpf_kfunc_set_xdp = { + .owner = THIS_MODULE, + .set = &bpf_kfunc_check_set_xdp, +}; + static int __init bpf_kfunc_init(void) { int ret; @@ -11685,6 +11717,7 @@ static int __init bpf_kfunc_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_OUT, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_IN, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb); - return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb); + return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp); } late_initcall(bpf_kfunc_init); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d0351d30e551..faa304c926cf 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5344,7 +5344,7 @@ union bpf_attr { * *len* must be a statically known value. The returned data slice * is invalidated whenever the dynptr is invalidated. * - * skb type dynptrs may not use bpf_dynptr_data. They should + * skb and xdp type dynptrs may not use bpf_dynptr_data. They should * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. * Return * Pointer to the underlying dynptr data, NULL if the dynptr is -- cgit v1.2.3 From 66e3a13e7c2c44d0c9dd6bb244680ca7529a8845 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 1 Mar 2023 07:49:52 -0800 Subject: bpf: Add bpf_dynptr_slice and bpf_dynptr_slice_rdwr Two new kfuncs are added, bpf_dynptr_slice and bpf_dynptr_slice_rdwr. 
The user must pass in a buffer to store the contents of the data slice if a direct pointer to the data cannot be obtained. For skb and xdp type dynptrs, these two APIs are the only way to obtain a data slice. However, for other types of dynptrs, there is no difference between bpf_dynptr_slice(_rdwr) and bpf_dynptr_data. For skb type dynptrs, the data is copied into the user provided buffer if any of the data is not in the linear portion of the skb. For xdp type dynptrs, the data is copied into the user provided buffer if the data is between xdp frags. If the skb is cloned and a call to bpf_dynptr_data_rdwr is made, then the skb will be uncloned (see bpf_unclone_prologue()). Please note that any bpf_dynptr_write() automatically invalidates any prior data slices of the skb dynptr. This is because the skb may be cloned or may need to pull its paged buffer into the head. As such, any bpf_dynptr_write() will automatically have its prior data slices invalidated, even if the write is to data in the skb head of an uncloned skb. Please note as well that any other helper calls that change the underlying packet buffer (eg bpf_skb_pull_data()) invalidates any data slices of the skb dynptr as well, for the same reasons. Signed-off-by: Joanne Koong Link: https://lore.kernel.org/r/20230301154953.641654-10-joannelkoong@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 14 +++++ include/uapi/linux/bpf.h | 5 ++ kernel/bpf/helpers.c | 138 +++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 127 +++++++++++++++++++++++++++++++++++-- net/core/filter.c | 6 +- tools/include/uapi/linux/bpf.h | 5 ++ 6 files changed, 288 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 3f6992261ec5..efa5d4a1677e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1548,6 +1548,9 @@ int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags); int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); +void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len); +void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, + void *buf, unsigned long len, bool flush); #else /* CONFIG_NET */ static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) @@ -1572,6 +1575,17 @@ static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, { return -EOPNOTSUPP; } + +static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) +{ + return NULL; +} + +static inline void *bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, + unsigned long len, bool flush) +{ + return NULL; +} #endif /* CONFIG_NET */ #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index faa304c926cf..c9699304aed2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5329,6 +5329,11 @@ union bpf_attr { * *flags* must be 0 except for skb-type dynptrs. * * For skb-type dynptrs: + * * All data slices of the dynptr are automatically + * invalidated after **bpf_dynptr_write**\ (). This is + * because writing may pull the skb and change the + * underlying packet buffer. + * * * For *flags*, please see the flags accepted by * **bpf_skb_store_bytes**\ (). 
* Return diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 114a875a05b1..648b29e78b84 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2193,6 +2193,142 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) return p; } +/** + * bpf_dynptr_slice - Obtain a read-only pointer to the dynptr data. + * + * For non-skb and non-xdp type dynptrs, there is no difference between + * bpf_dynptr_slice and bpf_dynptr_data. + * + * If the intention is to write to the data slice, please use + * bpf_dynptr_slice_rdwr. + * + * The user must check that the returned pointer is not null before using it. + * + * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice + * does not change the underlying packet data pointers, so a call to + * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in + * the bpf program. + * + * @ptr: The dynptr whose data slice to retrieve + * @offset: Offset into the dynptr + * @buffer: User-provided buffer to copy contents into + * @buffer__szk: Size (in bytes) of the buffer. This is the length of the + * requested slice. This must be a constant. + * + * @returns: NULL if the call failed (eg invalid dynptr), pointer to a read-only + * data slice (can be either direct pointer to the data or a pointer to the user + * provided buffer, with its contents containing the data, if unable to obtain + * direct pointer) + */ +__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset, + void *buffer, u32 buffer__szk) +{ + enum bpf_dynptr_type type; + u32 len = buffer__szk; + int err; + + if (!ptr->data) + return 0; + + err = bpf_dynptr_check_off_len(ptr, offset, len); + if (err) + return 0; + + type = bpf_dynptr_get_type(ptr); + + switch (type) { + case BPF_DYNPTR_TYPE_LOCAL: + case BPF_DYNPTR_TYPE_RINGBUF: + return ptr->data + ptr->offset + offset; + case BPF_DYNPTR_TYPE_SKB: + return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer); + case BPF_DYNPTR_TYPE_XDP: + { + void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); + if (xdp_ptr) + return xdp_ptr; + + bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer, len, false); + return buffer; + } + default: + WARN_ONCE(true, "unknown dynptr type %d\n", type); + return 0; + } +} + +/** + * bpf_dynptr_slice_rdwr - Obtain a writable pointer to the dynptr data. + * + * For non-skb and non-xdp type dynptrs, there is no difference between + * bpf_dynptr_slice and bpf_dynptr_data. + * + * The returned pointer is writable and may point to either directly the dynptr + * data at the requested offset or to the buffer if unable to obtain a direct + * data pointer to (example: the requested slice is to the paged area of an skb + * packet). In the case where the returned pointer is to the buffer, the user + * is responsible for persisting writes through calling bpf_dynptr_write(). This + * usually looks something like this pattern: + * + * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); + * if (!eth) + * return TC_ACT_SHOT; + * + * // mutate eth header // + * + * if (eth == buffer) + * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); + * + * Please note that, as in the example above, the user must check that the + * returned pointer is not null before using it. 
+ * + * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr + * does not change the underlying packet data pointers, so a call to + * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in + * the bpf program. + * + * @ptr: The dynptr whose data slice to retrieve + * @offset: Offset into the dynptr + * @buffer: User-provided buffer to copy contents into + * @buffer__szk: Size (in bytes) of the buffer. This is the length of the + * requested slice. This must be a constant. + * + * @returns: NULL if the call failed (eg invalid dynptr), pointer to a + * data slice (can be either direct pointer to the data or a pointer to the user + * provided buffer, with its contents containing the data, if unable to obtain + * direct pointer) + */ +__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset, + void *buffer, u32 buffer__szk) +{ + if (!ptr->data || bpf_dynptr_is_rdonly(ptr)) + return 0; + + /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. + * + * For skb-type dynptrs, it is safe to write into the returned pointer + * if the bpf program allows skb data writes. There are two possiblities + * that may occur when calling bpf_dynptr_slice_rdwr: + * + * 1) The requested slice is in the head of the skb. In this case, the + * returned pointer is directly to skb data, and if the skb is cloned, the + * verifier will have uncloned it (see bpf_unclone_prologue()) already. + * The pointer can be directly written into. + * + * 2) Some portion of the requested slice is in the paged buffer area. + * In this case, the requested data will be copied out into the buffer + * and the returned pointer will be a pointer to the buffer. The skb + * will not be pulled. To persist the write, the user will need to call + * bpf_dynptr_write(), which will pull the skb and commit the write. + * + * Similarly for xdp programs, if the requested slice is not across xdp + * fragments, then a direct pointer will be returned, otherwise the data + * will be copied out into the buffer and the user will need to call + * bpf_dynptr_write() to commit changes. 
+ */ + return bpf_dynptr_slice(ptr, offset, buffer, buffer__szk); +} + __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) { return obj; @@ -2262,6 +2398,8 @@ BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) BTF_ID_FLAGS(func, bpf_rdonly_cast) BTF_ID_FLAGS(func, bpf_rcu_read_lock) BTF_ID_FLAGS(func, bpf_rcu_read_unlock) +BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) BTF_SET8_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5e42946e53ab..a856896e835a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -759,6 +759,22 @@ static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) } } +static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type) +{ + switch (type) { + case BPF_DYNPTR_TYPE_LOCAL: + return DYNPTR_TYPE_LOCAL; + case BPF_DYNPTR_TYPE_RINGBUF: + return DYNPTR_TYPE_RINGBUF; + case BPF_DYNPTR_TYPE_SKB: + return DYNPTR_TYPE_SKB; + case BPF_DYNPTR_TYPE_XDP: + return DYNPTR_TYPE_XDP; + default: + return 0; + } +} + static bool dynptr_type_refcounted(enum bpf_dynptr_type type) { return type == BPF_DYNPTR_TYPE_RINGBUF; @@ -1681,6 +1697,12 @@ static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) reg->type == PTR_TO_PACKET_END; } +static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg) +{ + return base_type(reg->type) == PTR_TO_MEM && + (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); +} + /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) @@ -7429,6 +7451,9 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id) /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. + * + * This also applies to dynptr slices belonging to skb and xdp dynptrs, + * since these slices point to packet data. 
*/ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { @@ -7436,7 +7461,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) struct bpf_reg_state *reg; bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ - if (reg_is_pkt_pointer_any(reg)) + if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg)) mark_reg_invalid(env, reg); })); } @@ -8688,6 +8713,11 @@ struct bpf_kfunc_call_arg_meta { struct { struct btf_field *field; } arg_rbtree_root; + struct { + enum bpf_dynptr_type type; + u32 id; + } initialized_dynptr; + u64 mem_size; }; static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta) @@ -8761,6 +8791,19 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf, return __kfunc_param_match_suffix(btf, arg, "__sz"); } +static bool is_kfunc_arg_const_mem_size(const struct btf *btf, + const struct btf_param *arg, + const struct bpf_reg_state *reg) +{ + const struct btf_type *t; + + t = btf_type_skip_modifiers(btf, arg->type, NULL); + if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) + return false; + + return __kfunc_param_match_suffix(btf, arg, "__szk"); +} + static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg) { return __kfunc_param_match_suffix(btf, arg, "__k"); @@ -8949,6 +8992,8 @@ enum special_kfunc_type { KF_bpf_rbtree_first, KF_bpf_dynptr_from_skb, KF_bpf_dynptr_from_xdp, + KF_bpf_dynptr_slice, + KF_bpf_dynptr_slice_rdwr, }; BTF_SET_START(special_kfunc_set) @@ -8965,6 +9010,8 @@ BTF_ID(func, bpf_rbtree_add) BTF_ID(func, bpf_rbtree_first) BTF_ID(func, bpf_dynptr_from_skb) BTF_ID(func, bpf_dynptr_from_xdp) +BTF_ID(func, bpf_dynptr_slice) +BTF_ID(func, bpf_dynptr_slice_rdwr) BTF_SET_END(special_kfunc_set) BTF_ID_LIST(special_kfunc_list) @@ -8983,6 +9030,8 @@ BTF_ID(func, bpf_rbtree_add) BTF_ID(func, bpf_rbtree_first) BTF_ID(func, bpf_dynptr_from_skb) BTF_ID(func, bpf_dynptr_from_xdp) +BTF_ID(func, bpf_dynptr_slice) +BTF_ID(func, bpf_dynptr_slice_rdwr) static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta) { @@ -9062,7 +9111,10 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) return KF_ARG_PTR_TO_CALLBACK; - if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1])) + + if (argno + 1 < nargs && + (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]) || + is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]))) arg_mem_size = true; /* This is the catch all argument type of register types supported by @@ -9745,6 +9797,18 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type); if (ret < 0) return ret; + + if (!(dynptr_arg_type & MEM_UNINIT)) { + int id = dynptr_id(env, reg); + + if (id < 0) { + verbose(env, "verifier internal error: failed to obtain dynptr id\n"); + return id; + } + meta->initialized_dynptr.id = id; + meta->initialized_dynptr.type = dynptr_get_type(env, reg); + } + break; } case KF_ARG_PTR_TO_LIST_HEAD: @@ -9840,14 +9904,33 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return ret; break; case KF_ARG_PTR_TO_MEM_SIZE: - ret = check_kfunc_mem_size_reg(env, ®s[regno + 1], regno + 1); + { + struct bpf_reg_state *size_reg = ®s[regno + 1]; + const struct btf_param *size_arg = &args[i + 1]; + + ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1); if (ret < 0) { verbose(env, "arg#%d arg#%d memory, len pair 
leads to invalid memory access\n", i, i + 1); return ret; } - /* Skip next '__sz' argument */ + + if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { + if (meta->arg_constant.found) { + verbose(env, "verifier internal error: only one constant argument permitted\n"); + return -EFAULT; + } + if (!tnum_is_const(size_reg->var_off)) { + verbose(env, "R%d must be a known constant\n", regno + 1); + return -EINVAL; + } + meta->arg_constant.found = true; + meta->arg_constant.value = size_reg->var_off.value; + } + + /* Skip next '__sz' or '__szk' argument */ i++; break; + } case KF_ARG_PTR_TO_CALLBACK: meta->subprogno = reg->subprogno; break; @@ -10082,6 +10165,42 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; regs[BPF_REG_0].btf = desc_btf; regs[BPF_REG_0].btf_id = meta.arg_constant.value; + } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] || + meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { + enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type); + + mark_reg_known_zero(env, regs, BPF_REG_0); + + if (!meta.arg_constant.found) { + verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); + return -EFAULT; + } + + regs[BPF_REG_0].mem_size = meta.arg_constant.value; + + /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */ + regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; + + if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { + regs[BPF_REG_0].type |= MEM_RDONLY; + } else { + /* this will set env->seen_direct_write to true */ + if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { + verbose(env, "the prog does not allow writes to packet data\n"); + return -EINVAL; + } + } + + if (!meta.initialized_dynptr.id) { + verbose(env, "verifier internal error: no dynptr id\n"); + return -EFAULT; + } + regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id; + + /* we don't need to set BPF_REG_0's ref obj id + * because packet slices are not refcounted (see + * dynptr_type_refcounted) + */ } else { verbose(env, "kernel function %s unhandled dynamic return type\n", meta.func_name); diff --git a/net/core/filter.c b/net/core/filter.c index c692046fa7f6..8f3124e06133 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3894,8 +3894,8 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; -static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, - void *buf, unsigned long len, bool flush) +void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, + void *buf, unsigned long len, bool flush) { unsigned long ptr_len, ptr_off = 0; skb_frag_t *next_frag, *end_frag; @@ -3941,7 +3941,7 @@ static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, } } -static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) +void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); u32 size = xdp->data_end - xdp->data; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index faa304c926cf..c9699304aed2 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5329,6 +5329,11 @@ union bpf_attr { * *flags* must be 0 except for skb-type dynptrs. * * For skb-type dynptrs: + * * All data slices of the dynptr are automatically + * invalidated after **bpf_dynptr_write**\ (). 
This is + * because writing may pull the skb and change the + * underlying packet buffer. + * * * For *flags*, please see the flags accepted by * **bpf_skb_store_bytes**\ (). * Return -- cgit v1.2.3 From f71f8530494bb5ab43d3369ef0ce8373eb1ee077 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 2 Mar 2023 13:46:13 +0200 Subject: bpf: Add support for absolute value BPF timers Add a new flag BPF_F_TIMER_ABS that can be passed to bpf_timer_start() to start an absolute value timer instead of the default relative value. This makes the timer expire at an exact point in time, instead of a time with latencies induced by both the BPF and timer subsystems. Suggested-by: Artem Bityutskiy Signed-off-by: Tero Kristo Link: https://lore.kernel.org/r/20230302114614.2985072-2-tero.kristo@linux.intel.com Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 15 +++++++++++++++ kernel/bpf/helpers.c | 11 +++++++++-- tools/include/uapi/linux/bpf.h | 15 +++++++++++++++ 3 files changed, 39 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index c9699304aed2..976b194eb775 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4969,6 +4969,12 @@ union bpf_attr { * different maps if key/value layout matches across maps. * Every bpf_timer_set_callback() can have different callback_fn. * + * *flags* can be one of: + * + * **BPF_F_TIMER_ABS** + * Start the timer in absolute expire value instead of the + * default relative one. + * * Return * 0 on success. * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier @@ -7097,4 +7103,13 @@ struct bpf_core_relo { enum bpf_core_relo_kind kind; }; +/* + * Flags to control bpf_timer_start() behaviour. + * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is + * relative to current time. + */ +enum { + BPF_F_TIMER_ABS = (1ULL << 0), +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 6fc0d6c44e4c..12f12e879bcf 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1264,10 +1264,11 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla { struct bpf_hrtimer *t; int ret = 0; + enum hrtimer_mode mode; if (in_nmi()) return -EOPNOTSUPP; - if (flags) + if (flags > BPF_F_TIMER_ABS) return -EINVAL; __bpf_spin_lock_irqsave(&timer->lock); t = timer->timer; @@ -1275,7 +1276,13 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla ret = -EINVAL; goto out; } - hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT); + + if (flags & BPF_F_TIMER_ABS) + mode = HRTIMER_MODE_ABS_SOFT; + else + mode = HRTIMER_MODE_REL_SOFT; + + hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); out: __bpf_spin_unlock_irqrestore(&timer->lock); return ret; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index c9699304aed2..976b194eb775 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4969,6 +4969,12 @@ union bpf_attr { * different maps if key/value layout matches across maps. * Every bpf_timer_set_callback() can have different callback_fn. * + * *flags* can be one of: + * + * **BPF_F_TIMER_ABS** + * Start the timer in absolute expire value instead of the + * default relative one. + * * Return * 0 on success. 
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier @@ -7097,4 +7103,13 @@ struct bpf_core_relo { enum bpf_core_relo_kind kind; }; +/* + * Flags to control bpf_timer_start() behaviour. + * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is + * relative to current time. + */ +enum { + BPF_F_TIMER_ABS = (1ULL << 0), +}; + #endif /* _UAPI__LINUX_BPF_H__ */ -- cgit v1.2.3 From d509c55cda22096a1836e35b03f66e1ef411c0c2 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Wed, 1 Mar 2023 12:09:13 +0200 Subject: wifi: nl80211: Update the documentation of NL80211_SCAN_FLAG_COLOCATED_6GHZ Add a detailed description of NL80211_SCAN_FLAG_COLOCATED_6GHZ flag. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230301115906.487ab04feb39.I5129fd61841332474693046241586f057b134c3c@changeid Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f14621a954e1..c22eeb18b996 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -6510,8 +6510,14 @@ enum nl80211_timeout_reason { * @NL80211_SCAN_FLAG_FREQ_KHZ: report scan results with * %NL80211_ATTR_SCAN_FREQ_KHZ. This also means * %NL80211_ATTR_SCAN_FREQUENCIES will not be included. - * @NL80211_SCAN_FLAG_COLOCATED_6GHZ: scan for colocated APs reported by - * 2.4/5 GHz APs + * @NL80211_SCAN_FLAG_COLOCATED_6GHZ: scan for collocated APs reported by + * 2.4/5 GHz APs. When the flag is set, the scan logic will use the + * information from the RNR element found in beacons/probe responses + * received on the 2.4/5 GHz channels to actively scan only the 6GHz + * channels on which APs are expected to be found. Note that when not set, + * the scan logic would scan all 6GHz channels, but since transmission of + * probe requests on non PSC channels is limited, it is highly likely that + * these channels would passively be scanned. */ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, -- cgit v1.2.3 From cbbaf2bb829b6c4ef911d4a725fc9b1fadc1e43f Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Wed, 1 Mar 2023 12:09:21 +0200 Subject: wifi: nl80211: add a command to enable/disable HW timestamping Add a command to enable and disable HW timestamping of TM and FTM frames. HW timestamping can be enabled for a specific mac address or for all addresses. The low level driver will indicate how many peers HW timestamping can be enabled concurrently, and this information will be passed to userspace. 
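For illustration, a hedged sketch of how a driver might wire up the new op and capability; the my_* names and the firmware hook are hypothetical, only the cfg80211 structures and constants come from this patch:

  #include <net/cfg80211.h>

  /* hypothetical low-level hook into the device/firmware */
  static int my_hw_enable_timestamping(struct net_device *dev,
                                       const u8 *macaddr, bool enable)
  {
          return 0;       /* stub for the sketch */
  }

  static int my_set_hw_timestamp(struct wiphy *wiphy, struct net_device *dev,
                                 struct cfg80211_set_hw_timestamp *hwts)
  {
          /* hwts->macaddr == NULL means enable/disable for all peers */
          return my_hw_enable_timestamping(dev, hwts->macaddr, hwts->enable);
  }

  static const struct cfg80211_ops my_cfg80211_ops = {
          /* ... */
          .set_hw_timestamp = my_set_hw_timestamp,
  };

  static void my_setup_wiphy(struct wiphy *wiphy)
  {
          /* advertise that HW timestamping can be enabled for any peer,
           * i.e. no per-MAC limit; userspace sees this as
           * NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0xffff
           */
          wiphy->hw_timestamp_max_peers = CFG80211_HW_TIMESTAMP_ALL_PEERS;
  }
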
Signed-off-by: Avraham Stern Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230301115906.05678d7b1c17.Iccc08869ea8156f1c71a3111a47f86dd56234bd0@changeid [switch to needing netdev UP, minor edits] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 27 +++++++++++++++++++++++++++ include/uapi/linux/nl80211.h | 22 ++++++++++++++++++++++ net/wireless/nl80211.c | 37 +++++++++++++++++++++++++++++++++++++ net/wireless/rdev-ops.h | 17 +++++++++++++++++ net/wireless/trace.h | 25 +++++++++++++++++++++++++ 5 files changed, 128 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 03b911abd772..f0da61c6ec4b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -827,6 +827,18 @@ struct cfg80211_fils_aad { const u8 *anonce; }; +/** + * struct cfg80211_set_hw_timestamp - enable/disable HW timestamping + * @macaddr: peer MAC address. NULL to enable/disable HW timestamping for all + * addresses. + * @enable: if set, enable HW timestamping for the specified MAC address. + * Otherwise disable HW timestamping for the specified MAC address. + */ +struct cfg80211_set_hw_timestamp { + const u8 *macaddr; + bool enable; +}; + /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition @@ -4330,6 +4342,8 @@ struct mgmt_frame_regs { * @add_link_station: Add a link to a station. * @mod_link_station: Modify a link of a station. * @del_link_station: Remove a link of a station. + * + * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4683,6 +4697,8 @@ struct cfg80211_ops { struct link_station_parameters *params); int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev, struct link_station_del_parameters *params); + int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_set_hw_timestamp *hwts); }; /* @@ -5139,6 +5155,8 @@ struct wiphy_iftype_akm_suites { int n_akm_suites; }; +#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff + /** * struct wiphy - wireless hardware description * @mtx: mutex for the data (structures) of this device @@ -5348,6 +5366,13 @@ struct wiphy_iftype_akm_suites { * NL80211_MAX_NR_AKM_SUITES in order to avoid compatibility issues with * legacy userspace and maximum allowed value is * CFG80211_MAX_NUM_AKM_SUITES. + * + * @hw_timestamp_max_peers: maximum number of peers that the driver supports + * enabling HW timestamping for concurrently. Setting this field to a + * non-zero value indicates that the driver supports HW timestamping. + * A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver + * supports enabling HW timestamping for all peers (i.e. no need to + * specify a mac address). */ struct wiphy { struct mutex mtx; @@ -5496,6 +5521,8 @@ struct wiphy { u8 ema_max_profile_periodicity; u16 max_num_akm_suites; + u16 hw_timestamp_max_peers; + char priv[] __aligned(NETDEV_ALIGN); }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c22eeb18b996..c8520c150f9c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1299,6 +1299,16 @@ * @NL80211_CMD_MODIFY_LINK_STA: Modify a link of an MLD station * @NL80211_CMD_REMOVE_LINK_STA: Remove a link of an MLD station * + * @NL80211_CMD_SET_HW_TIMESTAMP: Enable/disable HW timestamping of Timing + * measurement and Fine timing measurement frames. 
If %NL80211_ATTR_MAC + * is included, enable/disable HW timestamping only for frames to/from the + * specified MAC address. Otherwise enable/disable HW timestamping for + * all TM/FTM frames (including ones that were enabled with specific MAC + * address). If %NL80211_ATTR_HW_TIMESTAMP_ENABLED is not included, disable + * HW timestamping. + * The number of peers that HW timestamping can be enabled for concurrently + * is indicated by %NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1550,6 +1560,8 @@ enum nl80211_commands { NL80211_CMD_MODIFY_LINK_STA, NL80211_CMD_REMOVE_LINK_STA, + NL80211_CMD_SET_HW_TIMESTAMP, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2775,6 +2787,13 @@ enum nl80211_commands { * indicates that the sub-channel is punctured. Higher 16 bits are * reserved. * + * @NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS: Maximum number of peers that HW + * timestamping can be enabled for concurrently (u16), a wiphy attribute. + * A value of 0xffff indicates setting for all peers (i.e. not specifying + * an address with %NL80211_CMD_SET_HW_TIMESTAMP) is supported. + * @NL80211_ATTR_HW_TIMESTAMP_ENABLED: Indicates whether HW timestamping should + * be enabled or not (flag attribute). + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3306,6 +3325,9 @@ enum nl80211_attrs { NL80211_ATTR_PUNCT_BITMAP, + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS, + NL80211_ATTR_HW_TIMESTAMP_ENABLED, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 112b4bb009c8..ab0497efdd37 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -806,6 +806,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT }, [NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff), + + [NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS] = { .type = NLA_U16 }, + [NL80211_ATTR_HW_TIMESTAMP_ENABLED] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -2964,6 +2967,11 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_MLO) nla_put_flag(msg, NL80211_ATTR_MLO_SUPPORT); + if (rdev->wiphy.hw_timestamp_max_peers && + nla_put_u16(msg, NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS, + rdev->wiphy.hw_timestamp_max_peers)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -16162,6 +16170,29 @@ nl80211_remove_link_station(struct sk_buff *skb, struct genl_info *info) return ret; } +static int nl80211_set_hw_timestamp(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct cfg80211_set_hw_timestamp hwts = {}; + + if (!rdev->wiphy.hw_timestamp_max_peers) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_MAC] && + rdev->wiphy.hw_timestamp_max_peers != CFG80211_HW_TIMESTAMP_ALL_PEERS) + return -EOPNOTSUPP; + + if (info->attrs[NL80211_ATTR_MAC]) + hwts.macaddr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + hwts.enable = + nla_get_flag(info->attrs[NL80211_ATTR_HW_TIMESTAMP_ENABLED]); + + return rdev_set_hw_timestamp(rdev, dev, &hwts); +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define 
NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -17336,6 +17367,12 @@ static const struct genl_small_ops nl80211_small_ops[] = { .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, + { + .cmd = NL80211_CMD_SET_HW_TIMESTAMP, + .doit = nl80211_set_hw_timestamp, + .flags = GENL_UNS_ADMIN_PERM, + .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), + }, }; static struct genl_family nl80211_fam __ro_after_init = { diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 13b209a8db28..2e497cf26ef2 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1494,4 +1494,21 @@ rdev_del_link_station(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_set_hw_timestamp(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_set_hw_timestamp *hwts) +{ + struct wiphy *wiphy = &rdev->wiphy; + int ret; + + if (!rdev->ops->set_hw_timestamp) + return -EOPNOTSUPP; + + trace_rdev_set_hw_timestamp(wiphy, dev, hwts); + ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts); + trace_rdev_return_int(wiphy, ret); + + return ret; +} #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/trace.h b/net/wireless/trace.h index ca7474eec723..f3fcfc4fcce5 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -3918,6 +3918,31 @@ TRACE_EVENT(rdev_del_link_station, __entry->link_id) ); +TRACE_EVENT(rdev_set_hw_timestamp, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_set_hw_timestamp *hwts), + + TP_ARGS(wiphy, netdev, hwts), + + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(macaddr) + __field(bool, enable) + ), + + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(macaddr, hwts->macaddr); + __entry->enable = hwts->enable; + ), + + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr, + __entry->enable) +); + #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH -- cgit v1.2.3 From 6933486133ecf71bbe273d7ac72cfc4a51286af3 Mon Sep 17 00:00:00 2001 From: Veerendranath Jakkam Date: Thu, 12 Jan 2023 06:54:13 +0530 Subject: wifi: nl80211: Add support for randomizing TA of auth and deauth frames Add support to use a random local address in authentication and deauthentication frames sent to unassociated peer when the driver supports. The driver needs to configure receive behavior to accept frames with random transmit address specified in TX path authentication frames during the time of the frame exchange is pending and such frames need to be acknowledged similarly to frames sent to the local permanent address when this random address functionality is used. This capability allows use of randomized transmit address for PASN authentication frames to improve privacy of WLAN clients. 
Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/20230112012415.167556-2-quic_vjakkam@quicinc.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 5 ++++ net/wireless/mlme.c | 55 +++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 19 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c8520c150f9c..9a0ac0363f1f 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -6348,6 +6348,10 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_SECURE_NAN: Device supports NAN Pairing which enables * authentication, data encryption and message integrity. * + * @NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA: Device supports randomized TA + * in authentication and deauthentication frames sent to unassociated peer + * using @NL80211_CMD_FRAME. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -6418,6 +6422,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE, NL80211_EXT_FEATURE_PUNCT, NL80211_EXT_FEATURE_SECURE_NAN, + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 81d3f40d6235..ac059cefbeb3 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -673,6 +673,39 @@ static bool cfg80211_allowed_address(struct wireless_dev *wdev, const u8 *addr) return ether_addr_equal(addr, wdev_address(wdev)); } +static bool cfg80211_allowed_random_address(struct wireless_dev *wdev, + const struct ieee80211_mgmt *mgmt) +{ + if (ieee80211_is_auth(mgmt->frame_control) || + ieee80211_is_deauth(mgmt->frame_control)) { + /* Allow random TA to be used with authentication and + * deauthentication frames if the driver has indicated support. + */ + if (wiphy_ext_feature_isset( + wdev->wiphy, + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA)) + return true; + } else if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { + /* Allow random TA to be used with Public Action frames if the + * driver has indicated support. + */ + if (!wdev->connected && + wiphy_ext_feature_isset( + wdev->wiphy, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA)) + return true; + + if (wdev->connected && + wiphy_ext_feature_isset( + wdev->wiphy, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED)) + return true; + } + + return false; +} + int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) @@ -774,25 +807,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, return err; } - if (!cfg80211_allowed_address(wdev, mgmt->sa)) { - /* Allow random TA to be used with Public Action frames if the - * driver has indicated support for this. Otherwise, only allow - * the local address to be used. 
- */ - if (!ieee80211_is_action(mgmt->frame_control) || - mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) - return -EINVAL; - if (!wdev->connected && - !wiphy_ext_feature_isset( - &rdev->wiphy, - NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA)) - return -EINVAL; - if (wdev->connected && - !wiphy_ext_feature_isset( - &rdev->wiphy, - NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED)) - return -EINVAL; - } + if (!cfg80211_allowed_address(wdev, mgmt->sa) && + !cfg80211_allowed_random_address(wdev, mgmt)) + return -EINVAL; /* Transmit the management frame as requested by user space */ return rdev_mgmt_tx(rdev, wdev, params, cookie); -- cgit v1.2.3 From 4386b921857793440ebd4db3d6b70639149c7074 Mon Sep 17 00:00:00 2001 From: Sriram Yagnaraman Date: Fri, 24 Feb 2023 10:52:51 +0100 Subject: netfilter: bridge: introduce broute meta statement nftables equivalent for ebtables -t broute. Implement broute meta statement to set br_netfilter_broute flag in skb to force a packet to be routed instead of being bridged. Signed-off-by: Sriram Yagnaraman Signed-off-by: Florian Westphal --- include/uapi/linux/netfilter/nf_tables.h | 2 + net/bridge/netfilter/nft_meta_bridge.c | 71 ++++++++++++++++++++++++++++++-- 2 files changed, 70 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index ff677f3a6cad..9c6f02c26054 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -931,6 +931,7 @@ enum nft_exthdr_attributes { * @NFT_META_TIME_HOUR: hour of day (in seconds) * @NFT_META_SDIF: slave device interface index * @NFT_META_SDIFNAME: slave device interface name + * @NFT_META_BRI_BROUTE: packet br_netfilter_broute bit */ enum nft_meta_keys { NFT_META_LEN, @@ -969,6 +970,7 @@ enum nft_meta_keys { NFT_META_TIME_HOUR, NFT_META_SDIF, NFT_META_SDIFNAME, + NFT_META_BRI_BROUTE, __NFT_META_IIFTYPE, }; diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index c3ecd77e25cb..bd4d1b4d745f 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -8,6 +8,9 @@ #include #include #include +#include /* NF_BR_PRE_ROUTING */ + +#include "../br_private.h" static const struct net_device * nft_meta_get_bridge(const struct net_device *dev) @@ -102,6 +105,50 @@ static const struct nft_expr_ops nft_meta_bridge_get_ops = { .reduce = nft_meta_get_reduce, }; +static void nft_meta_bridge_set_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_meta *meta = nft_expr_priv(expr); + u32 *sreg = ®s->data[meta->sreg]; + struct sk_buff *skb = pkt->skb; + u8 value8; + + switch (meta->key) { + case NFT_META_BRI_BROUTE: + value8 = nft_reg_load8(sreg); + BR_INPUT_SKB_CB(skb)->br_netfilter_broute = !!value8; + break; + default: + nft_meta_set_eval(expr, regs, pkt); + } +} + +static int nft_meta_bridge_set_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_meta *priv = nft_expr_priv(expr); + unsigned int len; + int err; + + priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); + switch (priv->key) { + case NFT_META_BRI_BROUTE: + len = sizeof(u8); + break; + default: + return nft_meta_set_init(ctx, expr, tb); + } + + priv->len = len; + err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len); + if (err < 0) + return err; + + return 0; +} + static bool nft_meta_bridge_set_reduce(struct nft_regs_track 
*track, const struct nft_expr *expr) { @@ -120,15 +167,33 @@ static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track, return false; } +static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + struct nft_meta *priv = nft_expr_priv(expr); + unsigned int hooks; + + switch (priv->key) { + case NFT_META_BRI_BROUTE: + hooks = 1 << NF_BR_PRE_ROUTING; + break; + default: + return nft_meta_set_validate(ctx, expr, data); + } + + return nft_chain_validate_hooks(ctx->chain, hooks); +} + static const struct nft_expr_ops nft_meta_bridge_set_ops = { .type = &nft_meta_bridge_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), - .eval = nft_meta_set_eval, - .init = nft_meta_set_init, + .eval = nft_meta_bridge_set_eval, + .init = nft_meta_bridge_set_init, .destroy = nft_meta_set_destroy, .dump = nft_meta_set_dump, .reduce = nft_meta_bridge_set_reduce, - .validate = nft_meta_set_validate, + .validate = nft_meta_bridge_set_validate, }; static const struct nft_expr_ops * -- cgit v1.2.3 From 6018e1f407cccf39b804d1f75ad4de7be4e6cc45 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 10:41:17 -0800 Subject: bpf: implement numbers iterator Implement the first open-coded iterator type over a range of integers. It's public API consists of: - bpf_iter_num_new() constructor, which accepts [start, end) range (that is, start is inclusive, end is exclusive). - bpf_iter_num_next() which will keep returning read-only pointer to int until the range is exhausted, at which point NULL will be returned. If bpf_iter_num_next() is kept calling after this, NULL will be persistently returned. - bpf_iter_num_destroy() destructor, which needs to be called at some point to clean up iterator state. BPF verifier enforces that iterator destructor is called at some point before BPF program exits. Note that `start = end = X` is a valid combination to setup an empty iterator. bpf_iter_num_new() will return 0 (success) for any such combination. If bpf_iter_num_new() detects invalid combination of input arguments, it returns error, resets iterator state to, effectively, empty iterator, so any subsequent call to bpf_iter_num_next() will keep returning NULL. BPF verifier has no knowledge that returned integers are in the [start, end) value range, as both `start` and `end` are not statically known and enforced: they are runtime values. While the implementation is pretty trivial, some care needs to be taken to avoid overflows and underflows. Subsequent selftests will validate correctness of [start, end) semantics, especially around extremes (INT_MIN and INT_MAX). Similarly to bpf_loop(), we enforce that no more than BPF_MAX_LOOPS can be specified. bpf_iter_num_{new,next,destroy}() is a logical evolution from bounded BPF loops and bpf_loop() helper and is the basis for implementing ergonomic BPF loops with no statically known or verified bounds. Subsequent patches implement bpf_for() macro, demonstrating how this can be wrapped into something that works and feels like a normal for() loop in C language. 
Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230308184121.1165081-5-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 8 +++-- include/uapi/linux/bpf.h | 8 +++++ kernel/bpf/bpf_iter.c | 70 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/helpers.c | 3 ++ tools/include/uapi/linux/bpf.h | 8 +++++ 5 files changed, 95 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6792a7940e1e..e64ff1e89fb2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1617,8 +1617,12 @@ struct bpf_array { #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ #define MAX_TAIL_CALL_CNT 33 -/* Maximum number of loops for bpf_loop */ -#define BPF_MAX_LOOPS BIT(23) +/* Maximum number of loops for bpf_loop and bpf_iter_num. + * It's enum to expose it (and thus make it discoverable) through BTF. + */ +enum { + BPF_MAX_LOOPS = 8 * 1024 * 1024, +}; #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ BPF_F_RDONLY_PROG | \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 976b194eb775..4abddb668a10 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -7112,4 +7112,12 @@ enum { BPF_F_TIMER_ABS = (1ULL << 0), }; +/* BPF numbers iterator state */ +struct bpf_iter_num { + /* opaque iterator state; having __u64 here allows to preserve correct + * alignment requirements in vmlinux.h, generated from BTF + */ + __u64 __opaque[1]; +} __attribute__((aligned(8))); + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 5dc307bdeaeb..96856f130cbf 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -776,3 +776,73 @@ const struct bpf_func_proto bpf_loop_proto = { .arg3_type = ARG_PTR_TO_STACK_OR_NULL, .arg4_type = ARG_ANYTHING, }; + +struct bpf_iter_num_kern { + int cur; /* current value, inclusive */ + int end; /* final value, exclusive */ +} __aligned(8); + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) +{ + struct bpf_iter_num_kern *s = (void *)it; + + BUILD_BUG_ON(sizeof(struct bpf_iter_num_kern) != sizeof(struct bpf_iter_num)); + BUILD_BUG_ON(__alignof__(struct bpf_iter_num_kern) != __alignof__(struct bpf_iter_num)); + + BTF_TYPE_EMIT(struct btf_iter_num); + + /* start == end is legit, it's an empty range and we'll just get NULL + * on first (and any subsequent) bpf_iter_num_next() call + */ + if (start > end) { + s->cur = s->end = 0; + return -EINVAL; + } + + /* avoid overflows, e.g., if start == INT_MIN and end == INT_MAX */ + if ((s64)end - (s64)start > BPF_MAX_LOOPS) { + s->cur = s->end = 0; + return -E2BIG; + } + + /* user will call bpf_iter_num_next() first, + * which will set s->cur to exactly start value; + * underflow shouldn't matter + */ + s->cur = start - 1; + s->end = end; + + return 0; +} + +__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num* it) +{ + struct bpf_iter_num_kern *s = (void *)it; + + /* check failed initialization or if we are done (same behavior); + * need to be careful about overflow, so convert to s64 for checks, + * e.g., if s->cur == s->end == INT_MAX, we can't just do + * s->cur + 1 >= s->end + */ + if ((s64)(s->cur + 1) >= s->end) { + s->cur = s->end = 0; + return NULL; + } + + s->cur++; + + return &s->cur; +} + +__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it) +{ + struct bpf_iter_num_kern *s = 
(void *)it; + + s->cur = s->end = 0; +} + +__diag_pop(); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 637ac4e92e75..f9b7eeedce08 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2411,6 +2411,9 @@ BTF_ID_FLAGS(func, bpf_rcu_read_lock) BTF_ID_FLAGS(func, bpf_rcu_read_unlock) BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) +BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) BTF_SET8_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 976b194eb775..4abddb668a10 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -7112,4 +7112,12 @@ enum { BPF_F_TIMER_ABS = (1ULL << 0), }; +/* BPF numbers iterator state */ +struct bpf_iter_num { + /* opaque iterator state; having __u64 here allows to preserve correct + * alignment requirements in vmlinux.h, generated from BTF + */ + __u64 __opaque[1]; +} __attribute__((aligned(8))); + #endif /* _UAPI__LINUX_BPF_H__ */ -- cgit v1.2.3 From 4821a076eb602a6238528e9ebafeac853c833415 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 7 Mar 2023 16:23:26 -0500 Subject: sctp: add fair capacity stream scheduler As it says in rfc8260#section-3.5 about the fair capacity scheduler: A fair capacity distribution between the streams is used. This scheduler considers the lengths of the messages of each stream and schedules them in a specific way to maintain an equal capacity for all streams. The details are implementation dependent. Interleaving user messages allows for a better realization of the fair capacity usage. This patch adds the Fair Capacity Scheduler based on the foundations added by commit 5bbbbe32a431 ("sctp: introduce stream scheduler foundations"): A fc_list and a fc_length are added into struct sctp_stream_out_ext and a fc_list is added into struct sctp_stream. In .enqueue, when there are chunks enqueued into a stream, this stream will be linked into stream->fc_list by its fc_list, ordered by its fc_length. In .dequeue, it always picks up the 1st skb from stream->fc_list. In .dequeue_done, fc_length is increased by the chunk's len and the stream's location in stream->fc_list is updated according to its new fc_length. Note that when the new fc_length overflows in .dequeue_done, instead of resetting all fc_lengths to 0, we only reduce them by U32_MAX / 4 to avoid a moment of imbalance in the scheduling, as Marcelo suggested.
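As a hedged illustration (not part of this patch), an application would opt into the new scheduler through the existing SCTP_STREAM_SCHEDULER socket option from the RFC 8260 sockets API; the fallback define and the use of a one-to-one style socket with assoc_id 0 are assumptions made for the example:

/* Hedged userspace sketch: SCTP_STREAM_SCHEDULER with struct sctp_assoc_value
 * is the pre-existing stream scheduler API; SCTP_SS_FC is the value added by
 * this patch (FCFS=0, PRIO, RR, then FC per the updated enum) and may not yet
 * be present in installed headers, hence the fallback define.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

#ifndef SCTP_SS_FC
#define SCTP_SS_FC 3
#endif

int select_fair_capacity(int sctp_fd)
{
	struct sctp_assoc_value av;

	memset(&av, 0, sizeof(av));
	av.assoc_id = 0;		/* one-to-one socket: current association */
	av.assoc_value = SCTP_SS_FC;

	if (setsockopt(sctp_fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
		       &av, sizeof(av)) < 0) {
		perror("setsockopt(SCTP_STREAM_SCHEDULER)");
		return -1;
	}
	return 0;
}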
Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: Paolo Abeni --- include/net/sctp/stream_sched.h | 1 + include/net/sctp/structs.h | 7 ++ include/uapi/linux/sctp.h | 3 +- net/sctp/Makefile | 3 +- net/sctp/stream_sched.c | 1 + net/sctp/stream_sched_fc.c | 183 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 196 insertions(+), 2 deletions(-) create mode 100644 net/sctp/stream_sched_fc.c (limited to 'include/uapi/linux') diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h index fa00dc20a0d7..913170710adb 100644 --- a/include/net/sctp/stream_sched.h +++ b/include/net/sctp/stream_sched.h @@ -58,5 +58,6 @@ void sctp_sched_ops_register(enum sctp_sched_type sched, struct sctp_sched_ops *sched_ops); void sctp_sched_ops_prio_init(void); void sctp_sched_ops_rr_init(void); +void sctp_sched_ops_fc_init(void); #endif /* __sctp_stream_sched_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index e1f6e7fc2b11..2f1c9f50b352 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1429,6 +1429,10 @@ struct sctp_stream_out_ext { struct { struct list_head rr_list; }; + struct { + struct list_head fc_list; + __u32 fc_length; + }; }; }; @@ -1475,6 +1479,9 @@ struct sctp_stream { /* The next stream in line */ struct sctp_stream_out_ext *rr_next; }; + struct { + struct list_head fc_list; + }; }; struct sctp_stream_interleave *si; }; diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index ed7d4ecbf53d..6814c5a1c4bc 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -1211,7 +1211,8 @@ enum sctp_sched_type { SCTP_SS_DEFAULT = SCTP_SS_FCFS, SCTP_SS_PRIO, SCTP_SS_RR, - SCTP_SS_MAX = SCTP_SS_RR + SCTP_SS_FC, + SCTP_SS_MAX = SCTP_SS_FC }; /* Probe Interval socket option */ diff --git a/net/sctp/Makefile b/net/sctp/Makefile index e845e4588535..0448398408d8 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -13,7 +13,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ tsnmap.o bind_addr.o socket.o primitive.o \ output.o input.o debug.o stream.o auth.o \ offload.o stream_sched.o stream_sched_prio.o \ - stream_sched_rr.o stream_interleave.o + stream_sched_rr.o stream_sched_fc.o \ + stream_interleave.o sctp_diag-y := diag.o diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c index 330067002deb..1ebd14ef8daa 100644 --- a/net/sctp/stream_sched.c +++ b/net/sctp/stream_sched.c @@ -124,6 +124,7 @@ void sctp_sched_ops_init(void) sctp_sched_ops_fcfs_init(); sctp_sched_ops_prio_init(); sctp_sched_ops_rr_init(); + sctp_sched_ops_fc_init(); } static void sctp_sched_free_sched(struct sctp_stream *stream) diff --git a/net/sctp/stream_sched_fc.c b/net/sctp/stream_sched_fc.c new file mode 100644 index 000000000000..b336c2f5486b --- /dev/null +++ b/net/sctp/stream_sched_fc.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* SCTP kernel implementation + * (C) Copyright Red Hat Inc. 2022 + * + * This file is part of the SCTP kernel implementation + * + * These functions manipulate sctp stream queue/scheduling. 
+ * + * Please send any bug reports or fixes you make to the + * email addresched(es): + * lksctp developers + * + * Written or modified by: + * Xin Long + */ + +#include +#include +#include +#include + +/* Fair Capacity handling + * RFC 8260 section 3.5 + */ +static void sctp_sched_fc_unsched_all(struct sctp_stream *stream); + +static int sctp_sched_fc_set(struct sctp_stream *stream, __u16 sid, + __u16 weight, gfp_t gfp) +{ + return 0; +} + +static int sctp_sched_fc_get(struct sctp_stream *stream, __u16 sid, + __u16 *value) +{ + return 0; +} + +static int sctp_sched_fc_init(struct sctp_stream *stream) +{ + INIT_LIST_HEAD(&stream->fc_list); + + return 0; +} + +static int sctp_sched_fc_init_sid(struct sctp_stream *stream, __u16 sid, + gfp_t gfp) +{ + struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; + + INIT_LIST_HEAD(&soute->fc_list); + soute->fc_length = 0; + + return 0; +} + +static void sctp_sched_fc_free_sid(struct sctp_stream *stream, __u16 sid) +{ +} + +static void sctp_sched_fc_sched(struct sctp_stream *stream, + struct sctp_stream_out_ext *soute) +{ + struct sctp_stream_out_ext *pos; + + if (!list_empty(&soute->fc_list)) + return; + + list_for_each_entry(pos, &stream->fc_list, fc_list) + if (pos->fc_length >= soute->fc_length) + break; + list_add_tail(&soute->fc_list, &pos->fc_list); +} + +static void sctp_sched_fc_enqueue(struct sctp_outq *q, + struct sctp_datamsg *msg) +{ + struct sctp_stream *stream; + struct sctp_chunk *ch; + __u16 sid; + + ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); + sid = sctp_chunk_stream_no(ch); + stream = &q->asoc->stream; + sctp_sched_fc_sched(stream, SCTP_SO(stream, sid)->ext); +} + +static struct sctp_chunk *sctp_sched_fc_dequeue(struct sctp_outq *q) +{ + struct sctp_stream *stream = &q->asoc->stream; + struct sctp_stream_out_ext *soute; + struct sctp_chunk *ch; + + /* Bail out quickly if queue is empty */ + if (list_empty(&q->out_chunk_list)) + return NULL; + + /* Find which chunk is next */ + if (stream->out_curr) + soute = stream->out_curr->ext; + else + soute = list_entry(stream->fc_list.next, struct sctp_stream_out_ext, fc_list); + ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list); + + sctp_sched_dequeue_common(q, ch); + return ch; +} + +static void sctp_sched_fc_dequeue_done(struct sctp_outq *q, + struct sctp_chunk *ch) +{ + struct sctp_stream *stream = &q->asoc->stream; + struct sctp_stream_out_ext *soute, *pos; + __u16 sid, i; + + sid = sctp_chunk_stream_no(ch); + soute = SCTP_SO(stream, sid)->ext; + /* reduce all fc_lengths by U32_MAX / 4 if the current fc_length overflows. 
*/ + if (soute->fc_length > U32_MAX - ch->skb->len) { + for (i = 0; i < stream->outcnt; i++) { + pos = SCTP_SO(stream, i)->ext; + if (!pos) + continue; + if (pos->fc_length <= (U32_MAX >> 2)) { + pos->fc_length = 0; + continue; + } + pos->fc_length -= (U32_MAX >> 2); + } + } + soute->fc_length += ch->skb->len; + + if (list_empty(&soute->outq)) { + list_del_init(&soute->fc_list); + return; + } + + pos = soute; + list_for_each_entry_continue(pos, &stream->fc_list, fc_list) + if (pos->fc_length >= soute->fc_length) + break; + list_move_tail(&soute->fc_list, &pos->fc_list); +} + +static void sctp_sched_fc_sched_all(struct sctp_stream *stream) +{ + struct sctp_association *asoc; + struct sctp_chunk *ch; + + asoc = container_of(stream, struct sctp_association, stream); + list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) { + __u16 sid = sctp_chunk_stream_no(ch); + + if (SCTP_SO(stream, sid)->ext) + sctp_sched_fc_sched(stream, SCTP_SO(stream, sid)->ext); + } +} + +static void sctp_sched_fc_unsched_all(struct sctp_stream *stream) +{ + struct sctp_stream_out_ext *soute, *tmp; + + list_for_each_entry_safe(soute, tmp, &stream->fc_list, fc_list) + list_del_init(&soute->fc_list); +} + +static struct sctp_sched_ops sctp_sched_fc = { + .set = sctp_sched_fc_set, + .get = sctp_sched_fc_get, + .init = sctp_sched_fc_init, + .init_sid = sctp_sched_fc_init_sid, + .free_sid = sctp_sched_fc_free_sid, + .enqueue = sctp_sched_fc_enqueue, + .dequeue = sctp_sched_fc_dequeue, + .dequeue_done = sctp_sched_fc_dequeue_done, + .sched_all = sctp_sched_fc_sched_all, + .unsched_all = sctp_sched_fc_unsched_all, +}; + +void sctp_sched_ops_fc_init(void) +{ + sctp_sched_ops_register(SCTP_SS_FC, &sctp_sched_fc); +} -- cgit v1.2.3 From 42d452e7709fdb4d42376d2a97369e22cc80a5d2 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 7 Mar 2023 16:23:27 -0500 Subject: sctp: add weighted fair queueing stream scheduler As it says in rfc8260#section-3.6 about the weighted fair queueing scheduler: A Weighted Fair Queueing scheduler between the streams is used. The weight is configurable per outgoing SCTP stream. This scheduler considers the lengths of the messages of each stream and schedules them in a specific way to use the capacity according to the given weights. If the weight of stream S1 is n times the weight of stream S2, the scheduler should assign to stream S1 n times the capacity it assigns to stream S2. The details are implementation dependent. Interleaving user messages allows for a better realization of the capacity usage according to the given weights. This patch adds Weighted Fair Queueing Scheduler actually based on the code of Fair Capacity Scheduler by adding fc_weight into struct sctp_stream_out_ext and taking it into account when sorting stream-> fc_list in sctp_sched_fc_sched() and sctp_sched_fc_dequeue_done(). 
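As a hedged illustration (not part of this patch), once SCTP_SS_WFQ has been selected the per-stream weight would be assigned through the existing SCTP_STREAM_SCHEDULER_VALUE option, the same one the priority scheduler already uses; the fallback define and the concrete stream numbers are assumptions made for the example:

/* Hedged userspace sketch: struct sctp_stream_value is the pre-existing
 * per-stream scheduler value API; for SCTP_SS_WFQ the value acts as the
 * weight and, per this patch, must be non-zero. SCTP_SS_WFQ follows
 * SCTP_SS_FC in the updated enum, hence the fallback define for older headers.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

#ifndef SCTP_SS_WFQ
#define SCTP_SS_WFQ 4
#endif

static int set_stream_weight(int fd, uint16_t stream, uint16_t weight)
{
	struct sctp_stream_value sv;

	memset(&sv, 0, sizeof(sv));
	sv.assoc_id = 0;
	sv.stream_id = stream;
	sv.stream_value = weight;	/* stored in fc_weight; 0 is rejected */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
			  &sv, sizeof(sv));
}

/* e.g. give stream 1 twice the capacity share of stream 0 */
static int weight_streams(int fd)
{
	if (set_stream_weight(fd, 0, 1) < 0)
		return -1;
	return set_stream_weight(fd, 1, 2);
}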
Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: Paolo Abeni --- include/net/sctp/stream_sched.h | 1 + include/net/sctp/structs.h | 1 + include/uapi/linux/sctp.h | 3 ++- net/sctp/stream_sched.c | 1 + net/sctp/stream_sched_fc.c | 50 +++++++++++++++++++++++++++++++++++++---- 5 files changed, 51 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h index 913170710adb..572d73fdcd5e 100644 --- a/include/net/sctp/stream_sched.h +++ b/include/net/sctp/stream_sched.h @@ -59,5 +59,6 @@ void sctp_sched_ops_register(enum sctp_sched_type sched, void sctp_sched_ops_prio_init(void); void sctp_sched_ops_rr_init(void); void sctp_sched_ops_fc_init(void); +void sctp_sched_ops_wfq_init(void); #endif /* __sctp_stream_sched_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 2f1c9f50b352..a0933efd93c3 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1432,6 +1432,7 @@ struct sctp_stream_out_ext { struct { struct list_head fc_list; __u32 fc_length; + __u16 fc_weight; }; }; }; diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 6814c5a1c4bc..b7d91d4cf0db 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -1212,7 +1212,8 @@ enum sctp_sched_type { SCTP_SS_PRIO, SCTP_SS_RR, SCTP_SS_FC, - SCTP_SS_MAX = SCTP_SS_FC + SCTP_SS_WFQ, + SCTP_SS_MAX = SCTP_SS_WFQ }; /* Probe Interval socket option */ diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c index 1ebd14ef8daa..e843760e9aaa 100644 --- a/net/sctp/stream_sched.c +++ b/net/sctp/stream_sched.c @@ -125,6 +125,7 @@ void sctp_sched_ops_init(void) sctp_sched_ops_prio_init(); sctp_sched_ops_rr_init(); sctp_sched_ops_fc_init(); + sctp_sched_ops_wfq_init(); } static void sctp_sched_free_sched(struct sctp_stream *stream) diff --git a/net/sctp/stream_sched_fc.c b/net/sctp/stream_sched_fc.c index b336c2f5486b..4bd18a497a6d 100644 --- a/net/sctp/stream_sched_fc.c +++ b/net/sctp/stream_sched_fc.c @@ -19,11 +19,32 @@ #include #include -/* Fair Capacity handling - * RFC 8260 section 3.5 +/* Fair Capacity and Weighted Fair Queueing handling + * RFC 8260 section 3.5 and 3.6 */ static void sctp_sched_fc_unsched_all(struct sctp_stream *stream); +static int sctp_sched_wfq_set(struct sctp_stream *stream, __u16 sid, + __u16 weight, gfp_t gfp) +{ + struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; + + if (!weight) + return -EINVAL; + + soute->fc_weight = weight; + return 0; +} + +static int sctp_sched_wfq_get(struct sctp_stream *stream, __u16 sid, + __u16 *value) +{ + struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; + + *value = soute->fc_weight; + return 0; +} + static int sctp_sched_fc_set(struct sctp_stream *stream, __u16 sid, __u16 weight, gfp_t gfp) { @@ -50,6 +71,7 @@ static int sctp_sched_fc_init_sid(struct sctp_stream *stream, __u16 sid, INIT_LIST_HEAD(&soute->fc_list); soute->fc_length = 0; + soute->fc_weight = 1; return 0; } @@ -67,7 +89,8 @@ static void sctp_sched_fc_sched(struct sctp_stream *stream, return; list_for_each_entry(pos, &stream->fc_list, fc_list) - if (pos->fc_length >= soute->fc_length) + if ((__u64)pos->fc_length * soute->fc_weight >= + (__u64)soute->fc_length * pos->fc_weight) break; list_add_tail(&soute->fc_list, &pos->fc_list); } @@ -137,7 +160,8 @@ static void sctp_sched_fc_dequeue_done(struct sctp_outq *q, pos = soute; list_for_each_entry_continue(pos, &stream->fc_list, fc_list) - if (pos->fc_length >= 
soute->fc_length) + if ((__u64)pos->fc_length * soute->fc_weight >= + (__u64)soute->fc_length * pos->fc_weight) break; list_move_tail(&soute->fc_list, &pos->fc_list); } @@ -181,3 +205,21 @@ void sctp_sched_ops_fc_init(void) { sctp_sched_ops_register(SCTP_SS_FC, &sctp_sched_fc); } + +static struct sctp_sched_ops sctp_sched_wfq = { + .set = sctp_sched_wfq_set, + .get = sctp_sched_wfq_get, + .init = sctp_sched_fc_init, + .init_sid = sctp_sched_fc_init_sid, + .free_sid = sctp_sched_fc_free_sid, + .enqueue = sctp_sched_fc_enqueue, + .dequeue = sctp_sched_fc_dequeue, + .dequeue_done = sctp_sched_fc_dequeue_done, + .sched_all = sctp_sched_fc_sched_all, + .unsched_all = sctp_sched_fc_unsched_all, +}; + +void sctp_sched_ops_wfq_init(void) +{ + sctp_sched_ops_register(SCTP_SS_WFQ, &sctp_sched_wfq); +} -- cgit v1.2.3 From 5a70f4a63000ba68004fb3c1aaf2f90303dd228f Mon Sep 17 00:00:00 2001 From: Michael Weiß Date: Thu, 9 Mar 2023 14:38:23 +0100 Subject: bpf: Fix a typo for BPF_F_ANY_ALIGNMENT in bpf.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix s/BPF_PROF_LOAD/BPF_PROG_LOAD/ typo in the documentation comment for BPF_F_ANY_ALIGNMENT in bpf.h. Signed-off-by: Michael Weiß Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230309133823.944097-1-michael.weiss@aisec.fraunhofer.de --- include/uapi/linux/bpf.h | 2 +- tools/include/uapi/linux/bpf.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4abddb668a10..d8c534e05b0a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1108,7 +1108,7 @@ enum bpf_link_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) -/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the +/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads ands stores (such * as sparc and mips) the verifier validates that all loads and diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4abddb668a10..d8c534e05b0a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1108,7 +1108,7 @@ enum bpf_link_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) -/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the +/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads ands stores (such * as sparc and mips) the verifier validates that all loads and -- cgit v1.2.3 From be50da3e9d4ad1958f7b11322d44d94d5c25a4c1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Mar 2023 10:45:59 +0100 Subject: net: virtio_net: implement exact header length guest feature Virtio spec introduced a feature VIRTIO_NET_F_GUEST_HDRLEN which when set implicates that device benefits from knowing the exact size of the header. For compatibility, to signal to the device that the header is reliable driver also needs to set this feature. Without this feature set by driver, device has to figure out the header size itself. Quoting the original virtio spec: "hdr_len is a hint to the device as to how much of the header needs to be kept to copy into each packet" "a hint" might not be clear for the reader what does it mean, if it is "maybe like that" of "exactly like that". 
This feature just makes it crystal clear and let the device count on the hdr_len being filled up by the exact length of header. Also note the spec already has following note about hdr_len: "Due to various bugs in implementations, this field is not useful as a guarantee of the transport header size." Without this feature the device needs to parse the header in core data path handling. Accurate information helps the device to eliminate such header parsing and directly use the hardware accelerators for GSO operation. virtio_net_hdr_from_skb() fills up hdr_len to skb_headlen(skb). The driver already complies to fill the correct value. Introduce the feature and advertise it. Note that virtio spec also includes following note for device implementation: "Caution should be taken by the implementation so as to prevent a malicious driver from attacking the device by setting an incorrect hdr_len." There is a plan to support this feature in our emulated device. A device of SolidRun offers this feature bit. They claim this feature will save the device a few cycles for every GSO packet. Link: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-230006x3 Signed-off-by: Jiri Pirko Reviewed-by: Parav Pandit Reviewed-by: Alvaro Karsz Acked-by: Michael S. Tsirkin Acked-by: Willem de Bruijn Link: https://lore.kernel.org/r/20230309094559.917857-1-jiri@resnulli.us Signed-off-by: Jakub Kicinski --- drivers/net/virtio_net.c | 6 ++++-- include/uapi/linux/virtio_net.h | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fb5e68ed3ec2..e85b03988733 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -62,7 +62,8 @@ static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GUEST_USO4, - VIRTIO_NET_F_GUEST_USO6 + VIRTIO_NET_F_GUEST_USO6, + VIRTIO_NET_F_GUEST_HDRLEN }; #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ @@ -4213,7 +4214,8 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_CTRL_MAC_ADDR, \ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \ - VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL + VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \ + VIRTIO_NET_F_GUEST_HDRLEN static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index b4062bed186a..12c1c9699935 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -61,6 +61,7 @@ #define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. */ #define VIRTIO_NET_F_HOST_USO 56 /* Host can handle USO in. */ #define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */ +#define VIRTIO_NET_F_GUEST_HDRLEN 59 /* Guest provides the exact hdr_len value. */ #define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */ #define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */ #define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device -- cgit v1.2.3 From 27d7fdf06fdb84455ff585b58c8034e2fab42583 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Mon, 13 Mar 2023 14:56:27 -0600 Subject: bpf: use canonical ftrace path The canonical location for the tracefs filesystem is at /sys/kernel/tracing. 
But, from Documentation/trace/ftrace.rst: Before 4.1, all ftrace tracing control files were within the debugfs file system, which is typically located at /sys/kernel/debug/tracing. For backward compatibility, when mounting the debugfs file system, the tracefs file system will be automatically mounted at: /sys/kernel/debug/tracing Many comments and samples in the bpf code still refer to this older debugfs path, so let's update them to avoid confusion. There are a few spots where the bpf code explicitly checks both tracefs and debugfs (tools/bpf/bpftool/tracelog.c and tools/lib/api/fs/fs.c) and I've left those alone so that the tools can continue to work with both paths. Signed-off-by: Ross Zwisler Acked-by: Michael S. Tsirkin Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20230313205628.1058720-2-zwisler@kernel.org Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 8 ++++---- samples/bpf/cpustat_kern.c | 4 ++-- samples/bpf/hbm.c | 4 ++-- samples/bpf/ibumad_kern.c | 4 ++-- samples/bpf/lwt_len_hist.sh | 2 +- samples/bpf/offwaketime_kern.c | 2 +- samples/bpf/task_fd_query_user.c | 4 ++-- samples/bpf/test_lwt_bpf.sh | 2 +- samples/bpf/test_overhead_tp.bpf.c | 4 ++-- tools/include/uapi/linux/bpf.h | 8 ++++---- 10 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d8c534e05b0a..13129df937cd 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1647,17 +1647,17 @@ union bpf_attr { * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) - * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * to file *\/sys/kernel/tracing/trace* from TraceFS, if * available. It can take up to three additional **u64** * arguments (as an eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. - * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is - * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. + * Lines are discarded while *\/sys/kernel/tracing/trace* is + * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in - * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *\/sys/kernel/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * diff --git a/samples/bpf/cpustat_kern.c b/samples/bpf/cpustat_kern.c index 5aefd19cdfa1..944f13fe164a 100644 --- a/samples/bpf/cpustat_kern.c +++ b/samples/bpf/cpustat_kern.c @@ -76,8 +76,8 @@ struct { /* * The trace events for cpu_idle and cpu_frequency are taken from: - * /sys/kernel/debug/tracing/events/power/cpu_idle/format - * /sys/kernel/debug/tracing/events/power/cpu_frequency/format + * /sys/kernel/tracing/events/power/cpu_idle/format + * /sys/kernel/tracing/events/power/cpu_frequency/format * * These two events have same format, so define one common structure. 
*/ diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c index 516fbac28b71..ff58ec43f56a 100644 --- a/samples/bpf/hbm.c +++ b/samples/bpf/hbm.c @@ -65,7 +65,7 @@ static void Usage(void); static void read_trace_pipe2(void); static void do_error(char *msg, bool errno_flag); -#define DEBUGFS "/sys/kernel/debug/tracing/" +#define TRACEFS "/sys/kernel/tracing/" static struct bpf_program *bpf_prog; static struct bpf_object *obj; @@ -77,7 +77,7 @@ static void read_trace_pipe2(void) FILE *outf; char *outFname = "hbm_out.log"; - trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0); + trace_fd = open(TRACEFS "trace_pipe", O_RDONLY, 0); if (trace_fd < 0) { printf("Error opening trace_pipe\n"); return; diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c index 9b193231024a..f07474c72525 100644 --- a/samples/bpf/ibumad_kern.c +++ b/samples/bpf/ibumad_kern.c @@ -39,8 +39,8 @@ struct { /* Taken from the current format defined in * include/trace/events/ib_umad.h * and - * /sys/kernel/debug/tracing/events/ib_umad/ib_umad_read/format - * /sys/kernel/debug/tracing/events/ib_umad/ib_umad_write/format + * /sys/kernel/tracing/events/ib_umad/ib_umad_read/format + * /sys/kernel/tracing/events/ib_umad/ib_umad_write/format */ struct ib_umad_rw_args { u64 pad; diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh index 7078bfcc4f4d..381b2c634784 100755 --- a/samples/bpf/lwt_len_hist.sh +++ b/samples/bpf/lwt_len_hist.sh @@ -5,7 +5,7 @@ NS1=lwt_ns1 VETH0=tst_lwt1a VETH1=tst_lwt1b BPF_PROG=lwt_len_hist.bpf.o -TRACE_ROOT=/sys/kernel/debug/tracing +TRACE_ROOT=/sys/kernel/tracing function cleanup { # To reset saved histogram, remove pinned map diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c index eb4d94742e6b..23f12b47e9e5 100644 --- a/samples/bpf/offwaketime_kern.c +++ b/samples/bpf/offwaketime_kern.c @@ -110,7 +110,7 @@ static inline int update_counts(void *ctx, u32 pid, u64 delta) } #if 1 -/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */ struct sched_switch_args { unsigned long long pad; char prev_comm[TASK_COMM_LEN]; diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c index a33d74bd3a4b..1e61f2180470 100644 --- a/samples/bpf/task_fd_query_user.c +++ b/samples/bpf/task_fd_query_user.c @@ -235,7 +235,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return) struct bpf_link *link; ssize_t bytes; - snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", + snprintf(buf, sizeof(buf), "/sys/kernel/tracing/%s_events", event_type); kfd = open(buf, O_WRONLY | O_TRUNC, 0); CHECK_PERROR_RET(kfd < 0); @@ -252,7 +252,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return) close(kfd); kfd = -1; - snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s/id", + snprintf(buf, sizeof(buf), "/sys/kernel/tracing/events/%ss/%s/id", event_type, event_alias); efd = open(buf, O_RDONLY, 0); CHECK_PERROR_RET(efd < 0); diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh index 2e9f5126963b..0bf2d0f6bf4b 100755 --- a/samples/bpf/test_lwt_bpf.sh +++ b/samples/bpf/test_lwt_bpf.sh @@ -21,7 +21,7 @@ IP_LOCAL="192.168.99.1" PROG_SRC="test_lwt_bpf.c" BPF_PROG="test_lwt_bpf.o" -TRACE_ROOT=/sys/kernel/debug/tracing +TRACE_ROOT=/sys/kernel/tracing CONTEXT_INFO=$(cat ${TRACE_ROOT}/trace_options | grep context) function lookup_mac() diff --git a/samples/bpf/test_overhead_tp.bpf.c 
b/samples/bpf/test_overhead_tp.bpf.c index 67cab3881969..8b498328e961 100644 --- a/samples/bpf/test_overhead_tp.bpf.c +++ b/samples/bpf/test_overhead_tp.bpf.c @@ -7,7 +7,7 @@ #include "vmlinux.h" #include -/* from /sys/kernel/debug/tracing/events/task/task_rename/format */ +/* from /sys/kernel/tracing/events/task/task_rename/format */ struct task_rename { __u64 pad; __u32 pid; @@ -21,7 +21,7 @@ int prog(struct task_rename *ctx) return 0; } -/* from /sys/kernel/debug/tracing/events/fib/fib_table_lookup/format */ +/* from /sys/kernel/tracing/events/fib/fib_table_lookup/format */ struct fib_table_lookup { __u64 pad; __u32 tb_id; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d8c534e05b0a..13129df937cd 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1647,17 +1647,17 @@ union bpf_attr { * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) - * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * to file *\/sys/kernel/tracing/trace* from TraceFS, if * available. It can take up to three additional **u64** * arguments (as an eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. - * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is - * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. + * Lines are discarded while *\/sys/kernel/tracing/trace* is + * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in - * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *\/sys/kernel/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * -- cgit v1.2.3 From a3a48de5eade770e911d35291217bdd69ce04ef1 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Mar 2023 15:11:51 +0200 Subject: vxlan: mdb: Add MDB control path support Implement MDB control path support, enabling the creation, deletion, replacement and dumping of MDB entries in a similar fashion to the bridge driver. Unlike the bridge driver, each entry stores a list of remote VTEPs to which matched packets need to be replicated to and not a list of bridge ports. The motivating use case is the installation of MDB entries by a user space control plane in response to received EVPN routes. As such, only allow permanent MDB entries to be installed and do not implement snooping functionality, avoiding a lot of unnecessary complexity. Since entries can only be modified by user space under RTNL, use RTNL as the write lock. Use RCU to ensure that MDB entries and remotes are not freed while being accessed from the data path during transmission. In terms of uAPI, reuse the existing MDB netlink interface, but add a few new attributes to request and response messages: * IP address of the destination VXLAN tunnel endpoint where the multicast receivers reside. * UDP destination port number to use to connect to the remote VXLAN tunnel endpoint. * VXLAN VNI Network Identifier to use to connect to the remote VXLAN tunnel endpoint. Required when Ingress Replication (IR) is used and the remote VTEP is not a member of originating broadcast domain (VLAN/VNI) [1]. * Source VNI Network Identifier the MDB entry belongs to. Used only when the VXLAN device is in external mode. 
* Interface index of the outgoing interface to reach the remote VXLAN tunnel endpoint. This is required when the underlay destination IP is multicast (P2MP), as the multicast routing tables are not consulted. All the new attributes are added under the 'MDBA_SET_ENTRY_ATTRS' nest which is strictly validated by the bridge driver, thereby automatically rejecting the new attributes. [1] https://datatracker.ietf.org/doc/html/draft-ietf-bess-evpn-irb-mcast#section-3.2.2 Signed-off-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- drivers/net/vxlan/Makefile | 2 +- drivers/net/vxlan/vxlan_core.c | 8 + drivers/net/vxlan/vxlan_mdb.c | 1341 +++++++++++++++++++++++++++++++++++++ drivers/net/vxlan/vxlan_private.h | 31 + include/net/vxlan.h | 5 + include/uapi/linux/if_bridge.h | 10 + 6 files changed, 1396 insertions(+), 1 deletion(-) create mode 100644 drivers/net/vxlan/vxlan_mdb.c (limited to 'include/uapi/linux') diff --git a/drivers/net/vxlan/Makefile b/drivers/net/vxlan/Makefile index d4c255499b72..91b8fec8b6cf 100644 --- a/drivers/net/vxlan/Makefile +++ b/drivers/net/vxlan/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_VXLAN) += vxlan.o -vxlan-objs := vxlan_core.o vxlan_multicast.o vxlan_vnifilter.o +vxlan-objs := vxlan_core.o vxlan_multicast.o vxlan_vnifilter.o vxlan_mdb.o diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 5de1a20497a6..a8b26d4f76de 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2878,8 +2878,14 @@ static int vxlan_init(struct net_device *dev) if (err) goto err_free_percpu; + err = vxlan_mdb_init(vxlan); + if (err) + goto err_gro_cells_destroy; + return 0; +err_gro_cells_destroy: + gro_cells_destroy(&vxlan->gro_cells); err_free_percpu: free_percpu(dev->tstats); err_vnigroup_uninit: @@ -2904,6 +2910,8 @@ static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + vxlan_mdb_fini(vxlan); + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) vxlan_vnigroup_uninit(vxlan); diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c new file mode 100644 index 000000000000..129692b3663f --- /dev/null +++ b/drivers/net/vxlan/vxlan_mdb.c @@ -0,0 +1,1341 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vxlan_private.h" + +struct vxlan_mdb_entry_key { + union vxlan_addr src; + union vxlan_addr dst; + __be32 vni; +}; + +struct vxlan_mdb_entry { + struct rhash_head rhnode; + struct list_head remotes; + struct vxlan_mdb_entry_key key; + struct hlist_node mdb_node; + struct rcu_head rcu; +}; + +#define VXLAN_MDB_REMOTE_F_BLOCKED BIT(0) + +struct vxlan_mdb_remote { + struct list_head list; + struct vxlan_rdst __rcu *rd; + u8 flags; + u8 filter_mode; + u8 rt_protocol; + struct hlist_head src_list; + struct rcu_head rcu; +}; + +#define VXLAN_SGRP_F_DELETE BIT(0) + +struct vxlan_mdb_src_entry { + struct hlist_node node; + union vxlan_addr addr; + u8 flags; +}; + +struct vxlan_mdb_dump_ctx { + long reserved; + long entry_idx; + long remote_idx; +}; + +struct vxlan_mdb_config_src_entry { + union vxlan_addr addr; + struct list_head node; +}; + +struct vxlan_mdb_config { + struct vxlan_dev *vxlan; + struct vxlan_mdb_entry_key group; + struct list_head src_list; + union vxlan_addr remote_ip; + u32 remote_ifindex; + __be32 remote_vni; + __be16 remote_port; + u16 nlflags; + u8 flags; + u8 filter_mode; + u8 rt_protocol; +}; + +static const 
struct rhashtable_params vxlan_mdb_rht_params = { + .head_offset = offsetof(struct vxlan_mdb_entry, rhnode), + .key_offset = offsetof(struct vxlan_mdb_entry, key), + .key_len = sizeof(struct vxlan_mdb_entry_key), + .automatic_shrinking = true, +}; + +static int __vxlan_mdb_add(const struct vxlan_mdb_config *cfg, + struct netlink_ext_ack *extack); +static int __vxlan_mdb_del(const struct vxlan_mdb_config *cfg, + struct netlink_ext_ack *extack); + +static void vxlan_br_mdb_entry_fill(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote, + struct br_mdb_entry *e) +{ + const union vxlan_addr *dst = &mdb_entry->key.dst; + + memset(e, 0, sizeof(*e)); + e->ifindex = vxlan->dev->ifindex; + e->state = MDB_PERMANENT; + + if (remote->flags & VXLAN_MDB_REMOTE_F_BLOCKED) + e->flags |= MDB_FLAGS_BLOCKED; + + switch (dst->sa.sa_family) { + case AF_INET: + e->addr.u.ip4 = dst->sin.sin_addr.s_addr; + e->addr.proto = htons(ETH_P_IP); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + e->addr.u.ip6 = dst->sin6.sin6_addr; + e->addr.proto = htons(ETH_P_IPV6); + break; +#endif + } +} + +static int vxlan_mdb_entry_info_fill_srcs(struct sk_buff *skb, + const struct vxlan_mdb_remote *remote) +{ + struct vxlan_mdb_src_entry *ent; + struct nlattr *nest; + + if (hlist_empty(&remote->src_list)) + return 0; + + nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST); + if (!nest) + return -EMSGSIZE; + + hlist_for_each_entry(ent, &remote->src_list, node) { + struct nlattr *nest_ent; + + nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY); + if (!nest_ent) + goto out_cancel_err; + + if (vxlan_nla_put_addr(skb, MDBA_MDB_SRCATTR_ADDRESS, + &ent->addr) || + nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER, 0)) + goto out_cancel_err; + + nla_nest_end(skb, nest_ent); + } + + nla_nest_end(skb, nest); + + return 0; + +out_cancel_err: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int vxlan_mdb_entry_info_fill(const struct vxlan_dev *vxlan, + struct sk_buff *skb, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote) +{ + struct vxlan_rdst *rd = rtnl_dereference(remote->rd); + struct br_mdb_entry e; + struct nlattr *nest; + + nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY_INFO); + if (!nest) + return -EMSGSIZE; + + vxlan_br_mdb_entry_fill(vxlan, mdb_entry, remote, &e); + + if (nla_put_nohdr(skb, sizeof(e), &e) || + nla_put_u32(skb, MDBA_MDB_EATTR_TIMER, 0)) + goto nest_err; + + if (!vxlan_addr_any(&mdb_entry->key.src) && + vxlan_nla_put_addr(skb, MDBA_MDB_EATTR_SOURCE, &mdb_entry->key.src)) + goto nest_err; + + if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, remote->rt_protocol) || + nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE, remote->filter_mode) || + vxlan_mdb_entry_info_fill_srcs(skb, remote) || + vxlan_nla_put_addr(skb, MDBA_MDB_EATTR_DST, &rd->remote_ip)) + goto nest_err; + + if (rd->remote_port && rd->remote_port != vxlan->cfg.dst_port && + nla_put_u16(skb, MDBA_MDB_EATTR_DST_PORT, + be16_to_cpu(rd->remote_port))) + goto nest_err; + + if (rd->remote_vni != vxlan->default_dst.remote_vni && + nla_put_u32(skb, MDBA_MDB_EATTR_VNI, be32_to_cpu(rd->remote_vni))) + goto nest_err; + + if (rd->remote_ifindex && + nla_put_u32(skb, MDBA_MDB_EATTR_IFINDEX, rd->remote_ifindex)) + goto nest_err; + + if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && + mdb_entry->key.vni && nla_put_u32(skb, MDBA_MDB_EATTR_SRC_VNI, + be32_to_cpu(mdb_entry->key.vni))) + goto nest_err; + + nla_nest_end(skb, nest); + + return 0; + +nest_err: + 
nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int vxlan_mdb_entry_fill(const struct vxlan_dev *vxlan, + struct sk_buff *skb, + struct vxlan_mdb_dump_ctx *ctx, + const struct vxlan_mdb_entry *mdb_entry) +{ + int remote_idx = 0, s_remote_idx = ctx->remote_idx; + struct vxlan_mdb_remote *remote; + struct nlattr *nest; + int err = 0; + + nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY); + if (!nest) + return -EMSGSIZE; + + list_for_each_entry(remote, &mdb_entry->remotes, list) { + if (remote_idx < s_remote_idx) + goto skip; + + err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote); + if (err) + break; +skip: + remote_idx++; + } + + ctx->remote_idx = err ? remote_idx : 0; + nla_nest_end(skb, nest); + return err; +} + +static int vxlan_mdb_fill(const struct vxlan_dev *vxlan, struct sk_buff *skb, + struct vxlan_mdb_dump_ctx *ctx) +{ + int entry_idx = 0, s_entry_idx = ctx->entry_idx; + struct vxlan_mdb_entry *mdb_entry; + struct nlattr *nest; + int err = 0; + + nest = nla_nest_start_noflag(skb, MDBA_MDB); + if (!nest) + return -EMSGSIZE; + + hlist_for_each_entry(mdb_entry, &vxlan->mdb_list, mdb_node) { + if (entry_idx < s_entry_idx) + goto skip; + + err = vxlan_mdb_entry_fill(vxlan, skb, ctx, mdb_entry); + if (err) + break; +skip: + entry_idx++; + } + + ctx->entry_idx = err ? entry_idx : 0; + nla_nest_end(skb, nest); + return err; +} + +int vxlan_mdb_dump(struct net_device *dev, struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct vxlan_mdb_dump_ctx *ctx = (void *)cb->ctx; + struct vxlan_dev *vxlan = netdev_priv(dev); + struct br_port_msg *bpm; + struct nlmsghdr *nlh; + int err; + + ASSERT_RTNL(); + + NL_ASSERT_DUMP_CTX_FITS(struct vxlan_mdb_dump_ctx); + + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWMDB, sizeof(*bpm), + NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); + bpm->family = AF_BRIDGE; + bpm->ifindex = dev->ifindex; + + err = vxlan_mdb_fill(vxlan, skb, ctx); + + nlmsg_end(skb, nlh); + + cb->seq = vxlan->mdb_seq; + nl_dump_check_consistent(cb, nlh); + + return err; +} + +static const struct nla_policy +vxlan_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = { + [MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY, + sizeof(struct in_addr), + sizeof(struct in6_addr)), +}; + +static const struct nla_policy +vxlan_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = { + [MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_entry_pol), +}; + +static struct netlink_range_validation vni_range = { + .max = VXLAN_N_VID - 1, +}; + +static const struct nla_policy vxlan_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = { + [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY, + sizeof(struct in_addr), + sizeof(struct in6_addr)), + [MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE, + MCAST_INCLUDE), + [MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_pol), + [MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC), + [MDBE_ATTR_DST] = NLA_POLICY_RANGE(NLA_BINARY, + sizeof(struct in_addr), + sizeof(struct in6_addr)), + [MDBE_ATTR_DST_PORT] = { .type = NLA_U16 }, + [MDBE_ATTR_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range), + [MDBE_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), + [MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range), +}; + +static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto, + struct netlink_ext_ack *extack) +{ + switch (proto) { + case htons(ETH_P_IP): + if (nla_len(attr) != sizeof(struct in_addr)) { + 
NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length"); + return false; + } + if (ipv4_is_multicast(nla_get_in_addr(attr))) { + NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed"); + return false; + } + break; +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): { + struct in6_addr src; + + if (nla_len(attr) != sizeof(struct in6_addr)) { + NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length"); + return false; + } + src = nla_get_in6_addr(attr); + if (ipv6_addr_is_multicast(&src)) { + NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed"); + return false; + } + break; + } +#endif + default: + NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address"); + return false; + } + + return true; +} + +static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg, + const struct br_mdb_entry *entry, + const struct nlattr *source_attr) +{ + struct vxlan_mdb_entry_key *group = &cfg->group; + + switch (entry->addr.proto) { + case htons(ETH_P_IP): + group->dst.sa.sa_family = AF_INET; + group->dst.sin.sin_addr.s_addr = entry->addr.u.ip4; + break; +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + group->dst.sa.sa_family = AF_INET6; + group->dst.sin6.sin6_addr = entry->addr.u.ip6; + break; +#endif + } + + if (source_attr) + vxlan_nla_get_addr(&group->src, source_attr); +} + +static bool vxlan_mdb_is_star_g(const struct vxlan_mdb_entry_key *group) +{ + return !vxlan_addr_any(&group->dst) && vxlan_addr_any(&group->src); +} + +static bool vxlan_mdb_is_sg(const struct vxlan_mdb_entry_key *group) +{ + return !vxlan_addr_any(&group->dst) && !vxlan_addr_any(&group->src); +} + +static int vxlan_mdb_config_src_entry_init(struct vxlan_mdb_config *cfg, + __be16 proto, + const struct nlattr *src_entry, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[MDBE_SRCATTR_MAX + 1]; + struct vxlan_mdb_config_src_entry *src; + int err; + + err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry, + vxlan_mdbe_src_list_entry_pol, extack); + if (err) + return err; + + if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS)) + return -EINVAL; + + if (!vxlan_mdb_is_valid_source(tb[MDBE_SRCATTR_ADDRESS], proto, + extack)) + return -EINVAL; + + src = kzalloc(sizeof(*src), GFP_KERNEL); + if (!src) + return -ENOMEM; + + err = vxlan_nla_get_addr(&src->addr, tb[MDBE_SRCATTR_ADDRESS]); + if (err) + goto err_free_src; + + list_add_tail(&src->node, &cfg->src_list); + + return 0; + +err_free_src: + kfree(src); + return err; +} + +static void +vxlan_mdb_config_src_entry_fini(struct vxlan_mdb_config_src_entry *src) +{ + list_del(&src->node); + kfree(src); +} + +static int vxlan_mdb_config_src_list_init(struct vxlan_mdb_config *cfg, + __be16 proto, + const struct nlattr *src_list, + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_config_src_entry *src, *tmp; + struct nlattr *src_entry; + int rem, err; + + nla_for_each_nested(src_entry, src_list, rem) { + err = vxlan_mdb_config_src_entry_init(cfg, proto, src_entry, + extack); + if (err) + goto err_src_entry_init; + } + + return 0; + +err_src_entry_init: + list_for_each_entry_safe_reverse(src, tmp, &cfg->src_list, node) + vxlan_mdb_config_src_entry_fini(src); + return err; +} + +static void vxlan_mdb_config_src_list_fini(struct vxlan_mdb_config *cfg) +{ + struct vxlan_mdb_config_src_entry *src, *tmp; + + list_for_each_entry_safe_reverse(src, tmp, &cfg->src_list, node) + vxlan_mdb_config_src_entry_fini(src); +} + +static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg, + 
const struct br_mdb_entry *entry, + const struct nlattr *set_attrs, + struct netlink_ext_ack *extack) +{ + struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX, set_attrs, + vxlan_mdbe_attrs_pol, extack); + if (err) + return err; + + if (NL_REQ_ATTR_CHECK(extack, set_attrs, mdbe_attrs, MDBE_ATTR_DST)) { + NL_SET_ERR_MSG_MOD(extack, "Missing remote destination IP address"); + return -EINVAL; + } + + if (mdbe_attrs[MDBE_ATTR_SOURCE] && + !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE], + entry->addr.proto, extack)) + return -EINVAL; + + vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); + + /* rtnetlink code only validates that IPv4 group address is + * multicast. + */ + if (!vxlan_addr_is_multicast(&cfg->group.dst) && + !vxlan_addr_any(&cfg->group.dst)) { + NL_SET_ERR_MSG_MOD(extack, "Group address is not multicast"); + return -EINVAL; + } + + if (vxlan_addr_any(&cfg->group.dst) && + mdbe_attrs[MDBE_ATTR_SOURCE]) { + NL_SET_ERR_MSG_MOD(extack, "Source cannot be specified for the all-zeros entry"); + return -EINVAL; + } + + if (vxlan_mdb_is_sg(&cfg->group)) + cfg->filter_mode = MCAST_INCLUDE; + + if (mdbe_attrs[MDBE_ATTR_GROUP_MODE]) { + if (!vxlan_mdb_is_star_g(&cfg->group)) { + NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries"); + return -EINVAL; + } + cfg->filter_mode = nla_get_u8(mdbe_attrs[MDBE_ATTR_GROUP_MODE]); + } + + if (mdbe_attrs[MDBE_ATTR_SRC_LIST]) { + if (!vxlan_mdb_is_star_g(&cfg->group)) { + NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries"); + return -EINVAL; + } + if (!mdbe_attrs[MDBE_ATTR_GROUP_MODE]) { + NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode"); + return -EINVAL; + } + err = vxlan_mdb_config_src_list_init(cfg, entry->addr.proto, + mdbe_attrs[MDBE_ATTR_SRC_LIST], + extack); + if (err) + return err; + } + + if (vxlan_mdb_is_star_g(&cfg->group) && list_empty(&cfg->src_list) && + cfg->filter_mode == MCAST_INCLUDE) { + NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list"); + return -EINVAL; + } + + if (mdbe_attrs[MDBE_ATTR_RTPROT]) + cfg->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]); + + err = vxlan_nla_get_addr(&cfg->remote_ip, mdbe_attrs[MDBE_ATTR_DST]); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Invalid remote destination address"); + goto err_src_list_fini; + } + + if (mdbe_attrs[MDBE_ATTR_DST_PORT]) + cfg->remote_port = + cpu_to_be16(nla_get_u16(mdbe_attrs[MDBE_ATTR_DST_PORT])); + + if (mdbe_attrs[MDBE_ATTR_VNI]) + cfg->remote_vni = + cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_VNI])); + + if (mdbe_attrs[MDBE_ATTR_IFINDEX]) { + cfg->remote_ifindex = + nla_get_s32(mdbe_attrs[MDBE_ATTR_IFINDEX]); + if (!__dev_get_by_index(cfg->vxlan->net, cfg->remote_ifindex)) { + NL_SET_ERR_MSG_MOD(extack, "Outgoing interface not found"); + err = -EINVAL; + goto err_src_list_fini; + } + } + + if (mdbe_attrs[MDBE_ATTR_SRC_VNI]) + cfg->group.vni = + cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI])); + + return 0; + +err_src_list_fini: + vxlan_mdb_config_src_list_fini(cfg); + return err; +} + +static int vxlan_mdb_config_init(struct vxlan_mdb_config *cfg, + struct net_device *dev, struct nlattr *tb[], + u16 nlmsg_flags, + struct netlink_ext_ack *extack) +{ + struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]); + struct vxlan_dev *vxlan = netdev_priv(dev); + + memset(cfg, 0, sizeof(*cfg)); + cfg->vxlan = vxlan; + cfg->group.vni = vxlan->default_dst.remote_vni; + 
INIT_LIST_HEAD(&cfg->src_list); + cfg->nlflags = nlmsg_flags; + cfg->filter_mode = MCAST_EXCLUDE; + cfg->rt_protocol = RTPROT_STATIC; + cfg->remote_vni = vxlan->default_dst.remote_vni; + cfg->remote_port = vxlan->cfg.dst_port; + + if (entry->ifindex != dev->ifindex) { + NL_SET_ERR_MSG_MOD(extack, "Port net device must be the VXLAN net device"); + return -EINVAL; + } + + /* State is not part of the entry key and can be ignored on deletion + * requests. + */ + if ((nlmsg_flags & (NLM_F_CREATE | NLM_F_REPLACE)) && + entry->state != MDB_PERMANENT) { + NL_SET_ERR_MSG_MOD(extack, "MDB entry must be permanent"); + return -EINVAL; + } + + if (entry->flags) { + NL_SET_ERR_MSG_MOD(extack, "Invalid MDB entry flags"); + return -EINVAL; + } + + if (entry->vid) { + NL_SET_ERR_MSG_MOD(extack, "VID must not be specified"); + return -EINVAL; + } + + if (entry->addr.proto != htons(ETH_P_IP) && + entry->addr.proto != htons(ETH_P_IPV6)) { + NL_SET_ERR_MSG_MOD(extack, "Group address must be an IPv4 / IPv6 address"); + return -EINVAL; + } + + if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY_ATTRS)) { + NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY_ATTRS attribute"); + return -EINVAL; + } + + return vxlan_mdb_config_attrs_init(cfg, entry, tb[MDBA_SET_ENTRY_ATTRS], + extack); +} + +static void vxlan_mdb_config_fini(struct vxlan_mdb_config *cfg) +{ + vxlan_mdb_config_src_list_fini(cfg); +} + +static struct vxlan_mdb_entry * +vxlan_mdb_entry_lookup(struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry_key *group) +{ + return rhashtable_lookup_fast(&vxlan->mdb_tbl, group, + vxlan_mdb_rht_params); +} + +static struct vxlan_mdb_remote * +vxlan_mdb_remote_lookup(const struct vxlan_mdb_entry *mdb_entry, + const union vxlan_addr *addr) +{ + struct vxlan_mdb_remote *remote; + + list_for_each_entry(remote, &mdb_entry->remotes, list) { + struct vxlan_rdst *rd = rtnl_dereference(remote->rd); + + if (vxlan_addr_equal(addr, &rd->remote_ip)) + return remote; + } + + return NULL; +} + +static void vxlan_mdb_rdst_free(struct rcu_head *head) +{ + struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); + + dst_cache_destroy(&rd->dst_cache); + kfree(rd); +} + +static int vxlan_mdb_remote_rdst_init(const struct vxlan_mdb_config *cfg, + struct vxlan_mdb_remote *remote) +{ + struct vxlan_rdst *rd; + int err; + + rd = kzalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return -ENOMEM; + + err = dst_cache_init(&rd->dst_cache, GFP_KERNEL); + if (err) + goto err_free_rdst; + + rd->remote_ip = cfg->remote_ip; + rd->remote_port = cfg->remote_port; + rd->remote_vni = cfg->remote_vni; + rd->remote_ifindex = cfg->remote_ifindex; + rcu_assign_pointer(remote->rd, rd); + + return 0; + +err_free_rdst: + kfree(rd); + return err; +} + +static void vxlan_mdb_remote_rdst_fini(struct vxlan_rdst *rd) +{ + call_rcu(&rd->rcu, vxlan_mdb_rdst_free); +} + +static int vxlan_mdb_remote_init(const struct vxlan_mdb_config *cfg, + struct vxlan_mdb_remote *remote) +{ + int err; + + err = vxlan_mdb_remote_rdst_init(cfg, remote); + if (err) + return err; + + remote->flags = cfg->flags; + remote->filter_mode = cfg->filter_mode; + remote->rt_protocol = cfg->rt_protocol; + INIT_HLIST_HEAD(&remote->src_list); + + return 0; +} + +static void vxlan_mdb_remote_fini(struct vxlan_dev *vxlan, + struct vxlan_mdb_remote *remote) +{ + WARN_ON_ONCE(!hlist_empty(&remote->src_list)); + vxlan_mdb_remote_rdst_fini(rtnl_dereference(remote->rd)); +} + +static struct vxlan_mdb_src_entry * +vxlan_mdb_remote_src_entry_lookup(const struct vxlan_mdb_remote *remote, + 
const union vxlan_addr *addr) +{ + struct vxlan_mdb_src_entry *ent; + + hlist_for_each_entry(ent, &remote->src_list, node) { + if (vxlan_addr_equal(&ent->addr, addr)) + return ent; + } + + return NULL; +} + +static struct vxlan_mdb_src_entry * +vxlan_mdb_remote_src_entry_add(struct vxlan_mdb_remote *remote, + const union vxlan_addr *addr) +{ + struct vxlan_mdb_src_entry *ent; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return NULL; + + ent->addr = *addr; + hlist_add_head(&ent->node, &remote->src_list); + + return ent; +} + +static void +vxlan_mdb_remote_src_entry_del(struct vxlan_mdb_src_entry *ent) +{ + hlist_del(&ent->node); + kfree(ent); +} + +static int +vxlan_mdb_remote_src_fwd_add(const struct vxlan_mdb_config *cfg, + const union vxlan_addr *addr, + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_config sg_cfg; + + memset(&sg_cfg, 0, sizeof(sg_cfg)); + sg_cfg.vxlan = cfg->vxlan; + sg_cfg.group.src = *addr; + sg_cfg.group.dst = cfg->group.dst; + sg_cfg.group.vni = cfg->group.vni; + INIT_LIST_HEAD(&sg_cfg.src_list); + sg_cfg.remote_ip = cfg->remote_ip; + sg_cfg.remote_ifindex = cfg->remote_ifindex; + sg_cfg.remote_vni = cfg->remote_vni; + sg_cfg.remote_port = cfg->remote_port; + sg_cfg.nlflags = cfg->nlflags; + sg_cfg.filter_mode = MCAST_INCLUDE; + if (cfg->filter_mode == MCAST_EXCLUDE) + sg_cfg.flags = VXLAN_MDB_REMOTE_F_BLOCKED; + sg_cfg.rt_protocol = cfg->rt_protocol; + + return __vxlan_mdb_add(&sg_cfg, extack); +} + +static void +vxlan_mdb_remote_src_fwd_del(struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry_key *group, + const struct vxlan_mdb_remote *remote, + const union vxlan_addr *addr) +{ + struct vxlan_rdst *rd = rtnl_dereference(remote->rd); + struct vxlan_mdb_config sg_cfg; + + memset(&sg_cfg, 0, sizeof(sg_cfg)); + sg_cfg.vxlan = vxlan; + sg_cfg.group.src = *addr; + sg_cfg.group.dst = group->dst; + sg_cfg.group.vni = group->vni; + INIT_LIST_HEAD(&sg_cfg.src_list); + sg_cfg.remote_ip = rd->remote_ip; + + __vxlan_mdb_del(&sg_cfg, NULL); +} + +static int +vxlan_mdb_remote_src_add(const struct vxlan_mdb_config *cfg, + struct vxlan_mdb_remote *remote, + const struct vxlan_mdb_config_src_entry *src, + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_src_entry *ent; + int err; + + ent = vxlan_mdb_remote_src_entry_lookup(remote, &src->addr); + if (!ent) { + ent = vxlan_mdb_remote_src_entry_add(remote, &src->addr); + if (!ent) + return -ENOMEM; + } else if (!(cfg->nlflags & NLM_F_REPLACE)) { + NL_SET_ERR_MSG_MOD(extack, "Source entry already exists"); + return -EEXIST; + } + + err = vxlan_mdb_remote_src_fwd_add(cfg, &ent->addr, extack); + if (err) + goto err_src_del; + + /* Clear flags in case source entry was marked for deletion as part of + * replace flow. 
+ */ + ent->flags = 0; + + return 0; + +err_src_del: + vxlan_mdb_remote_src_entry_del(ent); + return err; +} + +static void vxlan_mdb_remote_src_del(struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry_key *group, + const struct vxlan_mdb_remote *remote, + struct vxlan_mdb_src_entry *ent) +{ + vxlan_mdb_remote_src_fwd_del(vxlan, group, remote, &ent->addr); + vxlan_mdb_remote_src_entry_del(ent); +} + +static int vxlan_mdb_remote_srcs_add(const struct vxlan_mdb_config *cfg, + struct vxlan_mdb_remote *remote, + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_config_src_entry *src; + struct vxlan_mdb_src_entry *ent; + struct hlist_node *tmp; + int err; + + list_for_each_entry(src, &cfg->src_list, node) { + err = vxlan_mdb_remote_src_add(cfg, remote, src, extack); + if (err) + goto err_src_del; + } + + return 0; + +err_src_del: + hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node) + vxlan_mdb_remote_src_del(cfg->vxlan, &cfg->group, remote, ent); + return err; +} + +static void vxlan_mdb_remote_srcs_del(struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry_key *group, + struct vxlan_mdb_remote *remote) +{ + struct vxlan_mdb_src_entry *ent; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node) + vxlan_mdb_remote_src_del(vxlan, group, remote, ent); +} + +static size_t +vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group, + const struct vxlan_mdb_remote *remote) +{ + struct vxlan_mdb_src_entry *ent; + size_t nlmsg_size; + + if (hlist_empty(&remote->src_list)) + return 0; + + /* MDBA_MDB_EATTR_SRC_LIST */ + nlmsg_size = nla_total_size(0); + + hlist_for_each_entry(ent, &remote->src_list, node) { + /* MDBA_MDB_SRCLIST_ENTRY */ + nlmsg_size += nla_total_size(0) + + /* MDBA_MDB_SRCATTR_ADDRESS */ + nla_total_size(vxlan_addr_size(&group->dst)) + + /* MDBA_MDB_SRCATTR_TIMER */ + nla_total_size(sizeof(u8)); + } + + return nlmsg_size; +} + +static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote) +{ + const struct vxlan_mdb_entry_key *group = &mdb_entry->key; + struct vxlan_rdst *rd = rtnl_dereference(remote->rd); + size_t nlmsg_size; + + nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY_INFO */ + nla_total_size(sizeof(struct br_mdb_entry)) + + /* MDBA_MDB_EATTR_TIMER */ + nla_total_size(sizeof(u32)); + /* MDBA_MDB_EATTR_SOURCE */ + if (vxlan_mdb_is_sg(group)) + nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst)); + /* MDBA_MDB_EATTR_RTPROT */ + nlmsg_size += nla_total_size(sizeof(u8)); + /* MDBA_MDB_EATTR_SRC_LIST */ + nlmsg_size += vxlan_mdb_nlmsg_src_list_size(group, remote); + /* MDBA_MDB_EATTR_GROUP_MODE */ + nlmsg_size += nla_total_size(sizeof(u8)); + /* MDBA_MDB_EATTR_DST */ + nlmsg_size += nla_total_size(vxlan_addr_size(&rd->remote_ip)); + /* MDBA_MDB_EATTR_DST_PORT */ + if (rd->remote_port && rd->remote_port != vxlan->cfg.dst_port) + nlmsg_size += nla_total_size(sizeof(u16)); + /* MDBA_MDB_EATTR_VNI */ + if (rd->remote_vni != vxlan->default_dst.remote_vni) + nlmsg_size += nla_total_size(sizeof(u32)); + /* MDBA_MDB_EATTR_IFINDEX */ + if (rd->remote_ifindex) + nlmsg_size += nla_total_size(sizeof(u32)); + /* MDBA_MDB_EATTR_SRC_VNI */ + if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && group->vni) + nlmsg_size += nla_total_size(sizeof(u32)); + + return nlmsg_size; +} + +static int vxlan_mdb_nlmsg_fill(const struct 
vxlan_dev *vxlan, + struct sk_buff *skb, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote, + int type) +{ + struct nlattr *mdb_nest, *mdb_entry_nest; + struct br_port_msg *bpm; + struct nlmsghdr *nlh; + + nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0); + if (!nlh) + return -EMSGSIZE; + + bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); + bpm->family = AF_BRIDGE; + bpm->ifindex = vxlan->dev->ifindex; + + mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB); + if (!mdb_nest) + goto cancel; + mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY); + if (!mdb_entry_nest) + goto cancel; + + if (vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote)) + goto cancel; + + nla_nest_end(skb, mdb_entry_nest); + nla_nest_end(skb, mdb_nest); + nlmsg_end(skb, nlh); + + return 0; + +cancel: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static void vxlan_mdb_remote_notify(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote, + int type) +{ + struct net *net = dev_net(vxlan->dev); + struct sk_buff *skb; + int err = -ENOBUFS; + + skb = nlmsg_new(vxlan_mdb_nlmsg_size(vxlan, mdb_entry, remote), + GFP_KERNEL); + if (!skb) + goto errout; + + err = vxlan_mdb_nlmsg_fill(vxlan, skb, mdb_entry, remote, type); + if (err) { + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_KERNEL); + return; +errout: + rtnl_set_sk_err(net, RTNLGRP_MDB, err); +} + +static int +vxlan_mdb_remote_srcs_replace(const struct vxlan_mdb_config *cfg, + const struct vxlan_mdb_entry *mdb_entry, + struct vxlan_mdb_remote *remote, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = cfg->vxlan; + struct vxlan_mdb_src_entry *ent; + struct hlist_node *tmp; + int err; + + hlist_for_each_entry(ent, &remote->src_list, node) + ent->flags |= VXLAN_SGRP_F_DELETE; + + err = vxlan_mdb_remote_srcs_add(cfg, remote, extack); + if (err) + goto err_clear_delete; + + hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node) { + if (ent->flags & VXLAN_SGRP_F_DELETE) + vxlan_mdb_remote_src_del(vxlan, &mdb_entry->key, remote, + ent); + } + + return 0; + +err_clear_delete: + hlist_for_each_entry(ent, &remote->src_list, node) + ent->flags &= ~VXLAN_SGRP_F_DELETE; + return err; +} + +static int vxlan_mdb_remote_replace(const struct vxlan_mdb_config *cfg, + const struct vxlan_mdb_entry *mdb_entry, + struct vxlan_mdb_remote *remote, + struct netlink_ext_ack *extack) +{ + struct vxlan_rdst *new_rd, *old_rd = rtnl_dereference(remote->rd); + struct vxlan_dev *vxlan = cfg->vxlan; + int err; + + err = vxlan_mdb_remote_rdst_init(cfg, remote); + if (err) + return err; + new_rd = rtnl_dereference(remote->rd); + + err = vxlan_mdb_remote_srcs_replace(cfg, mdb_entry, remote, extack); + if (err) + goto err_rdst_reset; + + WRITE_ONCE(remote->flags, cfg->flags); + WRITE_ONCE(remote->filter_mode, cfg->filter_mode); + remote->rt_protocol = cfg->rt_protocol; + vxlan_mdb_remote_notify(vxlan, mdb_entry, remote, RTM_NEWMDB); + + vxlan_mdb_remote_rdst_fini(old_rd); + + return 0; + +err_rdst_reset: + rcu_assign_pointer(remote->rd, old_rd); + vxlan_mdb_remote_rdst_fini(new_rd); + return err; +} + +static int vxlan_mdb_remote_add(const struct vxlan_mdb_config *cfg, + struct vxlan_mdb_entry *mdb_entry, + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_remote *remote; + int err; + + remote = vxlan_mdb_remote_lookup(mdb_entry, &cfg->remote_ip); + if (remote) { + if (!(cfg->nlflags & NLM_F_REPLACE)) { + NL_SET_ERR_MSG_MOD(extack, 
"Replace not specified and MDB remote entry already exists"); + return -EEXIST; + } + return vxlan_mdb_remote_replace(cfg, mdb_entry, remote, extack); + } + + if (!(cfg->nlflags & NLM_F_CREATE)) { + NL_SET_ERR_MSG_MOD(extack, "Create not specified and entry does not exist"); + return -ENOENT; + } + + remote = kzalloc(sizeof(*remote), GFP_KERNEL); + if (!remote) + return -ENOMEM; + + err = vxlan_mdb_remote_init(cfg, remote); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize remote MDB entry"); + goto err_free_remote; + } + + err = vxlan_mdb_remote_srcs_add(cfg, remote, extack); + if (err) + goto err_remote_fini; + + list_add_rcu(&remote->list, &mdb_entry->remotes); + vxlan_mdb_remote_notify(cfg->vxlan, mdb_entry, remote, RTM_NEWMDB); + + return 0; + +err_remote_fini: + vxlan_mdb_remote_fini(cfg->vxlan, remote); +err_free_remote: + kfree(remote); + return err; +} + +static void vxlan_mdb_remote_del(struct vxlan_dev *vxlan, + struct vxlan_mdb_entry *mdb_entry, + struct vxlan_mdb_remote *remote) +{ + vxlan_mdb_remote_notify(vxlan, mdb_entry, remote, RTM_DELMDB); + list_del_rcu(&remote->list); + vxlan_mdb_remote_srcs_del(vxlan, &mdb_entry->key, remote); + vxlan_mdb_remote_fini(vxlan, remote); + kfree_rcu(remote, rcu); +} + +static struct vxlan_mdb_entry * +vxlan_mdb_entry_get(struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry_key *group) +{ + struct vxlan_mdb_entry *mdb_entry; + int err; + + mdb_entry = vxlan_mdb_entry_lookup(vxlan, group); + if (mdb_entry) + return mdb_entry; + + mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); + if (!mdb_entry) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&mdb_entry->remotes); + memcpy(&mdb_entry->key, group, sizeof(mdb_entry->key)); + hlist_add_head(&mdb_entry->mdb_node, &vxlan->mdb_list); + + err = rhashtable_lookup_insert_fast(&vxlan->mdb_tbl, + &mdb_entry->rhnode, + vxlan_mdb_rht_params); + if (err) + goto err_free_entry; + + return mdb_entry; + +err_free_entry: + hlist_del(&mdb_entry->mdb_node); + kfree(mdb_entry); + return ERR_PTR(err); +} + +static void vxlan_mdb_entry_put(struct vxlan_dev *vxlan, + struct vxlan_mdb_entry *mdb_entry) +{ + if (!list_empty(&mdb_entry->remotes)) + return; + + rhashtable_remove_fast(&vxlan->mdb_tbl, &mdb_entry->rhnode, + vxlan_mdb_rht_params); + hlist_del(&mdb_entry->mdb_node); + kfree_rcu(mdb_entry, rcu); +} + +static int __vxlan_mdb_add(const struct vxlan_mdb_config *cfg, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = cfg->vxlan; + struct vxlan_mdb_entry *mdb_entry; + int err; + + mdb_entry = vxlan_mdb_entry_get(vxlan, &cfg->group); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); + + err = vxlan_mdb_remote_add(cfg, mdb_entry, extack); + if (err) + goto err_entry_put; + + vxlan->mdb_seq++; + + return 0; + +err_entry_put: + vxlan_mdb_entry_put(vxlan, mdb_entry); + return err; +} + +static int __vxlan_mdb_del(const struct vxlan_mdb_config *cfg, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = cfg->vxlan; + struct vxlan_mdb_entry *mdb_entry; + struct vxlan_mdb_remote *remote; + + mdb_entry = vxlan_mdb_entry_lookup(vxlan, &cfg->group); + if (!mdb_entry) { + NL_SET_ERR_MSG_MOD(extack, "Did not find MDB entry"); + return -ENOENT; + } + + remote = vxlan_mdb_remote_lookup(mdb_entry, &cfg->remote_ip); + if (!remote) { + NL_SET_ERR_MSG_MOD(extack, "Did not find MDB remote entry"); + return -ENOENT; + } + + vxlan_mdb_remote_del(vxlan, mdb_entry, remote); + vxlan_mdb_entry_put(vxlan, mdb_entry); + + vxlan->mdb_seq++; + + return 0; +} + +int vxlan_mdb_add(struct 
net_device *dev, struct nlattr *tb[], u16 nlmsg_flags, + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_config cfg; + int err; + + ASSERT_RTNL(); + + err = vxlan_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack); + if (err) + return err; + + err = __vxlan_mdb_add(&cfg, extack); + + vxlan_mdb_config_fini(&cfg); + return err; +} + +int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[], + struct netlink_ext_ack *extack) +{ + struct vxlan_mdb_config cfg; + int err; + + ASSERT_RTNL(); + + err = vxlan_mdb_config_init(&cfg, dev, tb, 0, extack); + if (err) + return err; + + err = __vxlan_mdb_del(&cfg, extack); + + vxlan_mdb_config_fini(&cfg); + return err; +} + +static void vxlan_mdb_check_empty(void *ptr, void *arg) +{ + WARN_ON_ONCE(1); +} + +static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan, + struct vxlan_mdb_entry *mdb_entry) +{ + struct vxlan_mdb_remote *remote, *tmp; + + list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list) + vxlan_mdb_remote_del(vxlan, mdb_entry, remote); +} + +static void vxlan_mdb_entries_flush(struct vxlan_dev *vxlan) +{ + struct vxlan_mdb_entry *mdb_entry; + struct hlist_node *tmp; + + /* The removal of an entry cannot trigger the removal of another entry + * since entries are always added to the head of the list. + */ + hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) { + vxlan_mdb_remotes_flush(vxlan, mdb_entry); + vxlan_mdb_entry_put(vxlan, mdb_entry); + } +} + +int vxlan_mdb_init(struct vxlan_dev *vxlan) +{ + int err; + + err = rhashtable_init(&vxlan->mdb_tbl, &vxlan_mdb_rht_params); + if (err) + return err; + + INIT_HLIST_HEAD(&vxlan->mdb_list); + + return 0; +} + +void vxlan_mdb_fini(struct vxlan_dev *vxlan) +{ + vxlan_mdb_entries_flush(vxlan); + rhashtable_free_and_destroy(&vxlan->mdb_tbl, vxlan_mdb_check_empty, + NULL); +} diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h index f4977925cb8a..7bcc38faae27 100644 --- a/drivers/net/vxlan/vxlan_private.h +++ b/drivers/net/vxlan/vxlan_private.h @@ -110,6 +110,14 @@ static inline int vxlan_nla_put_addr(struct sk_buff *skb, int attr, return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); } +static inline bool vxlan_addr_is_multicast(const union vxlan_addr *ip) +{ + if (ip->sa.sa_family == AF_INET6) + return ipv6_addr_is_multicast(&ip->sin6.sin6_addr); + else + return ipv4_is_multicast(ip->sin.sin_addr.s_addr); +} + #else /* !CONFIG_IPV6 */ static inline @@ -138,8 +146,21 @@ static inline int vxlan_nla_put_addr(struct sk_buff *skb, int attr, return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr); } +static inline bool vxlan_addr_is_multicast(const union vxlan_addr *ip) +{ + return ipv4_is_multicast(ip->sin.sin_addr.s_addr); +} + #endif +static inline size_t vxlan_addr_size(const union vxlan_addr *ip) +{ + if (ip->sa.sa_family == AF_INET6) + return sizeof(struct in6_addr); + else + return sizeof(__be32); +} + static inline struct vxlan_vni_node * vxlan_vnifilter_lookup(struct vxlan_dev *vxlan, __be32 vni) { @@ -206,4 +227,14 @@ int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip, int rifindex); int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip, int rifindex); + +/* vxlan_mdb.c */ +int vxlan_mdb_dump(struct net_device *dev, struct sk_buff *skb, + struct netlink_callback *cb); +int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags, + struct netlink_ext_ack *extack); +int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[], + struct netlink_ext_ack *extack); 
+int vxlan_mdb_init(struct vxlan_dev *vxlan); +void vxlan_mdb_fini(struct vxlan_dev *vxlan); #endif diff --git a/include/net/vxlan.h b/include/net/vxlan.h index bca5b01af247..110b703d8978 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -3,6 +3,7 @@ #define __NET_VXLAN_H 1 #include +#include #include #include #include @@ -302,6 +303,10 @@ struct vxlan_dev { struct vxlan_vni_group __rcu *vnigrp; struct hlist_head fdb_head[FDB_HASH_SIZE]; + + struct rhashtable mdb_tbl; + struct hlist_head mdb_list; + unsigned int mdb_seq; }; #define VXLAN_F_LEARN 0x01 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index d60c456710b3..c9d624f528c5 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -633,6 +633,11 @@ enum { MDBA_MDB_EATTR_GROUP_MODE, MDBA_MDB_EATTR_SOURCE, MDBA_MDB_EATTR_RTPROT, + MDBA_MDB_EATTR_DST, + MDBA_MDB_EATTR_DST_PORT, + MDBA_MDB_EATTR_VNI, + MDBA_MDB_EATTR_IFINDEX, + MDBA_MDB_EATTR_SRC_VNI, __MDBA_MDB_EATTR_MAX }; #define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1) @@ -728,6 +733,11 @@ enum { MDBE_ATTR_SRC_LIST, MDBE_ATTR_GROUP_MODE, MDBE_ATTR_RTPROT, + MDBE_ATTR_DST, + MDBE_ATTR_DST_PORT, + MDBE_ATTR_VNI, + MDBE_ATTR_IFINDEX, + MDBE_ATTR_SRC_VNI, __MDBE_ATTR_MAX, }; #define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1) -- cgit v1.2.3 From 8e40c3b6e1538401d2cf8d43087ebe1db8026af9 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Wed, 8 Mar 2023 16:15:56 +0530 Subject: wifi: nl80211: Update the documentation of NL80211_SCAN_FLAG_COLOCATED_6GHZ Currently when NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in the scan flags, in addition to the co-located APs, PSC channels in the 6 GHz band would also be scanned if the user space has asked for it. In other words, the scan would happen on PSC channels & co-located 6 GHz channels that were reported in the RNR IE. Update the documentation of NL80211_SCAN_FLAG_COLOCATED_6GHZ flag to reflect the above said behavior. Signed-off-by: Manikanta Pubbisetty Link: https://lore.kernel.org/r/20230308104556.9399-1-quic_mpubbise@quicinc.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 9a0ac0363f1f..14e958a32b84 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -6544,7 +6544,9 @@ enum nl80211_timeout_reason { * channels on which APs are expected to be found. Note that when not set, * the scan logic would scan all 6GHz channels, but since transmission of * probe requests on non PSC channels is limited, it is highly likely that - * these channels would passively be scanned. + * these channels would passively be scanned. Also note that when the flag + * is set, in addition to the colocated APs, PSC channels would also be + * scanned if the user space has asked for it. */ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, -- cgit v1.2.3 From 68b04864ca425d1894c96b8141d4fba1181f11cb Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Wed, 22 Mar 2023 20:24:00 -0700 Subject: bpf: Create links for BPF struct_ops maps. Make bpf_link support struct_ops. Previously, struct_ops were always used alone without any associated links. Upon updating its value, a struct_ops would be activated automatically. Yet other BPF program types required to make a bpf_link with their instances before they could become active. 
Now, however, you can create an inactive struct_ops and create a link to activate it later. With bpf_links, struct_ops behaves like other BPF program types: it can be pinned and unpinned through its link, and it is deactivated when its link is removed, whereas previously someone had to delete the value for it to be deactivated. bpf_links are responsible for registering their associated struct_ops. Only a struct_ops that has the BPF_F_LINK flag set can be used to create a bpf_link; a struct_ops without this flag behaves in the same manner as before and is registered upon updating its value. The BPF_LINK_TYPE_STRUCT_OPS link type serves a dual purpose. Not only is it used to craft the links for BPF struct_ops programs, but also to create links for BPF struct_ops themselves. Since the links of BPF struct_ops programs are only used to create trampolines internally, they are never seen in other contexts and can therefore be reused for struct_ops themselves. To maintain a reference to the map backing this link, we add bpf_struct_ops_link as an additional type. The map pointer is an RCU pointer; the RCU protection won't be needed until later in the patchset. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20230323032405.3735486-4-kuifeng@meta.com Signed-off-by: Martin KaFai Lau --- include/linux/bpf.h | 7 ++ include/uapi/linux/bpf.h | 12 +++- kernel/bpf/bpf_struct_ops.c | 143 ++++++++++++++++++++++++++++++++++++++++- kernel/bpf/syscall.c | 23 ++++--- net/ipv4/bpf_tcp_ca.c | 8 ++- tools/include/uapi/linux/bpf.h | 12 +++- 6 files changed, 190 insertions(+), 15 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f04098468d7a..8552279efe46 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1518,6 +1518,7 @@ struct bpf_struct_ops { void *kdata, const void *udata); int (*reg)(void *kdata); void (*unreg)(void *kdata); + int (*validate)(void *kdata); const struct btf_type *type; const struct btf_type *value_type; const char *name; @@ -1552,6 +1553,7 @@ static inline void bpf_module_put(const void *data, struct module *owner) else module_put(owner); } +int bpf_struct_ops_link_create(union bpf_attr *attr); #ifdef CONFIG_NET /* Define it here to avoid the use of forward declaration */ @@ -1592,6 +1594,11 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, { return -EINVAL; } +static inline int bpf_struct_ops_link_create(union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} + #endif #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 13129df937cd..42f40ee083bf 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1033,6 +1033,7 @@ enum bpf_attach_type { BPF_PERF_EVENT, BPF_TRACE_KPROBE_MULTI, BPF_LSM_CGROUP, + BPF_STRUCT_OPS, __MAX_BPF_ATTACH_TYPE }; @@ -1266,6 +1267,9 @@ enum { /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), + +/* Create a map that will be registered/unregesitered by the backed bpf_link */ + BPF_F_LINK = (1U << 13), }; /* Flags for BPF_PROG_QUERY.
*/ @@ -1507,7 +1511,10 @@ union bpf_attr { } task_fd_query; struct { /* struct used by BPF_LINK_CREATE command */ - __u32 prog_fd; /* eBPF program to attach */ + union { + __u32 prog_fd; /* eBPF program to attach */ + __u32 map_fd; /* struct_ops to attach */ + }; union { __u32 target_fd; /* object to attach to */ __u32 target_ifindex; /* target ifindex */ @@ -6379,6 +6386,9 @@ struct bpf_link_info { struct { __u32 ifindex; } xdp; + struct { + __u32 map_id; + } struct_ops; }; } __attribute__((aligned(8))); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 2f3c4a0e03ee..3d6b5240c25a 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -17,6 +17,7 @@ enum bpf_struct_ops_state { BPF_STRUCT_OPS_STATE_INIT, BPF_STRUCT_OPS_STATE_INUSE, BPF_STRUCT_OPS_STATE_TOBEFREE, + BPF_STRUCT_OPS_STATE_READY, }; #define BPF_STRUCT_OPS_COMMON_VALUE \ @@ -59,6 +60,11 @@ struct bpf_struct_ops_map { struct bpf_struct_ops_value kvalue; }; +struct bpf_struct_ops_link { + struct bpf_link link; + struct bpf_map __rcu *map; +}; + #define VALUE_PREFIX "bpf_struct_ops_" #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1) @@ -500,11 +506,29 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, *(unsigned long *)(udata + moff) = prog->aux->id; } - bpf_map_inc(map); + if (st_map->map.map_flags & BPF_F_LINK) { + err = st_ops->validate(kdata); + if (err) + goto reset_unlock; + set_memory_rox((long)st_map->image, 1); + /* Let bpf_link handle registration & unregistration. + * + * Pair with smp_load_acquire() during lookup_elem(). + */ + smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY); + goto unlock; + } set_memory_rox((long)st_map->image, 1); err = st_ops->reg(kdata); if (likely(!err)) { + /* This refcnt increment on the map here after + * 'st_ops->reg()' is secure since the state of the + * map must be set to INIT at this moment, and thus + * bpf_struct_ops_map_delete_elem() can't unregister + * or transition it to TOBEFREE concurrently. + */ + bpf_map_inc(map); /* Pair with smp_load_acquire() during lookup_elem(). * It ensures the above udata updates (e.g. prog->aux->id) * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set. 
@@ -520,7 +544,6 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, */ set_memory_nx((long)st_map->image, 1); set_memory_rw((long)st_map->image, 1); - bpf_map_put(map); reset_unlock: bpf_struct_ops_map_put_progs(st_map); @@ -538,6 +561,9 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) struct bpf_struct_ops_map *st_map; st_map = (struct bpf_struct_ops_map *)map; + if (st_map->map.map_flags & BPF_F_LINK) + return -EOPNOTSUPP; + prev_state = cmpxchg(&st_map->kvalue.state, BPF_STRUCT_OPS_STATE_INUSE, BPF_STRUCT_OPS_STATE_TOBEFREE); @@ -614,7 +640,7 @@ static void bpf_struct_ops_map_free(struct bpf_map *map) static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr) { if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 || - attr->map_flags || !attr->btf_vmlinux_value_type_id) + (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id) return -EINVAL; return 0; } @@ -638,6 +664,9 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) if (attr->value_size != vt->size) return ERR_PTR(-EINVAL); + if (attr->map_flags & BPF_F_LINK && !st_ops->validate) + return ERR_PTR(-EOPNOTSUPP); + t = st_ops->type; st_map_size = sizeof(*st_map) + @@ -725,3 +754,111 @@ void bpf_struct_ops_put(const void *kdata) bpf_map_put(&st_map->map); } + +static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map) +{ + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + + return map->map_type == BPF_MAP_TYPE_STRUCT_OPS && + map->map_flags & BPF_F_LINK && + /* Pair with smp_store_release() during map_update */ + smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY; +} + +static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link) +{ + struct bpf_struct_ops_link *st_link; + struct bpf_struct_ops_map *st_map; + + st_link = container_of(link, struct bpf_struct_ops_link, link); + st_map = (struct bpf_struct_ops_map *) + rcu_dereference_protected(st_link->map, true); + if (st_map) { + /* st_link->map can be NULL if + * bpf_struct_ops_link_create() fails to register. 
+ */ + st_map->st_ops->unreg(&st_map->kvalue.data); + bpf_map_put(&st_map->map); + } + kfree(st_link); +} + +static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_struct_ops_link *st_link; + struct bpf_map *map; + + st_link = container_of(link, struct bpf_struct_ops_link, link); + rcu_read_lock(); + map = rcu_dereference(st_link->map); + seq_printf(seq, "map_id:\t%d\n", map->id); + rcu_read_unlock(); +} + +static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_struct_ops_link *st_link; + struct bpf_map *map; + + st_link = container_of(link, struct bpf_struct_ops_link, link); + rcu_read_lock(); + map = rcu_dereference(st_link->map); + info->struct_ops.map_id = map->id; + rcu_read_unlock(); + return 0; +} + +static const struct bpf_link_ops bpf_struct_ops_map_lops = { + .dealloc = bpf_struct_ops_map_link_dealloc, + .show_fdinfo = bpf_struct_ops_map_link_show_fdinfo, + .fill_link_info = bpf_struct_ops_map_link_fill_link_info, +}; + +int bpf_struct_ops_link_create(union bpf_attr *attr) +{ + struct bpf_struct_ops_link *link = NULL; + struct bpf_link_primer link_primer; + struct bpf_struct_ops_map *st_map; + struct bpf_map *map; + int err; + + map = bpf_map_get(attr->link_create.map_fd); + if (!map) + return -EINVAL; + + st_map = (struct bpf_struct_ops_map *)map; + + if (!bpf_struct_ops_valid_to_reg(map)) { + err = -EINVAL; + goto err_out; + } + + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto err_out; + } + bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL); + + err = bpf_link_prime(&link->link, &link_primer); + if (err) + goto err_out; + + err = st_map->st_ops->reg(st_map->kvalue.data); + if (err) { + bpf_link_cleanup(&link_primer); + link = NULL; + goto err_out; + } + RCU_INIT_POINTER(link->map, map); + + return bpf_link_settle(&link_primer); + +err_out: + bpf_map_put(map); + kfree(link); + return err; +} + diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cff0348a2871..21f76698875c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2825,16 +2825,19 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) const struct bpf_prog *prog = link->prog; char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; - bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); seq_printf(m, "link_type:\t%s\n" - "link_id:\t%u\n" - "prog_tag:\t%s\n" - "prog_id:\t%u\n", + "link_id:\t%u\n", bpf_link_type_strs[link->type], - link->id, - prog_tag, - prog->aux->id); + link->id); + if (prog) { + bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); + seq_printf(m, + "prog_tag:\t%s\n" + "prog_id:\t%u\n", + prog_tag, + prog->aux->id); + } if (link->ops->show_fdinfo) link->ops->show_fdinfo(link, m); } @@ -4314,7 +4317,8 @@ static int bpf_link_get_info_by_fd(struct file *file, info.type = link->type; info.id = link->id; - info.prog_id = link->prog->aux->id; + if (link->prog) + info.prog_id = link->prog->aux->id; if (link->ops->fill_link_info) { err = link->ops->fill_link_info(link, &info); @@ -4577,6 +4581,9 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) if (CHECK_ATTR(BPF_LINK_CREATE)) return -EINVAL; + if (attr->link_create.attach_type == BPF_STRUCT_OPS) + return bpf_struct_ops_link_create(attr); + prog = bpf_prog_get(attr->link_create.prog_fd); if (IS_ERR(prog)) return PTR_ERR(prog); diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 13fc0c185cd9..bbbd5eb94db2 100644 
--- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -239,8 +239,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name)) <= 0) return -EINVAL; - if (tcp_ca_find(utcp_ca->name)) - return -EEXIST; return 1; } @@ -266,6 +264,11 @@ static void bpf_tcp_ca_unreg(void *kdata) tcp_unregister_congestion_control(kdata); } +static int bpf_tcp_ca_validate(void *kdata) +{ + return tcp_validate_congestion_control(kdata); +} + struct bpf_struct_ops bpf_tcp_congestion_ops = { .verifier_ops = &bpf_tcp_ca_verifier_ops, .reg = bpf_tcp_ca_reg, @@ -273,6 +276,7 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = { .check_member = bpf_tcp_ca_check_member, .init_member = bpf_tcp_ca_init_member, .init = bpf_tcp_ca_init, + .validate = bpf_tcp_ca_validate, .name = "tcp_congestion_ops", }; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 13129df937cd..9cf1deaf21f2 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1033,6 +1033,7 @@ enum bpf_attach_type { BPF_PERF_EVENT, BPF_TRACE_KPROBE_MULTI, BPF_LSM_CGROUP, + BPF_STRUCT_OPS, __MAX_BPF_ATTACH_TYPE }; @@ -1266,6 +1267,9 @@ enum { /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), + +/* Create a map that will be registered/unregesitered by the backed bpf_link */ + BPF_F_LINK = (1U << 13), }; /* Flags for BPF_PROG_QUERY. */ @@ -1507,7 +1511,10 @@ union bpf_attr { } task_fd_query; struct { /* struct used by BPF_LINK_CREATE command */ - __u32 prog_fd; /* eBPF program to attach */ + union { + __u32 prog_fd; /* eBPF program to attach */ + __u32 map_fd; /* eBPF struct_ops to attach */ + }; union { __u32 target_fd; /* object to attach to */ __u32 target_ifindex; /* target ifindex */ @@ -6379,6 +6386,9 @@ struct bpf_link_info { struct { __u32 ifindex; } xdp; + struct { + __u32 map_id; + } struct_ops; }; } __attribute__((aligned(8))); -- cgit v1.2.3 From aef56f2e918bf8fc8de25f0b36e8c2aba44116ec Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Wed, 22 Mar 2023 20:24:02 -0700 Subject: bpf: Update the struct_ops of a bpf_link. By improving the BPF_LINK_UPDATE command of bpf(), it should allow you to conveniently switch between different struct_ops on a single bpf_link. This would enable smoother transitions from one struct_ops to another. The struct_ops maps passing along with BPF_LINK_UPDATE should have the BPF_F_LINK flag. 
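For illustration only (not part of this patch), a minimal userspace sketch of the resulting flow might look as follows. It assumes two struct_ops maps of the same type (e.g. two tcp_congestion_ops) already loaded with BPF_F_LINK, whose fds are old_map_fd and new_map_fd, and keeps error handling minimal:

/* Hypothetical sketch: create a struct_ops link, then atomically switch it
 * over to a second map of the same struct_ops type via BPF_LINK_UPDATE.
 */
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int switch_struct_ops(int old_map_fd, int new_map_fd)
{
	union bpf_attr attr;
	int link_fd, err;

	/* Create the link that registers the first struct_ops map. */
	memset(&attr, 0, sizeof(attr));
	attr.link_create.map_fd = old_map_fd;
	attr.link_create.attach_type = BPF_STRUCT_OPS;
	link_fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
	if (link_fd < 0)
		return -errno;

	/* Replace the registered map with a new one of the same type. */
	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_map_fd = new_map_fd;
	attr.link_update.flags = BPF_F_REPLACE;
	attr.link_update.old_map_fd = old_map_fd;
	err = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
	if (err < 0) {
		close(link_fd);
		return -errno;
	}

	return link_fd;
}

Note that old_map_fd is only checked when BPF_F_REPLACE is set, mirroring the existing prog-based link update.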
Signed-off-by: Kui-Feng Lee Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230323032405.3735486-6-kuifeng@meta.com Signed-off-by: Martin KaFai Lau --- include/linux/bpf.h | 3 +++ include/uapi/linux/bpf.h | 21 +++++++++++++----- kernel/bpf/bpf_struct_ops.c | 48 +++++++++++++++++++++++++++++++++++++++++- kernel/bpf/syscall.c | 34 ++++++++++++++++++++++++++++++ net/ipv4/bpf_tcp_ca.c | 6 ++++++ tools/include/uapi/linux/bpf.h | 21 +++++++++++++----- 6 files changed, 122 insertions(+), 11 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8552279efe46..2d8f3f639e68 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1476,6 +1476,8 @@ struct bpf_link_ops { void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); int (*fill_link_info)(const struct bpf_link *link, struct bpf_link_info *info); + int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, + struct bpf_map *old_map); }; struct bpf_tramp_link { @@ -1518,6 +1520,7 @@ struct bpf_struct_ops { void *kdata, const void *udata); int (*reg)(void *kdata); void (*unreg)(void *kdata); + int (*update)(void *kdata, void *old_kdata); int (*validate)(void *kdata); const struct btf_type *type; const struct btf_type *value_type; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 42f40ee083bf..e3d3b5160d26 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1555,12 +1555,23 @@ union bpf_attr { struct { /* struct used by BPF_LINK_UPDATE command */ __u32 link_fd; /* link fd */ - /* new program fd to update link with */ - __u32 new_prog_fd; + union { + /* new program fd to update link with */ + __u32 new_prog_fd; + /* new struct_ops map fd to update link with */ + __u32 new_map_fd; + }; __u32 flags; /* extra flags */ - /* expected link's program fd; is specified only if - * BPF_F_REPLACE flag is set in flags */ - __u32 old_prog_fd; + union { + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags. + */ + __u32 old_prog_fd; + /* expected link's map fd; is specified only + * if BPF_F_REPLACE flag is set. 
+ */ + __u32 old_map_fd; + }; } link_update; struct { diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 3d6b5240c25a..6401deca3b56 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -65,6 +65,8 @@ struct bpf_struct_ops_link { struct bpf_map __rcu *map; }; +static DEFINE_MUTEX(update_mutex); + #define VALUE_PREFIX "bpf_struct_ops_" #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1) @@ -664,7 +666,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) if (attr->value_size != vt->size) return ERR_PTR(-EINVAL); - if (attr->map_flags & BPF_F_LINK && !st_ops->validate) + if (attr->map_flags & BPF_F_LINK && (!st_ops->validate || !st_ops->update)) return ERR_PTR(-EOPNOTSUPP); t = st_ops->type; @@ -810,10 +812,54 @@ static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link, return 0; } +static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map, + struct bpf_map *expected_old_map) +{ + struct bpf_struct_ops_map *st_map, *old_st_map; + struct bpf_map *old_map; + struct bpf_struct_ops_link *st_link; + int err = 0; + + st_link = container_of(link, struct bpf_struct_ops_link, link); + st_map = container_of(new_map, struct bpf_struct_ops_map, map); + + if (!bpf_struct_ops_valid_to_reg(new_map)) + return -EINVAL; + + mutex_lock(&update_mutex); + + old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex)); + if (expected_old_map && old_map != expected_old_map) { + err = -EPERM; + goto err_out; + } + + old_st_map = container_of(old_map, struct bpf_struct_ops_map, map); + /* The new and old struct_ops must be the same type. */ + if (st_map->st_ops != old_st_map->st_ops) { + err = -EINVAL; + goto err_out; + } + + err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); + if (err) + goto err_out; + + bpf_map_inc(new_map); + rcu_assign_pointer(st_link->map, new_map); + bpf_map_put(old_map); + +err_out: + mutex_unlock(&update_mutex); + + return err; +} + static const struct bpf_link_ops bpf_struct_ops_map_lops = { .dealloc = bpf_struct_ops_map_link_dealloc, .show_fdinfo = bpf_struct_ops_map_link_show_fdinfo, .fill_link_info = bpf_struct_ops_map_link_fill_link_info, + .update_map = bpf_struct_ops_map_link_update, }; int bpf_struct_ops_link_create(union bpf_attr *attr) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 21f76698875c..b4d758fa5981 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4682,6 +4682,35 @@ out: return ret; } +static int link_update_map(struct bpf_link *link, union bpf_attr *attr) +{ + struct bpf_map *new_map, *old_map = NULL; + int ret; + + new_map = bpf_map_get(attr->link_update.new_map_fd); + if (IS_ERR(new_map)) + return -EINVAL; + + if (attr->link_update.flags & BPF_F_REPLACE) { + old_map = bpf_map_get(attr->link_update.old_map_fd); + if (IS_ERR(old_map)) { + ret = -EINVAL; + goto out_put; + } + } else if (attr->link_update.old_map_fd) { + ret = -EINVAL; + goto out_put; + } + + ret = link->ops->update_map(link, new_map, old_map); + + if (old_map) + bpf_map_put(old_map); +out_put: + bpf_map_put(new_map); + return ret; +} + #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd static int link_update(union bpf_attr *attr) @@ -4702,6 +4731,11 @@ static int link_update(union bpf_attr *attr) if (IS_ERR(link)) return PTR_ERR(link); + if (link->ops->update_map) { + ret = link_update_map(link, attr); + goto out_put_link; + } + new_prog = bpf_prog_get(attr->link_update.new_prog_fd); if 
(IS_ERR(new_prog)) { ret = PTR_ERR(new_prog); diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index bbbd5eb94db2..e8b27826283e 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -264,6 +264,11 @@ static void bpf_tcp_ca_unreg(void *kdata) tcp_unregister_congestion_control(kdata); } +static int bpf_tcp_ca_update(void *kdata, void *old_kdata) +{ + return tcp_update_congestion_control(kdata, old_kdata); +} + static int bpf_tcp_ca_validate(void *kdata) { return tcp_validate_congestion_control(kdata); @@ -273,6 +278,7 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = { .verifier_ops = &bpf_tcp_ca_verifier_ops, .reg = bpf_tcp_ca_reg, .unreg = bpf_tcp_ca_unreg, + .update = bpf_tcp_ca_update, .check_member = bpf_tcp_ca_check_member, .init_member = bpf_tcp_ca_init_member, .init = bpf_tcp_ca_init, diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9cf1deaf21f2..d6c5a022ae28 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1555,12 +1555,23 @@ union bpf_attr { struct { /* struct used by BPF_LINK_UPDATE command */ __u32 link_fd; /* link fd */ - /* new program fd to update link with */ - __u32 new_prog_fd; + union { + /* new program fd to update link with */ + __u32 new_prog_fd; + /* new struct_ops map fd to update link with */ + __u32 new_map_fd; + }; __u32 flags; /* extra flags */ - /* expected link's program fd; is specified only if - * BPF_F_REPLACE flag is set in flags */ - __u32 old_prog_fd; + union { + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags. + */ + __u32 old_prog_fd; + /* expected link's map fd; is specified only + * if BPF_F_REPLACE flag is set. + */ + __u32 old_map_fd; + }; } link_update; struct { -- cgit v1.2.3 From dbbb27e183b1568d5a907ace1cd144b0709ea52a Mon Sep 17 00:00:00 2001 From: Aloka Dixit Date: Thu, 23 Mar 2023 04:38:00 -0700 Subject: cfg80211: support RNR for EMA AP As per IEEE Std 802.11ax-2021, 11.1.3.8.3 Discovery of a nontransmitted BSSID profile, an EMA AP that transmits a Beacon frame carrying a partial list of nontransmitted BSSID profiles should include in the frame a Reduced Neighbor Report element carrying information for at least the nontransmitted BSSIDs that are not present in the Multiple BSSID element carried in that frame. Add new nested attribute NL80211_ATTR_EMA_RNR_ELEMS to support the above. Number of RNR elements must be more than or equal to the number of MBSSID elements. This attribute can be used only when EMA is enabled. Userspace is responsible for splitting the RNR into multiple elements such that each element excludes the non-transmitting profiles already included in the MBSSID element (%NL80211_ATTR_MBSSID_ELEMS) at the same index. Each EMA beacon will be generated by adding MBSSID and RNR elements at the same index. If the userspace provides more RNR elements than the number of MBSSID elements then these will be added in every EMA beacon. 
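For illustration only (not part of this change), userspace might nest the pre-split RNR elements roughly as in the sketch below, mirroring how the MBSSID elements are passed. It assumes libnl-3 and that rnr_elem[i]/rnr_len[i] hold the RNR element matching the MBSSID element at index i; the surrounding NL80211_CMD_START_AP message construction is omitted:

/* Hypothetical helper: add NL80211_ATTR_EMA_RNR_ELEMS to an nl80211 message. */
#include <errno.h>
#include <netlink/genl/genl.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

static int put_ema_rnr_elems(struct nl_msg *msg,
			     const unsigned char *const *rnr_elem,
			     const size_t *rnr_len, unsigned int cnt)
{
	struct nlattr *nested;
	unsigned int i;

	nested = nla_nest_start(msg, NL80211_ATTR_EMA_RNR_ELEMS);
	if (!nested)
		return -ENOBUFS;

	/* One entry per MBSSID element, in the same order; any extra
	 * entries are carried in every EMA beacon.
	 */
	for (i = 0; i < cnt; i++)
		if (nla_put(msg, i + 1, rnr_len[i], rnr_elem[i]))
			return -ENOBUFS;

	nla_nest_end(msg, nested);
	return 0;
}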
Signed-off-by: Aloka Dixit Link: https://lore.kernel.org/r/20230323113801.6903-2-quic_alokad@quicinc.com [Johannes: validate elements] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 19 +++++++++++ include/uapi/linux/nl80211.h | 13 ++++++++ net/wireless/nl80211.c | 79 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 104 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 86cb048dc924..3cf236520288 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1178,6 +1178,23 @@ struct cfg80211_mbssid_elems { } elem[]; }; +/** + * struct cfg80211_rnr_elems - Reduced neighbor report (RNR) elements + * + * @cnt: Number of elements in array %elems. + * + * @elem: Array of RNR element(s) to be added into Beacon frames. + * @elem.data: Data for RNR elements. + * @elem.len: Length of data. + */ +struct cfg80211_rnr_elems { + u8 cnt; + struct { + const u8 *data; + size_t len; + } elem[]; +}; + /** * struct cfg80211_beacon_data - beacon data * @link_id: the link ID for the AP MLD link sending this beacon @@ -1198,6 +1215,7 @@ struct cfg80211_mbssid_elems { * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) * @mbssid_ies: multiple BSSID elements + * @rnr_ies: reduced neighbor report elements * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) * @lci: Measurement Report element content, starting with Measurement Token @@ -1221,6 +1239,7 @@ struct cfg80211_beacon_data { const u8 *lci; const u8 *civicloc; struct cfg80211_mbssid_elems *mbssid_ies; + struct cfg80211_rnr_elems *rnr_ies; s8 ftm_responder; size_t head_len, tail_len; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 14e958a32b84..cf4fb981e131 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2794,6 +2794,17 @@ enum nl80211_commands { * @NL80211_ATTR_HW_TIMESTAMP_ENABLED: Indicates whether HW timestamping should * be enabled or not (flag attribute). * + * @NL80211_ATTR_EMA_RNR_ELEMS: Optional nested attribute for + * reduced neighbor report (RNR) elements. This attribute can be used + * only when NL80211_MBSSID_CONFIG_ATTR_EMA is enabled. + * Userspace is responsible for splitting the RNR into multiple + * elements such that each element excludes the non-transmitting + * profiles already included in the MBSSID element + * (%NL80211_ATTR_MBSSID_ELEMS) at the same index. Each EMA beacon + * will be generated by adding MBSSID and RNR elements at the same + * index. If the userspace includes more RNR elements than number of + * MBSSID elements then these will be added in every EMA beacon. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3328,6 +3339,8 @@ enum nl80211_attrs { NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS, NL80211_ATTR_HW_TIMESTAMP_ENABLED, + NL80211_ATTR_EMA_RNR_ELEMS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0a31b1d2845d..80a20d69f285 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -809,6 +809,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS] = { .type = NLA_U16 }, [NL80211_ATTR_HW_TIMESTAMP_ENABLED] = { .type = NLA_FLAG }, + [NL80211_ATTR_EMA_RNR_ELEMS] = { .type = NLA_NESTED }, }; /* policy for the key attributes */ @@ -5425,6 +5426,38 @@ nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs) return elems; } +static struct cfg80211_rnr_elems * +nl80211_parse_rnr_elems(struct wiphy *wiphy, struct nlattr *attrs, + struct netlink_ext_ack *extack) +{ + struct nlattr *nl_elems; + struct cfg80211_rnr_elems *elems; + int rem_elems; + u8 i = 0, num_elems = 0; + + nla_for_each_nested(nl_elems, attrs, rem_elems) { + int ret; + + ret = validate_ie_attr(nl_elems, extack); + if (ret) + return ERR_PTR(ret); + + num_elems++; + } + + elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); + if (!elems) + return ERR_PTR(-ENOMEM); + + nla_for_each_nested(nl_elems, attrs, rem_elems) { + elems->elem[i].data = nla_data(nl_elems); + elems->elem[i].len = nla_len(nl_elems); + i++; + } + elems->cnt = num_elems; + return elems; +} + static int nl80211_parse_he_bss_color(struct nlattr *attrs, struct cfg80211_he_bss_color *he_bss_color) { @@ -5451,7 +5484,8 @@ static int nl80211_parse_he_bss_color(struct nlattr *attrs, static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, struct nlattr *attrs[], - struct cfg80211_beacon_data *bcn) + struct cfg80211_beacon_data *bcn, + struct netlink_ext_ack *extack) { bool haveinfo = false; int err; @@ -5548,6 +5582,21 @@ static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, return PTR_ERR(mbssid); bcn->mbssid_ies = mbssid; + + if (bcn->mbssid_ies && attrs[NL80211_ATTR_EMA_RNR_ELEMS]) { + struct cfg80211_rnr_elems *rnr = + nl80211_parse_rnr_elems(&rdev->wiphy, + attrs[NL80211_ATTR_EMA_RNR_ELEMS], + extack); + + if (IS_ERR(rnr)) + return PTR_ERR(rnr); + + if (rnr && rnr->cnt < bcn->mbssid_ies->cnt) + return -EINVAL; + + bcn->rnr_ies = rnr; + } } return 0; @@ -5866,7 +5915,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (!params) return -ENOMEM; - err = nl80211_parse_beacon(rdev, info->attrs, &params->beacon); + err = nl80211_parse_beacon(rdev, info->attrs, &params->beacon, + info->extack); if (err) goto out; @@ -6096,6 +6146,11 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } + if (!params->mbssid_config.ema && params->beacon.rnr_ies) { + err = -EINVAL; + goto out_unlock; + } + err = nl80211_calculate_ap_params(params); if (err) goto out_unlock; @@ -6137,6 +6192,7 @@ out: params->mbssid_config.tx_wdev->netdev && params->mbssid_config.tx_wdev->netdev != dev) dev_put(params->mbssid_config.tx_wdev->netdev); + kfree(params->beacon.rnr_ies); kfree(params); return err; @@ -6161,7 +6217,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) if (!wdev->links[link_id].ap.beacon_interval) return
-EINVAL; - err = nl80211_parse_beacon(rdev, info->attrs, &params); + err = nl80211_parse_beacon(rdev, info->attrs, &params, info->extack); if (err) goto out; @@ -6171,6 +6227,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) out: kfree(params.mbssid_ies); + kfree(params.rnr_ies); return err; } @@ -10030,7 +10087,8 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) if (!need_new_beacon) goto skip_beacons; - err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after); + err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after, + info->extack); if (err) goto free; @@ -10047,7 +10105,8 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) if (err) goto free; - err = nl80211_parse_beacon(rdev, csa_attrs, &params.beacon_csa); + err = nl80211_parse_beacon(rdev, csa_attrs, &params.beacon_csa, + info->extack); if (err) goto free; @@ -10167,6 +10226,8 @@ skip_beacons: free: kfree(params.beacon_after.mbssid_ies); kfree(params.beacon_csa.mbssid_ies); + kfree(params.beacon_after.rnr_ies); + kfree(params.beacon_csa.rnr_ies); kfree(csa_attrs); return err; } @@ -15882,7 +15943,8 @@ static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info) params.count = nla_get_u8(info->attrs[NL80211_ATTR_COLOR_CHANGE_COUNT]); params.color = nla_get_u8(info->attrs[NL80211_ATTR_COLOR_CHANGE_COLOR]); - err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_next); + err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_next, + info->extack); if (err) return err; @@ -15896,7 +15958,8 @@ static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info) if (err) goto out; - err = nl80211_parse_beacon(rdev, tb, &params.beacon_color_change); + err = nl80211_parse_beacon(rdev, tb, &params.beacon_color_change, + info->extack); if (err) goto out; @@ -15952,6 +16015,8 @@ static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info) out: kfree(params.beacon_next.mbssid_ies); kfree(params.beacon_color_change.mbssid_ies); + kfree(params.beacon_next.rnr_ies); + kfree(params.beacon_color_change.rnr_ies); kfree(tb); return err; } -- cgit v1.2.3 From 233eb4e786b57ea686b51c13a04cc2839fd682fc Mon Sep 17 00:00:00 2001 From: Shay Agroskin Date: Thu, 23 Mar 2023 18:36:05 +0200 Subject: ethtool: Add support for configuring tx_push_buf_len This attribute, which is part of ethtool's ring param configuration, allows the user to specify the maximum number of bytes of a packet's payload that can be written directly to the device.
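Drivers opt in by advertising the new capability bit and filling the new kernel_ethtool_ringparam fields; a rough, hypothetical driver-side sketch (the 'foo' names and limits are made up and not taken from this series) might look like this, with the userspace command shown after it:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define FOO_MAX_PUSH_BUF_LEN	512	/* device limit, made up */

struct foo_adapter {
	u32 tx_ring_size;
	u32 tx_push_buf_len;
	bool tx_push_enabled;
};

static int foo_reconfigure_rings(struct foo_adapter *adapter, u32 tx_pending)
{
	/* A real driver would tear down and re-create the TX rings here. */
	adapter->tx_ring_size = tx_pending;
	return 0;
}

static void foo_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *extack)
{
	struct foo_adapter *adapter = netdev_priv(dev);

	ring->tx_pending = adapter->tx_ring_size;
	kring->tx_push = adapter->tx_push_enabled;
	kring->tx_push_buf_len = adapter->tx_push_buf_len;
	kring->tx_push_buf_max_len = FOO_MAX_PUSH_BUF_LEN;
}

static int foo_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kring,
			     struct netlink_ext_ack *extack)
{
	struct foo_adapter *adapter = netdev_priv(dev);

	/* The ethtool core has already checked the requested value against
	 * tx_push_buf_max_len reported by get_ringparam().
	 */
	adapter->tx_push_buf_len = kring->tx_push_buf_len;
	return foo_reconfigure_rings(adapter, ring->tx_pending);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH |
				  ETHTOOL_RING_USE_TX_PUSH_BUF_LEN,
	.get_ringparam		= foo_get_ringparam,
	.set_ringparam		= foo_set_ringparam,
};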
Example usage: # ethtool -G [interface] tx-push-buf-len [number of bytes] Co-developed-by: Jakub Kicinski Signed-off-by: Shay Agroskin Signed-off-by: Jakub Kicinski --- Documentation/netlink/specs/ethtool.yaml | 8 +++++ Documentation/networking/ethtool-netlink.rst | 47 ++++++++++++++++++---------- include/linux/ethtool.h | 14 ++++++--- include/uapi/linux/ethtool_netlink.h | 2 ++ net/ethtool/netlink.h | 2 +- net/ethtool/rings.c | 34 ++++++++++++++++++-- 6 files changed, 84 insertions(+), 23 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 4727c067e2ba..6d8ae3d9a680 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -165,6 +165,12 @@ attribute-sets: - name: rx-push type: u8 + - + name: tx-push-buf-len + type: u32 + - + name: tx-push-buf-len-max + type: u32 - name: mm-stat @@ -311,6 +317,8 @@ operations: - cqe-size - tx-push - rx-push + - tx-push-buf-len + - tx-push-buf-len-max dump: *ring-get-op - name: rings-set diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index e1bc6186d7ea..cd0973d4ba01 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -860,22 +860,24 @@ Request contents: Kernel response contents: - ==================================== ====== =========================== - ``ETHTOOL_A_RINGS_HEADER`` nested reply header - ``ETHTOOL_A_RINGS_RX_MAX`` u32 max size of RX ring - ``ETHTOOL_A_RINGS_RX_MINI_MAX`` u32 max size of RX mini ring - ``ETHTOOL_A_RINGS_RX_JUMBO_MAX`` u32 max size of RX jumbo ring - ``ETHTOOL_A_RINGS_TX_MAX`` u32 max size of TX ring - ``ETHTOOL_A_RINGS_RX`` u32 size of RX ring - ``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring - ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring - ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring - ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring - ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split - ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE - ``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode - ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode - ==================================== ====== =========================== + ======================================= ====== =========================== + ``ETHTOOL_A_RINGS_HEADER`` nested reply header + ``ETHTOOL_A_RINGS_RX_MAX`` u32 max size of RX ring + ``ETHTOOL_A_RINGS_RX_MINI_MAX`` u32 max size of RX mini ring + ``ETHTOOL_A_RINGS_RX_JUMBO_MAX`` u32 max size of RX jumbo ring + ``ETHTOOL_A_RINGS_TX_MAX`` u32 max size of TX ring + ``ETHTOOL_A_RINGS_RX`` u32 size of RX ring + ``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring + ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring + ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring + ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring + ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split + ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE + ``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode + ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode + ``ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN`` u32 size of TX push buffer + ``ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX`` u32 max size of TX push buffer + ======================================= ====== =========================== ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with page-flipping TCP zero-copy receive (``getsockopt(TCP_ZEROCOPY_RECEIVE)``). 
@@ -891,6 +893,18 @@ through MMIO writes, thus reducing the latency. However, enabling this feature may increase the CPU cost. Drivers may enforce additional per-packet eligibility checks (e.g. on packet size). +``ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN`` specifies the maximum number of bytes of a +transmitted packet a driver can push directly to the underlying device +('push' mode). Pushing some of the payload bytes to the device has the +advantages of reducing latency for small packets by avoiding DMA mapping (same +as ``ETHTOOL_A_RINGS_TX_PUSH`` parameter) as well as allowing the underlying +device to process packet headers ahead of fetching its payload. +This can help the device to make fast actions based on the packet's headers. +This is similar to the "tx-copybreak" parameter, which copies the packet to a +preallocated DMA memory area instead of mapping new memory. However, +tx-push-buff parameter copies the packet directly to the device to allow the +device to take faster actions on the packet. + RINGS_SET ========= @@ -908,6 +922,7 @@ Request contents: ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE ``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode + ``ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN`` u32 size of TX push buffer ==================================== ====== =========================== Kernel checks that requested ring sizes do not exceed limits reported by diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 2792185dda22..798d35890118 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -75,6 +75,8 @@ enum { * @tx_push: The flag of tx push mode * @rx_push: The flag of rx push mode * @cqe_size: Size of TX/RX completion queue event + * @tx_push_buf_len: Size of TX push buffer + * @tx_push_buf_max_len: Maximum allowed size of TX push buffer */ struct kernel_ethtool_ringparam { u32 rx_buf_len; @@ -82,6 +84,8 @@ struct kernel_ethtool_ringparam { u8 tx_push; u8 rx_push; u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; }; /** @@ -90,12 +94,14 @@ struct kernel_ethtool_ringparam { * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push + * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len */ enum ethtool_supported_ring_param { - ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), - ETHTOOL_RING_USE_CQE_SIZE = BIT(1), - ETHTOOL_RING_USE_TX_PUSH = BIT(2), - ETHTOOL_RING_USE_RX_PUSH = BIT(3), + ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), + ETHTOOL_RING_USE_CQE_SIZE = BIT(1), + ETHTOOL_RING_USE_TX_PUSH = BIT(2), + ETHTOOL_RING_USE_RX_PUSH = BIT(3), + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index d39ce21381c5..1ebf8d455f07 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -357,6 +357,8 @@ enum { ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */ ETHTOOL_A_RINGS_TX_PUSH, /* u8 */ ETHTOOL_A_RINGS_RX_PUSH, /* u8 */ + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, /* u32 */ + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, /* u32 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index f7b189ed96b2..79424b34b553 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -413,7 +413,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT 
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; -extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_PUSH + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX + 1]; extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index f358cd57d094..1c4972526142 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -11,6 +11,7 @@ struct rings_reply_data { struct ethnl_reply_data base; struct ethtool_ringparam ringparam; struct kernel_ethtool_ringparam kernel_ringparam; + u32 supported_ring_params; }; #define RINGS_REPDATA(__reply_base) \ @@ -32,6 +33,8 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base, if (!dev->ethtool_ops->get_ringparam) return -EOPNOTSUPP; + + data->supported_ring_params = dev->ethtool_ops->supported_ring_params; ret = ethnl_ops_begin(dev); if (ret < 0) return ret; @@ -57,7 +60,9 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */ nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */ nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */ - nla_total_size(sizeof(u8))); /* _RINGS_RX_PUSH */ + nla_total_size(sizeof(u8))) + /* _RINGS_RX_PUSH */ + nla_total_size(sizeof(u32)) + /* _RINGS_TX_PUSH_BUF_LEN */ + nla_total_size(sizeof(u32)); /* _RINGS_TX_PUSH_BUF_LEN_MAX */ } static int rings_fill_reply(struct sk_buff *skb, @@ -67,6 +72,7 @@ static int rings_fill_reply(struct sk_buff *skb, const struct rings_reply_data *data = RINGS_REPDATA(reply_base); const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam; const struct ethtool_ringparam *ringparam = &data->ringparam; + u32 supported_ring_params = data->supported_ring_params; WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED); @@ -98,7 +104,12 @@ static int rings_fill_reply(struct sk_buff *skb, (kr->cqe_size && (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) || nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) || - nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push)) + nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) || + ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) && + (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, + kr->tx_push_buf_max_len) || + nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, + kr->tx_push_buf_len)))) return -EMSGSIZE; return 0; @@ -117,6 +128,7 @@ const struct nla_policy ethnl_rings_set_policy[] = { [ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1), [ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1), [ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1), + [ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] = { .type = NLA_U32 }, }; static int @@ -158,6 +170,14 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info, return -EOPNOTSUPP; } + if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] && + !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], + "setting tx push buf len is not supported"); + return -EOPNOTSUPP; + } + return 
ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP; } @@ -189,6 +209,8 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info) tb[ETHTOOL_A_RINGS_TX_PUSH], &mod); ethnl_update_u8(&kernel_ringparam.rx_push, tb[ETHTOOL_A_RINGS_RX_PUSH], &mod); + ethnl_update_u32(&kernel_ringparam.tx_push_buf_len, + tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod); if (!mod) return 0; @@ -209,6 +231,14 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info) return -EINVAL; } + if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) { + NL_SET_ERR_MSG_ATTR_FMT(info->extack, tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], + "Requested TX push buffer exceeds the maximum of %u", + kernel_ringparam.tx_push_buf_max_len); + + return -EINVAL; + } + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, &kernel_ringparam, info->extack); return ret < 0 ? ret : 1; -- cgit v1.2.3 From 954d1fa1ac93aa8a66f7d9a9ba545cf7f020d348 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 28 Mar 2023 10:57:59 +0800 Subject: macvlan: Add netlink attribute for broadcast cutoff Make the broadcast cutoff configurable through netlink. Note that macvlan is weird because there is no central device for us to configure (the lowerdev could be anything). So all the options are duplicated over what could be thousands of child devices. IFLA_MACVLAN_BC_QUEUE_LEN took the approach of taking the maximum of all child device settings. This is unnecessary as we could simply store the option in the port device and take the last child device that gets updated as the value to use. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 31 +++++++++++++++++++++++++++++-- include/uapi/linux/if_link.h | 1 + tools/include/uapi/linux/if_link.h | 1 + 3 files changed, 31 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 62b4748d3836..4215106adc40 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -47,6 +47,7 @@ struct macvlan_port { struct sk_buff_head bc_queue; struct work_struct bc_work; u32 bc_queue_len_used; + int bc_cutoff; u32 flags; int count; struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; @@ -814,6 +815,12 @@ static void macvlan_compute_filter(unsigned long *mc_filter, } } +static void macvlan_recompute_bc_filter(struct macvlan_dev *vlan) +{ + macvlan_compute_filter(vlan->port->bc_filter, vlan->lowerdev, NULL, + vlan->port->bc_cutoff); +} + static void macvlan_set_mac_lists(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -838,8 +845,16 @@ static void macvlan_set_mac_lists(struct net_device *dev) */ macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL, 0); - macvlan_compute_filter(vlan->port->bc_filter, vlan->lowerdev, NULL, - 1); + macvlan_recompute_bc_filter(vlan); +} + +static void update_port_bc_cutoff(struct macvlan_dev *vlan, int cutoff) +{ + if (vlan->port->bc_cutoff == cutoff) + return; + + vlan->port->bc_cutoff = cutoff; + macvlan_recompute_bc_filter(vlan); } static int macvlan_change_mtu(struct net_device *dev, int new_mtu) @@ -1254,6 +1269,7 @@ static int macvlan_port_create(struct net_device *dev) INIT_HLIST_HEAD(&port->vlan_source_hash[i]); port->bc_queue_len_used = 0; + port->bc_cutoff = 1; skb_queue_head_init(&port->bc_queue); INIT_WORK(&port->bc_work, macvlan_process_broadcast); @@ -1527,6 +1543,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) 
vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); + if (data && data[IFLA_MACVLAN_BC_CUTOFF]) + update_port_bc_cutoff( + vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF])); + err = register_netdevice(dev); if (err < 0) goto destroy_macvlan_port; @@ -1623,6 +1643,10 @@ static int macvlan_changelink(struct net_device *dev, update_port_bc_queue_len(vlan->port); } + if (data && data[IFLA_MACVLAN_BC_CUTOFF]) + update_port_bc_cutoff( + vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF])); + if (set_mode) vlan->mode = mode; if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { @@ -1703,6 +1727,9 @@ static int macvlan_fill_info(struct sk_buff *skb, goto nla_put_failure; if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED, port->bc_queue_len_used)) goto nla_put_failure; + if (port->bc_cutoff != 1 && + nla_put_s32(skb, IFLA_MACVLAN_BC_CUTOFF, port->bc_cutoff)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 57ceb788250f..8d679688efe0 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -635,6 +635,7 @@ enum { IFLA_MACVLAN_MACADDR_COUNT, IFLA_MACVLAN_BC_QUEUE_LEN, IFLA_MACVLAN_BC_QUEUE_LEN_USED, + IFLA_MACVLAN_BC_CUTOFF, __IFLA_MACVLAN_MAX, }; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 901d98b865a1..39e659c83cfd 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -605,6 +605,7 @@ enum { IFLA_MACVLAN_MACADDR_COUNT, IFLA_MACVLAN_BC_QUEUE_LEN, IFLA_MACVLAN_BC_QUEUE_LEN_USED, + IFLA_MACVLAN_BC_CUTOFF, __IFLA_MACVLAN_MAX, }; -- cgit v1.2.3 From 9a8aac92eba90b3b7c71d0531db535f5588388f5 Mon Sep 17 00:00:00 2001 From: Kieran Frewen Date: Fri, 24 Feb 2023 10:29:17 +1300 Subject: wifi: nl80211: support advertising S1G capabilities Include S1G capabilities in netlink band info messages. Signed-off-by: Kieran Frewen Co-developed-by: Gilad Itzkovitch Signed-off-by: Gilad Itzkovitch Link: https://lore.kernel.org/r/20230223212917.4010246-1-gilad.itzkovitch@virscient.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 7 +++++++ net/wireless/nl80211.c | 10 ++++++++++ 2 files changed, 17 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index cf4fb981e131..c59fec406da5 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4061,6 +4061,10 @@ enum nl80211_band_iftype_attr { * @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes * the allowed channel bandwidth configurations. * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13. 
+ * @NL80211_BAND_ATTR_S1G_MCS_NSS_SET: S1G capabilities, supported S1G-MCS and NSS + * set subfield, as in the S1G information IE, 5 bytes + * @NL80211_BAND_ATTR_S1G_CAPA: S1G capabilities information subfield as in the + * S1G information IE, 10 bytes * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined * @__NL80211_BAND_ATTR_AFTER_LAST: internal use */ @@ -4081,6 +4085,9 @@ enum nl80211_band_attr { NL80211_BAND_ATTR_EDMG_CHANNELS, NL80211_BAND_ATTR_EDMG_BW_CONFIG, + NL80211_BAND_ATTR_S1G_MCS_NSS_SET, + NL80211_BAND_ATTR_S1G_CAPA, + /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f1cd3d9130dd..2c9edb015652 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1961,6 +1961,16 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg, nla_nest_end(msg, nl_rates); + /* S1G capabilities */ + if (sband->band == NL80211_BAND_S1GHZ && sband->s1g_cap.s1g && + (nla_put(msg, NL80211_BAND_ATTR_S1G_CAPA, + sizeof(sband->s1g_cap.cap), + sband->s1g_cap.cap) || + nla_put(msg, NL80211_BAND_ATTR_S1G_MCS_NSS_SET, + sizeof(sband->s1g_cap.nss_mcs), + sband->s1g_cap.nss_mcs))) + return -ENOBUFS; + return 0; } -- cgit v1.2.3 From 28c1b6df436819a7ed8a781835766e45139771a3 Mon Sep 17 00:00:00 2001 From: Eric Sage Date: Mon, 27 Mar 2023 13:44:49 -0400 Subject: netfilter: nfnetlink_queue: enable classid socket info retrieval This enables associating a socket with a v1 net_cls cgroup. Useful for applying a per-cgroup policy when processing packets in userspace. Signed-off-by: Eric Sage Signed-off-by: Florian Westphal --- include/uapi/linux/netfilter/nfnetlink_queue.h | 1 + net/netfilter/nfnetlink_queue.c | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index ef7c97f21a15..efcb7c044a74 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -62,6 +62,7 @@ enum nfqnl_attr_type { NFQA_VLAN, /* nested attribute: packet vlan info */ NFQA_L2HDR, /* full L2 header */ NFQA_PRIORITY, /* skb->priority */ + NFQA_CGROUP_CLASSID, /* __u32 cgroup classid */ __NFQA_MAX }; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 87a9009d5234..e311462f6d98 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -301,6 +302,19 @@ nla_put_failure: return -1; } +static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk) +{ +#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID) + if (sk && sk_fullsock(sk)) { + u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data); + + if (classid && nla_put_be32(skb, NFQA_CGROUP_CLASSID, htonl(classid))) + return -1; + } +#endif + return 0; +} + static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata) { u32 seclen = 0; @@ -406,6 +420,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, + nla_total_size(sizeof(u_int32_t)) /* priority */ + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) + nla_total_size(sizeof(u_int32_t)) /* skbinfo */ +#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID) + + nla_total_size(sizeof(u_int32_t)) /* classid */ +#endif + nla_total_size(sizeof(u_int32_t)); /* cap_len */ tstamp = skb_tstamp_cond(entskb, false); @@ -599,6 +616,9 @@ 
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, nfqnl_put_sk_uidgid(skb, entskb->sk) < 0) goto nla_put_failure; + if (nfqnl_put_sk_classid(skb, entskb->sk) < 0) + goto nla_put_failure; + if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata)) goto nla_put_failure; -- cgit v1.2.3 From a25b8b7136ad43760bd876af62b6e59abd30496c Mon Sep 17 00:00:00 2001 From: Matthieu De Beule Date: Wed, 29 Mar 2023 12:52:18 +0000 Subject: netfilter: Correct documentation errors in nf_tables.h NFTA_RANGE_OP incorrectly says nft_cmp_ops instead of nft_range_ops. NFTA_LOG_GROUP and NFTA_LOG_QTHRESHOLD claim NLA_U32 instead of NLA_U16 NFTA_EXTHDR_SREG isn't documented as a register Signed-off-by: Matthieu De Beule Signed-off-by: Florian Westphal --- include/uapi/linux/netfilter/nf_tables.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 9c6f02c26054..c4d4d8e42dc8 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -685,7 +685,7 @@ enum nft_range_ops { * enum nft_range_attributes - nf_tables range expression netlink attributes * * @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers) - * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_cmp_ops) + * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_range_ops) * @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes) * @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes) */ @@ -878,7 +878,7 @@ enum nft_exthdr_op { * @NFTA_EXTHDR_LEN: extension header length (NLA_U32) * @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32) * @NFTA_EXTHDR_OP: option match type (NLA_U32) - * @NFTA_EXTHDR_SREG: option match type (NLA_U32) + * @NFTA_EXTHDR_SREG: source register (NLA_U32: nft_registers) */ enum nft_exthdr_attributes { NFTA_EXTHDR_UNSPEC, @@ -1262,10 +1262,10 @@ enum nft_last_attributes { /** * enum nft_log_attributes - nf_tables log expression netlink attributes * - * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32) + * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U16) * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING) * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32) - * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32) + * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U16) * @NFTA_LOG_LEVEL: log level (NLA_U32) * @NFTA_LOG_FLAGS: logging flags (NLA_U32) */ -- cgit v1.2.3 From 2384127e98db52a6ac2577924ad9cae25f3e7472 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 29 Mar 2023 11:54:52 +0200 Subject: net/sched: act_tunnel_key: add support for "don't fragment" extend "act_tunnel_key" to allow specifying TUNNEL_DONT_FRAGMENT. 
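For illustration only (not part of this patch): the new attribute is a presence-only netlink flag, so a user space program building the action payload by hand would emit just an attribute header. "buf" and "off" below stand in for whatever request buffer the caller already maintains; real users would normally go through iproute2 or libnl instead.

  struct nlattr *nofrag = (struct nlattr *)(buf + off);

  nofrag->nla_type = TCA_TUNNEL_KEY_NO_FRAG;
  nofrag->nla_len  = NLA_HDRLEN;       /* flag attribute: header only, no payload */
  off += NLA_ALIGN(nofrag->nla_len);

When the flag is present, tunnel_key_init() ORs TUNNEL_DONT_FRAGMENT into the encap flags, and tunnel_key_dump() reports the attribute back only for tunnels carrying that flag.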
Suggested-by: Ilya Maximets Reviewed-by: Pedro Tammela Acked-by: Jamal Hadi Salim Signed-off-by: Davide Caratti Signed-off-by: Jakub Kicinski --- include/uapi/linux/tc_act/tc_tunnel_key.h | 1 + net/sched/act_tunnel_key.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h index 49ad4033951b..37c6f612f161 100644 --- a/include/uapi/linux/tc_act/tc_tunnel_key.h +++ b/include/uapi/linux/tc_act/tc_tunnel_key.h @@ -34,6 +34,7 @@ enum { */ TCA_TUNNEL_KEY_ENC_TOS, /* u8 */ TCA_TUNNEL_KEY_ENC_TTL, /* u8 */ + TCA_TUNNEL_KEY_NO_FRAG, /* flag */ __TCA_TUNNEL_KEY_MAX, }; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 2d12d2626415..0c8aa7e686ea 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -420,6 +420,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM])) flags &= ~TUNNEL_CSUM; + if (nla_get_flag(tb[TCA_TUNNEL_KEY_NO_FRAG])) + flags |= TUNNEL_DONT_FRAGMENT; + if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT]) dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]); @@ -747,6 +750,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, key->tp_dst)) || nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM, !(key->tun_flags & TUNNEL_CSUM)) || + ((key->tun_flags & TUNNEL_DONT_FRAGMENT) && + nla_put_flag(skb, TCA_TUNNEL_KEY_NO_FRAG)) || tunnel_key_opts_dump(skb, info)) goto nla_put_failure; -- cgit v1.2.3 From 47a71c1f9af0a334c9dfa97633c41de4feda4287 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:41:58 -0700 Subject: bpf: Add log_true_size output field to return necessary log buffer size Add output-only log_true_size and btf_log_true_size field to BPF_PROG_LOAD and BPF_BTF_LOAD commands, respectively. It will return the size of log buffer necessary to fit in all the log contents at specified log_level. This is very useful for BPF loader libraries like libbpf to be able to size log buffer correctly, but could be used by users directly, if necessary, as well. This patch plumbs all this through the code, taking into account actual bpf_attr size provided by user to determine if these new fields are expected by users. And if they are, set them from kernel on return. We refactory btf_parse() function to accommodate this, moving attr and uattr handling inside it. The rest is very straightforward code, which is split from the logging accounting changes in the previous patch to make it simpler to review logic vs UAPI changes. 
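Illustrative only (not part of this patch): a minimal raw-syscall sketch of the retry pattern the new field enables. It assumes <linux/bpf.h>, <sys/syscall.h>, <unistd.h>, <stdlib.h> and <errno.h>, and that the caller has already filled in insns, insn_cnt, license, log_level and an initial (possibly too small) log_buf/log_size; the function name is made up.

  static int load_prog_resizing_log(union bpf_attr *attr, char **log_buf)
  {
          int fd;

          fd = syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(*attr));
          if (fd >= 0 || attr->log_true_size <= attr->log_size)
                  return fd;

          /* Log was truncated; the kernel reported the size it needs. */
          *log_buf = realloc(*log_buf, attr->log_true_size);
          if (!*log_buf)
                  return -ENOMEM;

          attr->log_buf  = (__u64)(unsigned long)*log_buf;
          attr->log_size = attr->log_true_size;

          return syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(*attr));
  }

BPF_BTF_LOAD can be handled the same way using btf_log_true_size.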
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20230406234205.323208-13-andrii@kernel.org --- include/linux/bpf.h | 2 +- include/linux/btf.h | 2 +- include/uapi/linux/bpf.h | 10 ++++++++++ kernel/bpf/btf.c | 32 ++++++++++++++++++-------------- kernel/bpf/syscall.c | 16 ++++++++-------- kernel/bpf/verifier.c | 8 +++++++- tools/include/uapi/linux/bpf.h | 12 +++++++++++- 7 files changed, 56 insertions(+), 26 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 002a811b6b90..2c6095bd7d69 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2175,7 +2175,7 @@ int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, size_t actual_size); /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr); +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size); #ifndef CONFIG_BPF_JIT_ALWAYS_ON void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); diff --git a/include/linux/btf.h b/include/linux/btf.h index d53b10cc55f2..495250162422 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -125,7 +125,7 @@ extern const struct file_operations btf_fops; void btf_get(struct btf *btf); void btf_put(struct btf *btf); -int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr); +int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, const union bpf_attr *attr, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e3d3b5160d26..3823100b7934 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1407,6 +1407,11 @@ union bpf_attr { __aligned_u64 fd_array; /* array of FDs */ __aligned_u64 core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). + */ + __u32 log_true_size; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1492,6 +1497,11 @@ union bpf_attr { __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). 
+ */ + __u32 btf_log_true_size; }; struct { diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 5aa540ee611f..0748cf4b8ab6 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5504,9 +5504,10 @@ static int btf_check_type_tags(struct btf_verifier_env *env, return 0; } -static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, - u32 log_level, char __user *log_ubuf, u32 log_size) +static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) { + bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel); + char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf); struct btf_struct_metas *struct_meta_tab; struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; @@ -5514,7 +5515,7 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, u8 *data; int err; - if (btf_data_size > BTF_MAX_SIZE) + if (attr->btf_size > BTF_MAX_SIZE) return ERR_PTR(-E2BIG); env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); @@ -5522,13 +5523,13 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, return ERR_PTR(-ENOMEM); log = &env->log; - if (log_level || log_ubuf || log_size) { + if (attr->btf_log_level || log_ubuf || attr->btf_log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ - log->level = log_level; + log->level = attr->btf_log_level; log->ubuf = log_ubuf; - log->len_total = log_size; + log->len_total = attr->btf_log_size; /* log attributes have to be sane */ if (!bpf_verifier_log_attr_valid(log)) { @@ -5544,16 +5545,16 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, } env->btf = btf; - data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); + data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN); if (!data) { err = -ENOMEM; goto errout; } btf->data = data; - btf->data_size = btf_data_size; + btf->data_size = attr->btf_size; - if (copy_from_bpfptr(data, btf_data, btf_data_size)) { + if (copy_from_bpfptr(data, btf_data, attr->btf_size)) { err = -EFAULT; goto errout; } @@ -5594,6 +5595,12 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, } bpf_vlog_finalize(log); + if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) && + copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size), + &log->len_max, sizeof(log->len_max))) { + err = -EFAULT; + goto errout_meta; + } if (bpf_vlog_truncated(log)) { err = -ENOSPC; goto errout_meta; @@ -7218,15 +7225,12 @@ static int __btf_new_fd(struct btf *btf) return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); } -int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr) +int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) { struct btf *btf; int ret; - btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel), - attr->btf_size, attr->btf_log_level, - u64_to_user_ptr(attr->btf_log_buf), - attr->btf_log_size); + btf = btf_parse(attr, uattr, uattr_size); if (IS_ERR(btf)) return PTR_ERR(btf); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index e18ac7fdc210..6d575505f89c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2501,9 +2501,9 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) } /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size +#define BPF_PROG_LOAD_LAST_FIELD log_true_size -static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) +static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) { 
enum bpf_prog_type type = attr->prog_type; struct bpf_prog *prog, *dst_prog = NULL; @@ -2653,7 +2653,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) goto free_prog_sec; /* run eBPF verifier */ - err = bpf_check(&prog, attr, uattr); + err = bpf_check(&prog, attr, uattr, uattr_size); if (err < 0) goto free_used_maps; @@ -4371,9 +4371,9 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, return err; } -#define BPF_BTF_LOAD_LAST_FIELD btf_log_level +#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size -static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr) +static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) { if (CHECK_ATTR(BPF_BTF_LOAD)) return -EINVAL; @@ -4381,7 +4381,7 @@ static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr) if (!bpf_capable()) return -EPERM; - return btf_new_fd(attr, uattr); + return btf_new_fd(attr, uattr, uattr_size); } #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id @@ -5059,7 +5059,7 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) err = map_freeze(&attr); break; case BPF_PROG_LOAD: - err = bpf_prog_load(&attr, uattr); + err = bpf_prog_load(&attr, uattr, size); break; case BPF_OBJ_PIN: err = bpf_obj_pin(&attr); @@ -5104,7 +5104,7 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) err = bpf_raw_tracepoint_open(&attr); break; case BPF_BTF_LOAD: - err = bpf_btf_load(&attr, uattr); + err = bpf_btf_load(&attr, uattr, size); break; case BPF_BTF_GET_FD_BY_ID: err = bpf_btf_get_fd_by_id(&attr); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a98cbc046d1e..308e7abeb979 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -18694,7 +18694,7 @@ struct btf *bpf_get_btf_vmlinux(void) return btf_vmlinux; } -int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) +int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) { u64 start_time = ktime_get_ns(); struct bpf_verifier_env *env; @@ -18861,6 +18861,12 @@ skip_full_check: env->prog->aux->verified_insns = env->insn_processed; bpf_vlog_finalize(log); + if (uattr_size >= offsetofend(union bpf_attr, log_true_size) && + copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size), + &log->len_max, sizeof(log->len_max))) { + ret = -EFAULT; + goto err_release_maps; + } if (bpf_vlog_truncated(log)) ret = -ENOSPC; if (log->level && log->level != BPF_LOG_KERNEL && !log->ubuf) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d6c5a022ae28..3823100b7934 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1407,6 +1407,11 @@ union bpf_attr { __aligned_u64 fd_array; /* array of FDs */ __aligned_u64 core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). + */ + __u32 log_true_size; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1492,6 +1497,11 @@ union bpf_attr { __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). 
+ */ + __u32 btf_log_true_size; }; struct { @@ -1513,7 +1523,7 @@ union bpf_attr { struct { /* struct used by BPF_LINK_CREATE command */ union { __u32 prog_fd; /* eBPF program to attach */ - __u32 map_fd; /* eBPF struct_ops to attach */ + __u32 map_fd; /* struct_ops to attach */ }; union { __u32 target_fd; /* object to attach to */ -- cgit v1.2.3 From f62af20bed2d9e824f51cfc97ff01bc261f40e58 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 11 Apr 2023 21:01:54 +0300 Subject: net/sched: mqprio: allow per-TC user input of FP adminStatus IEEE 802.1Q-2018 clause 6.7.2 Frame preemption specifies that each packet priority can be assigned to a "frame preemption status" value of either "express" or "preemptible". Express priorities are transmitted by the local device through the eMAC, and preemptible priorities through the pMAC (the concepts of eMAC and pMAC come from the 802.3 MAC Merge layer). The FP adminStatus is defined per packet priority, but 802.1Q clause 12.30.1.1.1 framePreemptionAdminStatus also says that: | Priorities that all map to the same traffic class should be | constrained to use the same value of preemption status. It is impossible to ignore the cognitive dissonance in the standard here, because it practically means that the FP adminStatus only takes distinct values per traffic class, even though it is defined per priority. I can see no valid use case which is prevented by having the kernel take the FP adminStatus as input per traffic class (what we do here). In addition, this also enforces the above constraint by construction. User space network managers which wish to expose FP adminStatus per priority are free to do so; they must only observe the prio_tc_map of the netdev (which presumably is also under their control, when constructing the mqprio netlink attributes). The reason for configuring frame preemption as a property of the Qdisc layer is that the information about "preemptible TCs" is closest to the place which handles the num_tc and prio_tc_map of the netdev. If the UAPI would have been any other layer, it would be unclear what to do with the FP information when num_tc collapses to 0. A key assumption is that only mqprio/taprio change the num_tc and prio_tc_map of the netdev. Not sure if that's a great assumption to make. Having FP in tc-mqprio can be seen as an implementation of the use case defined in 802.1Q Annex S.2 "Preemption used in isolation". There will be a separate implementation of FP in tc-taprio, for the other use cases. 
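Hedged driver-side sketch (not part of this patch): one way an mqprio offload handler could consume the new bitmask. foo_set_tc_preemptible() is a made-up per-driver hook that steers a traffic class to the pMAC (preemptible) or the eMAC (express) of the MAC Merge layer.

  static int foo_setup_tc_mqprio(struct net_device *dev,
                                 struct tc_mqprio_qopt_offload *mqprio)
  {
          int tc;

          for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
                  foo_set_tc_preemptible(dev, tc,
                                         !!(mqprio->preemptible_tcs & BIT(tc)));

          /* ... existing handling of qopt, shaper and min/max rates ... */
          return 0;
  }

On the netlink side, each adminStatus value travels in a TCA_MQPRIO_TC_ENTRY nest as a TCA_MQPRIO_TC_ENTRY_INDEX / TCA_MQPRIO_TC_ENTRY_FP pair, with TC_FP_EXPRESS or TC_FP_PREEMPTIBLE as the value.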
Signed-off-by: Vladimir Oltean Reviewed-by: Ferenc Fejes Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- include/net/pkt_sched.h | 1 + include/uapi/linux/pkt_sched.h | 16 ++++++ net/sched/sch_mqprio.c | 128 ++++++++++++++++++++++++++++++++++++++++- net/sched/sch_mqprio_lib.c | 14 +++++ net/sched/sch_mqprio_lib.h | 2 + 5 files changed, 160 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index b43ed4733455..f436688b6efc 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -172,6 +172,7 @@ struct tc_mqprio_qopt_offload { u32 flags; u64 min_rate[TC_QOPT_MAX_QUEUE]; u64 max_rate[TC_QOPT_MAX_QUEUE]; + unsigned long preemptible_tcs; }; struct tc_taprio_caps { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 000eec106856..b8d29be91b62 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -719,6 +719,11 @@ enum { #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) +enum { + TC_FP_EXPRESS = 1, + TC_FP_PREEMPTIBLE = 2, +}; + struct tc_mqprio_qopt { __u8 num_tc; __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; @@ -732,12 +737,23 @@ struct tc_mqprio_qopt { #define TC_MQPRIO_F_MIN_RATE 0x4 #define TC_MQPRIO_F_MAX_RATE 0x8 +enum { + TCA_MQPRIO_TC_ENTRY_UNSPEC, + TCA_MQPRIO_TC_ENTRY_INDEX, /* u32 */ + TCA_MQPRIO_TC_ENTRY_FP, /* u32 */ + + /* add new constants above here */ + __TCA_MQPRIO_TC_ENTRY_CNT, + TCA_MQPRIO_TC_ENTRY_MAX = (__TCA_MQPRIO_TC_ENTRY_CNT - 1) +}; + enum { TCA_MQPRIO_UNSPEC, TCA_MQPRIO_MODE, TCA_MQPRIO_SHAPER, TCA_MQPRIO_MIN_RATE64, TCA_MQPRIO_MAX_RATE64, + TCA_MQPRIO_TC_ENTRY, __TCA_MQPRIO_MAX, }; diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 67d77495c8fd..dc5a0ff50b14 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -5,6 +5,7 @@ * Copyright (c) 2010 John Fastabend */ +#include #include #include #include @@ -27,6 +28,7 @@ struct mqprio_sched { u32 flags; u64 min_rate[TC_QOPT_MAX_QUEUE]; u64 max_rate[TC_QOPT_MAX_QUEUE]; + u32 fp[TC_QOPT_MAX_QUEUE]; }; static int mqprio_enable_offload(struct Qdisc *sch, @@ -63,6 +65,8 @@ static int mqprio_enable_offload(struct Qdisc *sch, return -EINVAL; } + mqprio_fp_to_offload(priv->fp, &mqprio); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO, &mqprio); if (err) @@ -145,13 +149,95 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt, return 0; } +static const struct +nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = { + [TCA_MQPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32, + TC_QOPT_MAX_QUEUE), + [TCA_MQPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32, + TC_FP_EXPRESS, + TC_FP_PREEMPTIBLE), +}; + static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = { [TCA_MQPRIO_MODE] = { .len = sizeof(u16) }, [TCA_MQPRIO_SHAPER] = { .len = sizeof(u16) }, [TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED }, [TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED }, + [TCA_MQPRIO_TC_ENTRY] = { .type = NLA_NESTED }, }; +static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE], + struct nlattr *opt, + unsigned long *seen_tcs, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1]; + int err, tc; + + err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt, + mqprio_tc_entry_policy, extack); + if (err < 0) + return err; + + if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) { + NL_SET_ERR_MSG(extack, "TC entry index missing"); + return -EINVAL; + } + + tc = 
nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]); + if (*seen_tcs & BIT(tc)) { + NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX], + "Duplicate tc entry"); + return -EINVAL; + } + + *seen_tcs |= BIT(tc); + + if (tb[TCA_MQPRIO_TC_ENTRY_FP]) + fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]); + + return 0; +} + +static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt, + int nlattr_opt_len, + struct netlink_ext_ack *extack) +{ + struct mqprio_sched *priv = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + bool have_preemption = false; + unsigned long seen_tcs = 0; + u32 fp[TC_QOPT_MAX_QUEUE]; + struct nlattr *n; + int tc, rem; + int err = 0; + + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) + fp[tc] = priv->fp[tc]; + + nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) { + if (nla_type(n) != TCA_MQPRIO_TC_ENTRY) + continue; + + err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack); + if (err) + goto out; + } + + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { + priv->fp[tc] = fp[tc]; + if (fp[tc] == TC_FP_PREEMPTIBLE) + have_preemption = true; + } + + if (have_preemption && !ethtool_dev_mm_supported(dev)) { + NL_SET_ERR_MSG(extack, "Device does not support preemption"); + return -EOPNOTSUPP; + } +out: + return err; +} + /* Parse the other netlink attributes that represent the payload of * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt. */ @@ -234,6 +320,13 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, priv->flags |= TC_MQPRIO_F_MAX_RATE; } + if (tb[TCA_MQPRIO_TC_ENTRY]) { + err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len, + extack); + if (err) + return err; + } + return 0; } @@ -247,7 +340,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt, int i, err = -EOPNOTSUPP; struct tc_mqprio_qopt *qopt = NULL; struct tc_mqprio_caps caps; - int len; + int len, tc; BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE); BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK); @@ -265,6 +358,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt, if (!opt || nla_len(opt) < sizeof(*qopt)) return -EINVAL; + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) + priv->fp[tc] = TC_FP_EXPRESS; + qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO, &caps, sizeof(caps)); @@ -415,6 +511,33 @@ nla_put_failure: return -1; } +static int mqprio_dump_tc_entries(struct mqprio_sched *priv, + struct sk_buff *skb) +{ + struct nlattr *n; + int tc; + + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { + n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY); + if (!n) + return -EMSGSIZE; + + if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc])) + goto nla_put_failure; + + nla_nest_end(skb, n); + } + + return 0; + +nla_put_failure: + nla_nest_cancel(skb, n); + return -EMSGSIZE; +} + static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct net_device *dev = qdisc_dev(sch); @@ -465,6 +588,9 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) (dump_rates(priv, &opt, skb) != 0)) goto nla_put_failure; + if (mqprio_dump_tc_entries(priv, skb)) + goto nla_put_failure; + return nla_nest_end(skb, nla); nla_put_failure: nlmsg_trim(skb, nla); diff --git a/net/sched/sch_mqprio_lib.c b/net/sched/sch_mqprio_lib.c index c58a533b8ec5..83b3793c4012 100644 --- a/net/sched/sch_mqprio_lib.c +++ b/net/sched/sch_mqprio_lib.c @@ -114,4 +114,18 @@ void mqprio_qopt_reconstruct(struct net_device *dev, struct tc_mqprio_qopt *qopt } 
EXPORT_SYMBOL_GPL(mqprio_qopt_reconstruct); +void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE], + struct tc_mqprio_qopt_offload *mqprio) +{ + unsigned long preemptible_tcs = 0; + int tc; + + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) + if (fp[tc] == TC_FP_PREEMPTIBLE) + preemptible_tcs |= BIT(tc); + + mqprio->preemptible_tcs = preemptible_tcs; +} +EXPORT_SYMBOL_GPL(mqprio_fp_to_offload); + MODULE_LICENSE("GPL"); diff --git a/net/sched/sch_mqprio_lib.h b/net/sched/sch_mqprio_lib.h index 63f725ab8761..079f597072e3 100644 --- a/net/sched/sch_mqprio_lib.h +++ b/net/sched/sch_mqprio_lib.h @@ -14,5 +14,7 @@ int mqprio_validate_qopt(struct net_device *dev, struct tc_mqprio_qopt *qopt, struct netlink_ext_ack *extack); void mqprio_qopt_reconstruct(struct net_device *dev, struct tc_mqprio_qopt *qopt); +void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE], + struct tc_mqprio_qopt_offload *mqprio); #endif -- cgit v1.2.3 From a721c3e54b80e45cd9202e7fca29ef018bed9069 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 11 Apr 2023 21:01:55 +0300 Subject: net/sched: taprio: allow per-TC user input of FP adminStatus This is a duplication of the FP adminStatus logic introduced for tc-mqprio. Offloading is done through the tc_mqprio_qopt_offload structure embedded within tc_taprio_qopt_offload. So practically, if a device driver is written to treat the mqprio portion of taprio just like standalone mqprio, it gets unified handling of frame preemption. I would have reused more code with taprio, but this is mostly netlink attribute parsing, which is hard to transform into generic code without having something that stinks as a result. We have the same variables with the same semantics, just different nlattr type values (TCA_MQPRIO_TC_ENTRY=5 vs TCA_TAPRIO_ATTR_TC_ENTRY=12; TCA_MQPRIO_TC_ENTRY_FP=2 vs TCA_TAPRIO_TC_ENTRY_FP=3, etc) and consequently, different policies for the nest. Every time nla_parse_nested() is called, an on-stack table "tb" of nlattr pointers is allocated statically, up to the maximum understood nlattr type. That array size is hardcoded as a constant, but when transforming this into a common parsing function, it would become either a VLA (which the Linux kernel rightfully doesn't like) or a call to the allocator. Having FP adminStatus in tc-taprio can be seen as addressing the 802.1Q Annex S.3 "Scheduling and preemption used in combination, no HOLD/RELEASE" and S.4 "Scheduling and preemption used in combination with HOLD/RELEASE" use cases. HOLD and RELEASE events are emitted towards the underlying MAC Merge layer when the schedule hits a Set-And-Hold-MAC or a Set-And-Release-MAC gate operation. So within the tc-taprio UAPI space, one can distinguish between the 2 use cases by choosing whether to use the TC_TAPRIO_CMD_SET_AND_HOLD and TC_TAPRIO_CMD_SET_AND_RELEASE gate operations within the schedule, or just TC_TAPRIO_CMD_SET_GATES. A small part of the change is dedicated to refactoring the max_sdu nlattr parsing to put all logic under the "if" that tests for presence of that nlattr. 
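Hedged sketch (not part of this patch): because the bitmask lives in the embedded mqprio offload, a driver that already handles frame preemption for TC_SETUP_QDISC_MQPRIO can reuse that path unchanged when a taprio schedule is installed. foo_apply_preemptible_tcs() is a made-up driver helper.

  static int foo_replace_taprio_schedule(struct net_device *dev,
                                         struct tc_taprio_qopt_offload *offload)
  {
          int err;

          err = foo_apply_preemptible_tcs(dev, offload->mqprio.preemptible_tcs);
          if (err)
                  return err;

          /* ... program base_time, cycle_time and the gate control list ... */
          return 0;
  }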
Signed-off-by: Vladimir Oltean Reviewed-by: Ferenc Fejes Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- include/uapi/linux/pkt_sched.h | 1 + net/sched/sch_taprio.c | 65 +++++++++++++++++++++++++++++++++--------- 2 files changed, 53 insertions(+), 13 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index b8d29be91b62..51a7addc56c6 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -1252,6 +1252,7 @@ enum { TCA_TAPRIO_TC_ENTRY_UNSPEC, TCA_TAPRIO_TC_ENTRY_INDEX, /* u32 */ TCA_TAPRIO_TC_ENTRY_MAX_SDU, /* u32 */ + TCA_TAPRIO_TC_ENTRY_FP, /* u32 */ /* add new constants above here */ __TCA_TAPRIO_TC_ENTRY_CNT, diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index cbad43019172..76db9a10ef50 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -96,6 +97,7 @@ struct taprio_sched { struct list_head taprio_list; int cur_txq[TC_MAX_QUEUE]; u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */ + u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */ u32 txtime_delay; }; @@ -1002,6 +1004,9 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 }, [TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 }, + [TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32, + TC_FP_EXPRESS, + TC_FP_PREEMPTIBLE), }; static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { @@ -1524,6 +1529,7 @@ static int taprio_enable_offload(struct net_device *dev, mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt); offload->mqprio.extack = extack; taprio_sched_to_offload(dev, sched, offload, &caps); + mqprio_fp_to_offload(q->fp, &offload->mqprio); for (tc = 0; tc < TC_MAX_QUEUE; tc++) offload->max_sdu[tc] = q->max_sdu[tc]; @@ -1671,13 +1677,14 @@ out: static int taprio_parse_tc_entry(struct Qdisc *sch, struct nlattr *opt, u32 max_sdu[TC_QOPT_MAX_QUEUE], + u32 fp[TC_QOPT_MAX_QUEUE], unsigned long *seen_tcs, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { }; struct net_device *dev = qdisc_dev(sch); - u32 val = 0; int err, tc; + u32 val; err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt, taprio_tc_policy, extack); @@ -1702,15 +1709,18 @@ static int taprio_parse_tc_entry(struct Qdisc *sch, *seen_tcs |= BIT(tc); - if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) + if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) { val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]); + if (val > dev->max_mtu) { + NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU"); + return -ERANGE; + } - if (val > dev->max_mtu) { - NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU"); - return -ERANGE; + max_sdu[tc] = val; } - max_sdu[tc] = val; + if (tb[TCA_TAPRIO_TC_ENTRY_FP]) + fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]); return 0; } @@ -1720,29 +1730,51 @@ static int taprio_parse_tc_entries(struct Qdisc *sch, struct netlink_ext_ack *extack) { struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); u32 max_sdu[TC_QOPT_MAX_QUEUE]; + bool have_preemption = false; unsigned long seen_tcs = 0; + u32 fp[TC_QOPT_MAX_QUEUE]; struct nlattr *n; int tc, rem; int err = 0; - for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { max_sdu[tc] = q->max_sdu[tc]; + fp[tc] = q->fp[tc]; + } 
nla_for_each_nested(n, opt, rem) { if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY) continue; - err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, + err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs, extack); if (err) - goto out; + return err; } - for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { q->max_sdu[tc] = max_sdu[tc]; + q->fp[tc] = fp[tc]; + if (fp[tc] != TC_FP_EXPRESS) + have_preemption = true; + } + + if (have_preemption) { + if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) { + NL_SET_ERR_MSG(extack, + "Preemption only supported with full offload"); + return -EOPNOTSUPP; + } + + if (!ethtool_dev_mm_supported(dev)) { + NL_SET_ERR_MSG(extack, + "Device does not support preemption"); + return -EOPNOTSUPP; + } + } -out: return err; } @@ -2023,7 +2055,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - int i; + int i, tc; spin_lock_init(&q->current_entry_lock); @@ -2080,6 +2112,9 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, q->qdiscs[i] = qdisc; } + for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) + q->fp[tc] = TC_FP_EXPRESS; + taprio_detect_broken_mqprio(q); return taprio_change(sch, opt, extack); @@ -2223,6 +2258,7 @@ error_nest: } static int taprio_dump_tc_entries(struct sk_buff *skb, + struct taprio_sched *q, struct sched_gate_list *sched) { struct nlattr *n; @@ -2240,6 +2276,9 @@ static int taprio_dump_tc_entries(struct sk_buff *skb, sched->max_sdu[tc])) goto nla_put_failure; + if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc])) + goto nla_put_failure; + nla_nest_end(skb, n); } @@ -2281,7 +2320,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) goto options_error; - if (oper && taprio_dump_tc_entries(skb, oper)) + if (oper && taprio_dump_tc_entries(skb, q, oper)) goto options_error; if (oper && dump_schedule(skb, oper)) -- cgit v1.2.3 From d54730b50bae1f3119bd686d551d66f0fcc387ca Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sat, 15 Apr 2023 13:18:04 -0700 Subject: bpf: Introduce opaque bpf_refcount struct and add btf_record plumbing A 'struct bpf_refcount' is added to the set of opaque uapi/bpf.h types meant for use in BPF programs. Similarly to other opaque types like bpf_spin_lock and bpf_rbtree_node, the verifier needs to know where in user-defined struct types a bpf_refcount can be located, so necessary btf_record plumbing is added to enable this. bpf_refcount is sized to hold a refcount_t. Similarly to bpf_spin_lock, the offset of a bpf_refcount is cached in btf_record as refcount_off in addition to being in the field array. Caching refcount_off makes sense for this field because further patches in the series will modify functions that take local kptrs (e.g. bpf_obj_drop) to change their behavior if the type they're operating on is refcounted. So enabling fast "is this type refcounted?" checks is desirable. No such verifier behavior changes are introduced in this patch, just logic to recognize 'struct bpf_refcount' in btf_record. 
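Hedged example (not part of this patch): how a refcounted local kptr might be declared in a BPF program once the rest of the series lands. The struct and field names are illustrative only.

  struct node_data {
          long key;
          long data;
          struct bpf_refcount ref;        /* offset cached as refcount_off */
          struct bpf_rb_node node;
  };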
Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230415201811.343116-3-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 8 ++++++++ include/uapi/linux/bpf.h | 4 ++++ kernel/bpf/btf.c | 12 +++++++++++- kernel/bpf/syscall.c | 6 +++++- tools/include/uapi/linux/bpf.h | 4 ++++ 5 files changed, 32 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7888ed497432..be44d765b7a4 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -187,6 +187,7 @@ enum btf_field_type { BPF_RB_NODE = (1 << 7), BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD | BPF_RB_NODE | BPF_RB_ROOT, + BPF_REFCOUNT = (1 << 8), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -223,6 +224,7 @@ struct btf_record { u32 field_mask; int spin_lock_off; int timer_off; + int refcount_off; struct btf_field fields[]; }; @@ -293,6 +295,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) return "bpf_rb_root"; case BPF_RB_NODE: return "bpf_rb_node"; + case BPF_REFCOUNT: + return "bpf_refcount"; default: WARN_ON_ONCE(1); return "unknown"; @@ -317,6 +321,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type) return sizeof(struct bpf_rb_root); case BPF_RB_NODE: return sizeof(struct bpf_rb_node); + case BPF_REFCOUNT: + return sizeof(struct bpf_refcount); default: WARN_ON_ONCE(1); return 0; @@ -341,6 +347,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type) return __alignof__(struct bpf_rb_root); case BPF_RB_NODE: return __alignof__(struct bpf_rb_node); + case BPF_REFCOUNT: + return __alignof__(struct bpf_refcount); default: WARN_ON_ONCE(1); return 0; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3823100b7934..4b20a7269bee 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6985,6 +6985,10 @@ struct bpf_rb_node { __u64 :64; } __attribute__((aligned(8))); +struct bpf_refcount { + __u32 :32; +} __attribute__((aligned(4))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index f3c998feeccb..14889fd5ba8e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3391,6 +3391,7 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, field_mask_test_name(BPF_LIST_NODE, "bpf_list_node"); field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root"); field_mask_test_name(BPF_RB_NODE, "bpf_rb_node"); + field_mask_test_name(BPF_REFCOUNT, "bpf_refcount"); /* Only return BPF_KPTR when all other types with matchable names fail */ if (field_mask & BPF_KPTR) { @@ -3439,6 +3440,7 @@ static int btf_find_struct_field(const struct btf *btf, case BPF_TIMER: case BPF_LIST_NODE: case BPF_RB_NODE: + case BPF_REFCOUNT: ret = btf_find_struct(btf, member_type, off, sz, field_type, idx < info_cnt ? &info[idx] : &tmp); if (ret < 0) @@ -3504,6 +3506,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, case BPF_TIMER: case BPF_LIST_NODE: case BPF_RB_NODE: + case BPF_REFCOUNT: ret = btf_find_struct(btf, var_type, off, sz, field_type, idx < info_cnt ? 
&info[idx] : &tmp); if (ret < 0) @@ -3734,6 +3737,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type rec->spin_lock_off = -EINVAL; rec->timer_off = -EINVAL; + rec->refcount_off = -EINVAL; for (i = 0; i < cnt; i++) { field_type_size = btf_field_type_size(info_arr[i].type); if (info_arr[i].off + field_type_size > value_size) { @@ -3763,6 +3767,11 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type /* Cache offset for faster lookup at runtime */ rec->timer_off = rec->fields[i].offset; break; + case BPF_REFCOUNT: + WARN_ON_ONCE(rec->refcount_off >= 0); + /* Cache offset for faster lookup at runtime */ + rec->refcount_off = rec->fields[i].offset; + break; case BPF_KPTR_UNREF: case BPF_KPTR_REF: ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]); @@ -5308,6 +5317,7 @@ static const char *alloc_obj_fields[] = { "bpf_list_node", "bpf_rb_root", "bpf_rb_node", + "bpf_refcount", }; static struct btf_struct_metas * @@ -5381,7 +5391,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) type = &tab->types[tab->cnt]; type->btf_id = i; record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE | - BPF_RB_ROOT | BPF_RB_NODE, t->size); + BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size); /* The record cannot be unset, treat it as an error if so */ if (IS_ERR_OR_NULL(record)) { ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index c08b7933bf8f..28eac7434d32 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -552,6 +552,7 @@ void btf_record_free(struct btf_record *rec) case BPF_RB_NODE: case BPF_SPIN_LOCK: case BPF_TIMER: + case BPF_REFCOUNT: /* Nothing to release */ break; default: @@ -599,6 +600,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) case BPF_RB_NODE: case BPF_SPIN_LOCK: case BPF_TIMER: + case BPF_REFCOUNT: /* Nothing to acquire */ break; default: @@ -705,6 +707,7 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) break; case BPF_LIST_NODE: case BPF_RB_NODE: + case BPF_REFCOUNT: break; default: WARN_ON_ONCE(1); @@ -1032,7 +1035,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, map->record = btf_parse_fields(btf, value_type, BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | - BPF_RB_ROOT, + BPF_RB_ROOT | BPF_REFCOUNT, map->value_size); if (!IS_ERR_OR_NULL(map->record)) { int i; @@ -1071,6 +1074,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, break; case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_REFCOUNT: if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_LRU_HASH && diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3823100b7934..4b20a7269bee 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6985,6 +6985,10 @@ struct bpf_rb_node { __u64 :64; } __attribute__((aligned(8))); +struct bpf_refcount { + __u32 :32; +} __attribute__((aligned(4))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. 
-- cgit v1.2.3 From 3b3009ea8abb713b022d94fba95ec270cf6e7eae Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 17 Apr 2023 10:32:26 -0400 Subject: net/handshake: Create a NETLINK service for handling handshake requests When a kernel consumer needs a transport layer security session, it first needs a handshake to negotiate and establish a session. This negotiation can be done in user space via one of the several existing library implementations, or it can be done in the kernel. No in-kernel handshake implementations yet exist. In their absence, we add a netlink service that can: a. Notify a user space daemon that a handshake is needed. b. Once notified, the daemon calls the kernel back via this netlink service to get the handshake parameters, including an open socket on which to establish the session. c. Once the handshake is complete, the daemon reports the session status and other information via a second netlink operation. This operation marks that it is safe for the kernel to use the open socket and the security session established there. The notification service uses a multicast group. Each handshake mechanism (eg, tlshd) adopts its own group number so that the handshake services are completely independent of one another. The kernel can then tell via netlink_has_listeners() whether a handshake service is active and prepared to handle a handshake request. A new netlink operation, ACCEPT, acts like accept(2) in that it instantiates a file descriptor in the user space daemon's fd table. If this operation is successful, the reply carries the fd number, which can be treated as an open and ready file descriptor. While user space is performing the handshake, the kernel keeps its muddy paws off the open socket. A second new netlink operation, DONE, indicates that the user space daemon is finished with the socket and it is safe for the kernel to use again. The operation also indicates whether a session was established successfully. Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- Documentation/netlink/specs/handshake.yaml | 122 +++++++++++ MAINTAINERS | 9 + include/trace/events/handshake.h | 159 ++++++++++++++ include/uapi/linux/handshake.h | 71 ++++++ net/Kconfig | 5 + net/Makefile | 1 + net/handshake/Makefile | 11 + net/handshake/genl.c | 57 +++++ net/handshake/genl.h | 23 ++ net/handshake/handshake.h | 82 +++++++ net/handshake/netlink.c | 312 ++++++++++++++++++++++++++ net/handshake/request.c | 339 +++++++++++++++++++++++++++++ net/handshake/trace.c | 20 ++ 13 files changed, 1211 insertions(+) create mode 100644 Documentation/netlink/specs/handshake.yaml create mode 100644 include/trace/events/handshake.h create mode 100644 include/uapi/linux/handshake.h create mode 100644 net/handshake/Makefile create mode 100644 net/handshake/genl.c create mode 100644 net/handshake/genl.h create mode 100644 net/handshake/handshake.h create mode 100644 net/handshake/netlink.c create mode 100644 net/handshake/request.c create mode 100644 net/handshake/trace.c (limited to 'include/uapi/linux') diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml new file mode 100644 index 000000000000..0333d92b1438 --- /dev/null +++ b/Documentation/netlink/specs/handshake.yaml @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) +# +# Author: Chuck Lever +# +# Copyright (c) 2023, Oracle and/or its affiliates. 
+# + +name: handshake + +protocol: genetlink + +doc: Netlink protocol to request a transport layer security handshake. + +definitions: + - + type: enum + name: handler-class + value-start: 0 + entries: [ none, max ] + - + type: enum + name: msg-type + value-start: 0 + entries: [ unspec, clienthello, serverhello ] + - + type: enum + name: auth + value-start: 0 + entries: [ unspec, unauth, psk, x509 ] + +attribute-sets: + - + name: x509 + attributes: + - + name: cert + type: u32 + - + name: privkey + type: u32 + - + name: accept + attributes: + - + name: sockfd + type: u32 + - + name: handler-class + type: u32 + enum: handler-class + - + name: message-type + type: u32 + enum: msg-type + - + name: timeout + type: u32 + - + name: auth-mode + type: u32 + enum: auth + - + name: peer-identity + type: u32 + multi-attr: true + - + name: certificate + type: nest + nested-attributes: x509 + multi-attr: true + - + name: done + attributes: + - + name: status + type: u32 + - + name: sockfd + type: u32 + - + name: remote-auth + type: u32 + multi-attr: true + +operations: + list: + - + name: ready + doc: Notify handlers that a new handshake request is waiting + notify: accept + - + name: accept + doc: Handler retrieves next queued handshake request + attribute-set: accept + flags: [ admin-perm ] + do: + request: + attributes: + - handler-class + reply: + attributes: + - sockfd + - message-type + - timeout + - auth-mode + - peer-identity + - certificate + - + name: done + doc: Handler reports handshake completion + attribute-set: done + do: + request: + attributes: + - status + - sockfd + - remote-auth + +mcast-groups: + list: + - + name: none diff --git a/MAINTAINERS b/MAINTAINERS index 4fc57dfd5fd0..cdc7748d15b8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8947,6 +8947,15 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git F: drivers/media/usb/hackrf/ +HANDSHAKE UPCALL FOR TRANSPORT LAYER SECURITY +M: Chuck Lever +L: kernel-tls-handshake@lists.linux.dev +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/netlink/specs/handshake.yaml +F: include/trace/events/handshake.h +F: net/handshake/ + HANTRO VPU CODEC DRIVER M: Ezequiel Garcia M: Philipp Zabel diff --git a/include/trace/events/handshake.h b/include/trace/events/handshake.h new file mode 100644 index 000000000000..8dadcab5f12a --- /dev/null +++ b/include/trace/events/handshake.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM handshake + +#if !defined(_TRACE_HANDSHAKE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HANDSHAKE_H + +#include +#include + +DECLARE_EVENT_CLASS(handshake_event_class, + TP_PROTO( + const struct net *net, + const struct handshake_req *req, + const struct sock *sk + ), + TP_ARGS(net, req, sk), + TP_STRUCT__entry( + __field(const void *, req) + __field(const void *, sk) + __field(unsigned int, netns_ino) + ), + TP_fast_assign( + __entry->req = req; + __entry->sk = sk; + __entry->netns_ino = net->ns.inum; + ), + TP_printk("req=%p sk=%p", + __entry->req, __entry->sk + ) +); +#define DEFINE_HANDSHAKE_EVENT(name) \ + DEFINE_EVENT(handshake_event_class, name, \ + TP_PROTO( \ + const struct net *net, \ + const struct handshake_req *req, \ + const struct sock *sk \ + ), \ + TP_ARGS(net, req, sk)) + +DECLARE_EVENT_CLASS(handshake_fd_class, + TP_PROTO( + const struct net *net, + const struct handshake_req *req, + const struct sock *sk, + int fd + ), + TP_ARGS(net, req, sk, fd), + TP_STRUCT__entry( + __field(const 
void *, req) + __field(const void *, sk) + __field(int, fd) + __field(unsigned int, netns_ino) + ), + TP_fast_assign( + __entry->req = req; + __entry->sk = req->hr_sk; + __entry->fd = fd; + __entry->netns_ino = net->ns.inum; + ), + TP_printk("req=%p sk=%p fd=%d", + __entry->req, __entry->sk, __entry->fd + ) +); +#define DEFINE_HANDSHAKE_FD_EVENT(name) \ + DEFINE_EVENT(handshake_fd_class, name, \ + TP_PROTO( \ + const struct net *net, \ + const struct handshake_req *req, \ + const struct sock *sk, \ + int fd \ + ), \ + TP_ARGS(net, req, sk, fd)) + +DECLARE_EVENT_CLASS(handshake_error_class, + TP_PROTO( + const struct net *net, + const struct handshake_req *req, + const struct sock *sk, + int err + ), + TP_ARGS(net, req, sk, err), + TP_STRUCT__entry( + __field(const void *, req) + __field(const void *, sk) + __field(int, err) + __field(unsigned int, netns_ino) + ), + TP_fast_assign( + __entry->req = req; + __entry->sk = sk; + __entry->err = err; + __entry->netns_ino = net->ns.inum; + ), + TP_printk("req=%p sk=%p err=%d", + __entry->req, __entry->sk, __entry->err + ) +); +#define DEFINE_HANDSHAKE_ERROR(name) \ + DEFINE_EVENT(handshake_error_class, name, \ + TP_PROTO( \ + const struct net *net, \ + const struct handshake_req *req, \ + const struct sock *sk, \ + int err \ + ), \ + TP_ARGS(net, req, sk, err)) + + +/* + * Request lifetime events + */ + +DEFINE_HANDSHAKE_EVENT(handshake_submit); +DEFINE_HANDSHAKE_ERROR(handshake_submit_err); +DEFINE_HANDSHAKE_EVENT(handshake_cancel); +DEFINE_HANDSHAKE_EVENT(handshake_cancel_none); +DEFINE_HANDSHAKE_EVENT(handshake_cancel_busy); +DEFINE_HANDSHAKE_EVENT(handshake_destruct); + + +TRACE_EVENT(handshake_complete, + TP_PROTO( + const struct net *net, + const struct handshake_req *req, + const struct sock *sk, + int status + ), + TP_ARGS(net, req, sk, status), + TP_STRUCT__entry( + __field(const void *, req) + __field(const void *, sk) + __field(int, status) + __field(unsigned int, netns_ino) + ), + TP_fast_assign( + __entry->req = req; + __entry->sk = sk; + __entry->status = status; + __entry->netns_ino = net->ns.inum; + ), + TP_printk("req=%p sk=%p status=%d", + __entry->req, __entry->sk, __entry->status + ) +); + +/* + * Netlink events + */ + +DEFINE_HANDSHAKE_ERROR(handshake_notify_err); +DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_accept); +DEFINE_HANDSHAKE_ERROR(handshake_cmd_accept_err); +DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_done); +DEFINE_HANDSHAKE_ERROR(handshake_cmd_done_err); + +#endif /* _TRACE_HANDSHAKE_H */ + +#include diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h new file mode 100644 index 000000000000..7f66ff489b87 --- /dev/null +++ b/include/uapi/linux/handshake.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/handshake.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_HANDSHAKE_H +#define _UAPI_LINUX_HANDSHAKE_H + +#define HANDSHAKE_FAMILY_NAME "handshake" +#define HANDSHAKE_FAMILY_VERSION 1 + +enum handshake_handler_class { + HANDSHAKE_HANDLER_CLASS_NONE, + HANDSHAKE_HANDLER_CLASS_MAX, +}; + +enum handshake_msg_type { + HANDSHAKE_MSG_TYPE_UNSPEC, + HANDSHAKE_MSG_TYPE_CLIENTHELLO, + HANDSHAKE_MSG_TYPE_SERVERHELLO, +}; + +enum handshake_auth { + HANDSHAKE_AUTH_UNSPEC, + HANDSHAKE_AUTH_UNAUTH, + HANDSHAKE_AUTH_PSK, + HANDSHAKE_AUTH_X509, +}; + +enum { + HANDSHAKE_A_X509_CERT = 1, + HANDSHAKE_A_X509_PRIVKEY, + + __HANDSHAKE_A_X509_MAX, + HANDSHAKE_A_X509_MAX = 
(__HANDSHAKE_A_X509_MAX - 1) +}; + +enum { + HANDSHAKE_A_ACCEPT_SOCKFD = 1, + HANDSHAKE_A_ACCEPT_HANDLER_CLASS, + HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, + HANDSHAKE_A_ACCEPT_TIMEOUT, + HANDSHAKE_A_ACCEPT_AUTH_MODE, + HANDSHAKE_A_ACCEPT_PEER_IDENTITY, + HANDSHAKE_A_ACCEPT_CERTIFICATE, + + __HANDSHAKE_A_ACCEPT_MAX, + HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1) +}; + +enum { + HANDSHAKE_A_DONE_STATUS = 1, + HANDSHAKE_A_DONE_SOCKFD, + HANDSHAKE_A_DONE_REMOTE_AUTH, + + __HANDSHAKE_A_DONE_MAX, + HANDSHAKE_A_DONE_MAX = (__HANDSHAKE_A_DONE_MAX - 1) +}; + +enum { + HANDSHAKE_CMD_READY = 1, + HANDSHAKE_CMD_ACCEPT, + HANDSHAKE_CMD_DONE, + + __HANDSHAKE_CMD_MAX, + HANDSHAKE_CMD_MAX = (__HANDSHAKE_CMD_MAX - 1) +}; + +#define HANDSHAKE_MCGRP_NONE "none" + +#endif /* _UAPI_LINUX_HANDSHAKE_H */ diff --git a/net/Kconfig b/net/Kconfig index f806722bccf4..4b800706cc76 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -68,6 +68,11 @@ source "net/iucv/Kconfig" source "net/smc/Kconfig" source "net/xdp/Kconfig" +config NET_HANDSHAKE + bool + depends on SUNRPC || NVME_TARGET_TCP || NVME_TCP + default y + config INET bool "TCP/IP networking" help diff --git a/net/Makefile b/net/Makefile index 87592009366f..4c4dc535453d 100644 --- a/net/Makefile +++ b/net/Makefile @@ -79,3 +79,4 @@ obj-$(CONFIG_NET_NCSI) += ncsi/ obj-$(CONFIG_XDP_SOCKETS) += xdp/ obj-$(CONFIG_MPTCP) += mptcp/ obj-$(CONFIG_MCTP) += mctp/ +obj-$(CONFIG_NET_HANDSHAKE) += handshake/ diff --git a/net/handshake/Makefile b/net/handshake/Makefile new file mode 100644 index 000000000000..d38736de45da --- /dev/null +++ b/net/handshake/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Generic HANDSHAKE service +# +# Author: Chuck Lever +# +# Copyright (c) 2023, Oracle and/or its affiliates. 
+# + +obj-y += handshake.o +handshake-y := genl.o netlink.o request.o trace.o diff --git a/net/handshake/genl.c b/net/handshake/genl.c new file mode 100644 index 000000000000..652f37d19bd6 --- /dev/null +++ b/net/handshake/genl.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/handshake.yaml */ +/* YNL-GEN kernel source */ + +#include +#include + +#include "genl.h" + +#include + +/* HANDSHAKE_CMD_ACCEPT - do */ +static const struct nla_policy handshake_accept_nl_policy[HANDSHAKE_A_ACCEPT_HANDLER_CLASS + 1] = { + [HANDSHAKE_A_ACCEPT_HANDLER_CLASS] = NLA_POLICY_MAX(NLA_U32, 1), +}; + +/* HANDSHAKE_CMD_DONE - do */ +static const struct nla_policy handshake_done_nl_policy[HANDSHAKE_A_DONE_REMOTE_AUTH + 1] = { + [HANDSHAKE_A_DONE_STATUS] = { .type = NLA_U32, }, + [HANDSHAKE_A_DONE_SOCKFD] = { .type = NLA_U32, }, + [HANDSHAKE_A_DONE_REMOTE_AUTH] = { .type = NLA_U32, }, +}; + +/* Ops table for handshake */ +static const struct genl_split_ops handshake_nl_ops[] = { + { + .cmd = HANDSHAKE_CMD_ACCEPT, + .doit = handshake_nl_accept_doit, + .policy = handshake_accept_nl_policy, + .maxattr = HANDSHAKE_A_ACCEPT_HANDLER_CLASS, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = HANDSHAKE_CMD_DONE, + .doit = handshake_nl_done_doit, + .policy = handshake_done_nl_policy, + .maxattr = HANDSHAKE_A_DONE_REMOTE_AUTH, + .flags = GENL_CMD_CAP_DO, + }, +}; + +static const struct genl_multicast_group handshake_nl_mcgrps[] = { + [HANDSHAKE_NLGRP_NONE] = { "none", }, +}; + +struct genl_family handshake_nl_family __ro_after_init = { + .name = HANDSHAKE_FAMILY_NAME, + .version = HANDSHAKE_FAMILY_VERSION, + .netnsok = true, + .parallel_ops = true, + .module = THIS_MODULE, + .split_ops = handshake_nl_ops, + .n_split_ops = ARRAY_SIZE(handshake_nl_ops), + .mcgrps = handshake_nl_mcgrps, + .n_mcgrps = ARRAY_SIZE(handshake_nl_mcgrps), +}; diff --git a/net/handshake/genl.h b/net/handshake/genl.h new file mode 100644 index 000000000000..a1eb7ccccc7f --- /dev/null +++ b/net/handshake/genl.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/handshake.yaml */ +/* YNL-GEN kernel header */ + +#ifndef _LINUX_HANDSHAKE_GEN_H +#define _LINUX_HANDSHAKE_GEN_H + +#include +#include + +#include + +int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info); +int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info); + +enum { + HANDSHAKE_NLGRP_NONE, +}; + +extern struct genl_family handshake_nl_family; + +#endif /* _LINUX_HANDSHAKE_GEN_H */ diff --git a/net/handshake/handshake.h b/net/handshake/handshake.h new file mode 100644 index 000000000000..52568dbe24f1 --- /dev/null +++ b/net/handshake/handshake.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Generic netlink handshake service + * + * Author: Chuck Lever + * + * Copyright (c) 2023, Oracle and/or its affiliates. 
+ */ + +#ifndef _INTERNAL_HANDSHAKE_H +#define _INTERNAL_HANDSHAKE_H + +/* Per-net namespace context */ +struct handshake_net { + spinlock_t hn_lock; /* protects next 3 fields */ + int hn_pending; + int hn_pending_max; + struct list_head hn_requests; + + unsigned long hn_flags; +}; + +enum hn_flags_bits { + HANDSHAKE_F_NET_DRAINING, +}; + +struct handshake_proto; + +/* One handshake request */ +struct handshake_req { + struct list_head hr_list; + struct rhash_head hr_rhash; + unsigned long hr_flags; + const struct handshake_proto *hr_proto; + struct sock *hr_sk; + void (*hr_odestruct)(struct sock *sk); + + /* Always the last field */ + char hr_priv[]; +}; + +enum hr_flags_bits { + HANDSHAKE_F_REQ_COMPLETED, +}; + +/* Invariants for all handshake requests for one transport layer + * security protocol + */ +struct handshake_proto { + int hp_handler_class; + size_t hp_privsize; + + int (*hp_accept)(struct handshake_req *req, + struct genl_info *info, int fd); + void (*hp_done)(struct handshake_req *req, + unsigned int status, + struct genl_info *info); + void (*hp_destroy)(struct handshake_req *req); +}; + +/* netlink.c */ +int handshake_genl_notify(struct net *net, const struct handshake_proto *proto, + gfp_t flags); +struct nlmsghdr *handshake_genl_put(struct sk_buff *msg, + struct genl_info *info); +struct handshake_net *handshake_pernet(struct net *net); + +/* request.c */ +struct handshake_req *handshake_req_alloc(const struct handshake_proto *proto, + gfp_t flags); +int handshake_req_hash_init(void); +void handshake_req_hash_destroy(void); +void *handshake_req_private(struct handshake_req *req); +struct handshake_req *handshake_req_hash_lookup(struct sock *sk); +struct handshake_req *handshake_req_next(struct handshake_net *hn, int class); +int handshake_req_submit(struct socket *sock, struct handshake_req *req, + gfp_t flags); +void handshake_complete(struct handshake_req *req, unsigned int status, + struct genl_info *info); +bool handshake_req_cancel(struct sock *sk); + +#endif /* _INTERNAL_HANDSHAKE_H */ diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c new file mode 100644 index 000000000000..7264cac04047 --- /dev/null +++ b/net/handshake/netlink.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic netlink handshake service + * + * Author: Chuck Lever + * + * Copyright (c) 2023, Oracle and/or its affiliates. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "handshake.h" +#include "genl.h" + +#include + +/** + * handshake_genl_notify - Notify handlers that a request is waiting + * @net: target network namespace + * @proto: handshake protocol + * @flags: memory allocation control flags + * + * Returns zero on success or a negative errno if notification failed. 
+ */ +int handshake_genl_notify(struct net *net, const struct handshake_proto *proto, + gfp_t flags) +{ + struct sk_buff *msg; + void *hdr; + + if (!genl_has_listeners(&handshake_nl_family, net, + proto->hp_handler_class)) + return -ESRCH; + + msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &handshake_nl_family, 0, + HANDSHAKE_CMD_READY); + if (!hdr) + goto out_free; + + if (nla_put_u32(msg, HANDSHAKE_A_ACCEPT_HANDLER_CLASS, + proto->hp_handler_class) < 0) { + genlmsg_cancel(msg, hdr); + goto out_free; + } + + genlmsg_end(msg, hdr); + return genlmsg_multicast_netns(&handshake_nl_family, net, msg, + 0, proto->hp_handler_class, flags); + +out_free: + nlmsg_free(msg); + return -EMSGSIZE; +} + +/** + * handshake_genl_put - Create a generic netlink message header + * @msg: buffer in which to create the header + * @info: generic netlink message context + * + * Returns a ready-to-use header, or NULL. + */ +struct nlmsghdr *handshake_genl_put(struct sk_buff *msg, + struct genl_info *info) +{ + return genlmsg_put(msg, info->snd_portid, info->snd_seq, + &handshake_nl_family, 0, info->genlhdr->cmd); +} +EXPORT_SYMBOL(handshake_genl_put); + +/* + * dup() a kernel socket for use as a user space file descriptor + * in the current process. The kernel socket must have an + * instatiated struct file. + * + * Implicit argument: "current()" + */ +static int handshake_dup(struct socket *sock) +{ + struct file *file; + int newfd; + + if (!sock->file) + return -EBADF; + + file = get_file(sock->file); + newfd = get_unused_fd_flags(O_CLOEXEC); + if (newfd < 0) { + fput(file); + return newfd; + } + + fd_install(newfd, file); + return newfd; +} + +int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct net *net = sock_net(skb->sk); + struct handshake_net *hn = handshake_pernet(net); + struct handshake_req *req = NULL; + struct socket *sock; + int class, fd, err; + + err = -EOPNOTSUPP; + if (!hn) + goto out_status; + + err = -EINVAL; + if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_ACCEPT_HANDLER_CLASS)) + goto out_status; + class = nla_get_u32(info->attrs[HANDSHAKE_A_ACCEPT_HANDLER_CLASS]); + + err = -EAGAIN; + req = handshake_req_next(hn, class); + if (!req) + goto out_status; + + sock = req->hr_sk->sk_socket; + fd = handshake_dup(sock); + if (fd < 0) { + err = fd; + goto out_complete; + } + err = req->hr_proto->hp_accept(req, info, fd); + if (err) + goto out_complete; + + trace_handshake_cmd_accept(net, req, req->hr_sk, fd); + return 0; + +out_complete: + handshake_complete(req, -EIO, NULL); + fput(sock->file); +out_status: + trace_handshake_cmd_accept_err(net, req, NULL, err); + return err; +} + +int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct net *net = sock_net(skb->sk); + struct socket *sock = NULL; + struct handshake_req *req; + int fd, status, err; + + if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD)) + return -EINVAL; + fd = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]); + + err = 0; + sock = sockfd_lookup(fd, &err); + if (err) { + err = -EBADF; + goto out_status; + } + + req = handshake_req_hash_lookup(sock->sk); + if (!req) { + err = -EBUSY; + fput(sock->file); + goto out_status; + } + + trace_handshake_cmd_done(net, req, sock->sk, fd); + + status = -EIO; + if (info->attrs[HANDSHAKE_A_DONE_STATUS]) + status = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_STATUS]); + + handshake_complete(req, status, info); + fput(sock->file); + return 0; + +out_status: + 
trace_handshake_cmd_done_err(net, req, sock->sk, err); + return err; +} + +static unsigned int handshake_net_id; + +static int __net_init handshake_net_init(struct net *net) +{ + struct handshake_net *hn = net_generic(net, handshake_net_id); + unsigned long tmp; + struct sysinfo si; + + /* + * Arbitrary limit to prevent handshakes that do not make + * progress from clogging up the system. The cap scales up + * with the amount of physical memory on the system. + */ + si_meminfo(&si); + tmp = si.totalram / (25 * si.mem_unit); + hn->hn_pending_max = clamp(tmp, 3UL, 50UL); + + spin_lock_init(&hn->hn_lock); + hn->hn_pending = 0; + hn->hn_flags = 0; + INIT_LIST_HEAD(&hn->hn_requests); + return 0; +} + +static void __net_exit handshake_net_exit(struct net *net) +{ + struct handshake_net *hn = net_generic(net, handshake_net_id); + struct handshake_req *req; + LIST_HEAD(requests); + + /* + * Drain the net's pending list. Requests that have been + * accepted and are in progress will be destroyed when + * the socket is closed. + */ + spin_lock(&hn->hn_lock); + set_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags); + list_splice_init(&requests, &hn->hn_requests); + spin_unlock(&hn->hn_lock); + + while (!list_empty(&requests)) { + req = list_first_entry(&requests, struct handshake_req, hr_list); + list_del(&req->hr_list); + + /* + * Requests on this list have not yet been + * accepted, so they do not have an fd to put. + */ + + handshake_complete(req, -ETIMEDOUT, NULL); + } +} + +static struct pernet_operations __net_initdata handshake_genl_net_ops = { + .init = handshake_net_init, + .exit = handshake_net_exit, + .id = &handshake_net_id, + .size = sizeof(struct handshake_net), +}; + +/** + * handshake_pernet - Get the handshake private per-net structure + * @net: network namespace + * + * Returns a pointer to the net's private per-net structure for the + * handshake module, or NULL if handshake_init() failed. + */ +struct handshake_net *handshake_pernet(struct net *net) +{ + return handshake_net_id ? + net_generic(net, handshake_net_id) : NULL; +} + +static int __init handshake_init(void) +{ + int ret; + + ret = handshake_req_hash_init(); + if (ret) { + pr_warn("handshake: hash initialization failed (%d)\n", ret); + return ret; + } + + ret = genl_register_family(&handshake_nl_family); + if (ret) { + pr_warn("handshake: netlink registration failed (%d)\n", ret); + handshake_req_hash_destroy(); + return ret; + } + + /* + * ORDER: register_pernet_subsys must be done last. + * + * If initialization does not make it past pernet_subsys + * registration, then handshake_net_id will remain 0. That + * shunts the handshake consumer API to return ENOTSUPP + * to prevent it from dereferencing something that hasn't + * been allocated. 
+ */ + ret = register_pernet_subsys(&handshake_genl_net_ops); + if (ret) { + pr_warn("handshake: pernet registration failed (%d)\n", ret); + genl_unregister_family(&handshake_nl_family); + handshake_req_hash_destroy(); + } + + return ret; +} + +static void __exit handshake_exit(void) +{ + unregister_pernet_subsys(&handshake_genl_net_ops); + handshake_net_id = 0; + + handshake_req_hash_destroy(); + genl_unregister_family(&handshake_nl_family); +} + +module_init(handshake_init); +module_exit(handshake_exit); diff --git a/net/handshake/request.c b/net/handshake/request.c new file mode 100644 index 000000000000..d5b2bc6de057 --- /dev/null +++ b/net/handshake/request.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Handshake request lifetime events + * + * Author: Chuck Lever + * + * Copyright (c) 2023, Oracle and/or its affiliates. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "handshake.h" + +#include + +/* + * We need both a handshake_req -> sock mapping, and a sock -> + * handshake_req mapping. Both are one-to-one. + * + * To avoid adding another pointer field to struct sock, net/handshake + * maintains a hash table, indexed by the memory address of @sock, to + * find the struct handshake_req outstanding for that socket. The + * reverse direction uses a simple pointer field in the handshake_req + * struct. + */ + +static struct rhashtable handshake_rhashtbl ____cacheline_aligned_in_smp; + +static const struct rhashtable_params handshake_rhash_params = { + .key_len = sizeof_field(struct handshake_req, hr_sk), + .key_offset = offsetof(struct handshake_req, hr_sk), + .head_offset = offsetof(struct handshake_req, hr_rhash), + .automatic_shrinking = true, +}; + +int handshake_req_hash_init(void) +{ + return rhashtable_init(&handshake_rhashtbl, &handshake_rhash_params); +} + +void handshake_req_hash_destroy(void) +{ + rhashtable_destroy(&handshake_rhashtbl); +} + +struct handshake_req *handshake_req_hash_lookup(struct sock *sk) +{ + return rhashtable_lookup_fast(&handshake_rhashtbl, &sk, + handshake_rhash_params); +} + +static bool handshake_req_hash_add(struct handshake_req *req) +{ + int ret; + + ret = rhashtable_lookup_insert_fast(&handshake_rhashtbl, + &req->hr_rhash, + handshake_rhash_params); + return ret == 0; +} + +static void handshake_req_destroy(struct handshake_req *req) +{ + if (req->hr_proto->hp_destroy) + req->hr_proto->hp_destroy(req); + rhashtable_remove_fast(&handshake_rhashtbl, &req->hr_rhash, + handshake_rhash_params); + kfree(req); +} + +static void handshake_sk_destruct(struct sock *sk) +{ + void (*sk_destruct)(struct sock *sk); + struct handshake_req *req; + + req = handshake_req_hash_lookup(sk); + if (!req) + return; + + trace_handshake_destruct(sock_net(sk), req, sk); + sk_destruct = req->hr_odestruct; + handshake_req_destroy(req); + if (sk_destruct) + sk_destruct(sk); +} + +/** + * handshake_req_alloc - Allocate a handshake request + * @proto: security protocol + * @flags: memory allocation flags + * + * Returns an initialized handshake_req or NULL. 
+ */ +struct handshake_req *handshake_req_alloc(const struct handshake_proto *proto, + gfp_t flags) +{ + struct handshake_req *req; + + if (!proto) + return NULL; + if (proto->hp_handler_class <= HANDSHAKE_HANDLER_CLASS_NONE) + return NULL; + if (proto->hp_handler_class >= HANDSHAKE_HANDLER_CLASS_MAX) + return NULL; + if (!proto->hp_accept || !proto->hp_done) + return NULL; + + req = kzalloc(struct_size(req, hr_priv, proto->hp_privsize), flags); + if (!req) + return NULL; + + INIT_LIST_HEAD(&req->hr_list); + req->hr_proto = proto; + return req; +} +EXPORT_SYMBOL(handshake_req_alloc); + +/** + * handshake_req_private - Get per-handshake private data + * @req: handshake arguments + * + */ +void *handshake_req_private(struct handshake_req *req) +{ + return (void *)&req->hr_priv; +} +EXPORT_SYMBOL(handshake_req_private); + +static bool __add_pending_locked(struct handshake_net *hn, + struct handshake_req *req) +{ + if (WARN_ON_ONCE(!list_empty(&req->hr_list))) + return false; + hn->hn_pending++; + list_add_tail(&req->hr_list, &hn->hn_requests); + return true; +} + +static void __remove_pending_locked(struct handshake_net *hn, + struct handshake_req *req) +{ + hn->hn_pending--; + list_del_init(&req->hr_list); +} + +/* + * Returns %true if the request was found on @net's pending list, + * otherwise %false. + * + * If @req was on a pending list, it has not yet been accepted. + */ +static bool remove_pending(struct handshake_net *hn, struct handshake_req *req) +{ + bool ret = false; + + spin_lock(&hn->hn_lock); + if (!list_empty(&req->hr_list)) { + __remove_pending_locked(hn, req); + ret = true; + } + spin_unlock(&hn->hn_lock); + + return ret; +} + +struct handshake_req *handshake_req_next(struct handshake_net *hn, int class) +{ + struct handshake_req *req, *pos; + + req = NULL; + spin_lock(&hn->hn_lock); + list_for_each_entry(pos, &hn->hn_requests, hr_list) { + if (pos->hr_proto->hp_handler_class != class) + continue; + __remove_pending_locked(hn, pos); + req = pos; + break; + } + spin_unlock(&hn->hn_lock); + + return req; +} + +/** + * handshake_req_submit - Submit a handshake request + * @sock: open socket on which to perform the handshake + * @req: handshake arguments + * @flags: memory allocation flags + * + * Return values: + * %0: Request queued + * %-EINVAL: Invalid argument + * %-EBUSY: A handshake is already under way for this socket + * %-ESRCH: No handshake agent is available + * %-EAGAIN: Too many pending handshake requests + * %-ENOMEM: Failed to allocate memory + * %-EMSGSIZE: Failed to construct notification message + * %-EOPNOTSUPP: Handshake module not initialized + * + * A zero return value from handshake_req_submit() means that + * exactly one subsequent completion callback is guaranteed. + * + * A negative return value from handshake_req_submit() means that + * no completion callback will be done and that @req has been + * destroyed. 
+ */ +int handshake_req_submit(struct socket *sock, struct handshake_req *req, + gfp_t flags) +{ + struct handshake_net *hn; + struct net *net; + int ret; + + if (!sock || !req || !sock->file) { + kfree(req); + return -EINVAL; + } + + req->hr_sk = sock->sk; + if (!req->hr_sk) { + kfree(req); + return -EINVAL; + } + req->hr_odestruct = req->hr_sk->sk_destruct; + req->hr_sk->sk_destruct = handshake_sk_destruct; + + ret = -EOPNOTSUPP; + net = sock_net(req->hr_sk); + hn = handshake_pernet(net); + if (!hn) + goto out_err; + + ret = -EAGAIN; + if (READ_ONCE(hn->hn_pending) >= hn->hn_pending_max) + goto out_err; + + spin_lock(&hn->hn_lock); + ret = -EOPNOTSUPP; + if (test_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags)) + goto out_unlock; + ret = -EBUSY; + if (!handshake_req_hash_add(req)) + goto out_unlock; + if (!__add_pending_locked(hn, req)) + goto out_unlock; + spin_unlock(&hn->hn_lock); + + ret = handshake_genl_notify(net, req->hr_proto, flags); + if (ret) { + trace_handshake_notify_err(net, req, req->hr_sk, ret); + if (remove_pending(hn, req)) + goto out_err; + } + + /* Prevent socket release while a handshake request is pending */ + sock_hold(req->hr_sk); + + trace_handshake_submit(net, req, req->hr_sk); + return 0; + +out_unlock: + spin_unlock(&hn->hn_lock); +out_err: + trace_handshake_submit_err(net, req, req->hr_sk, ret); + handshake_req_destroy(req); + return ret; +} +EXPORT_SYMBOL(handshake_req_submit); + +void handshake_complete(struct handshake_req *req, unsigned int status, + struct genl_info *info) +{ + struct sock *sk = req->hr_sk; + struct net *net = sock_net(sk); + + if (!test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) { + trace_handshake_complete(net, req, sk, status); + req->hr_proto->hp_done(req, status, info); + + /* Handshake request is no longer pending */ + sock_put(sk); + } +} + +/** + * handshake_req_cancel - Cancel an in-progress handshake + * @sk: socket on which there is an ongoing handshake + * + * Request cancellation races with request completion. To determine + * who won, callers examine the return value from this function. + * + * Return values: + * %true - Uncompleted handshake request was canceled + * %false - Handshake request already completed or not found + */ +bool handshake_req_cancel(struct sock *sk) +{ + struct handshake_req *req; + struct handshake_net *hn; + struct net *net; + + net = sock_net(sk); + req = handshake_req_hash_lookup(sk); + if (!req) { + trace_handshake_cancel_none(net, req, sk); + return false; + } + + hn = handshake_pernet(net); + if (hn && remove_pending(hn, req)) { + /* Request hadn't been accepted */ + goto out_true; + } + if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) { + /* Request already completed */ + trace_handshake_cancel_busy(net, req, sk); + return false; + } + +out_true: + trace_handshake_cancel(net, req, sk); + + /* Handshake request is no longer pending */ + sock_put(sk); + return true; +} +EXPORT_SYMBOL(handshake_req_cancel); diff --git a/net/handshake/trace.c b/net/handshake/trace.c new file mode 100644 index 000000000000..1c4d8e27e17a --- /dev/null +++ b/net/handshake/trace.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Trace points for transport security layer handshakes. + * + * Author: Chuck Lever + * + * Copyright (c) 2023, Oracle and/or its affiliates. 
+ */ + +#include + +#include +#include +#include + +#include "handshake.h" + +#define CREATE_TRACE_POINTS + +#include -- cgit v1.2.3 From 2fd5532044a89d2403b543520b4902e196f7d165 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 17 Apr 2023 10:32:33 -0400 Subject: net/handshake: Add a kernel API for requesting a TLSv1.3 handshake To enable kernel consumers of TLS to request a TLS handshake, add support to net/handshake/ to request a handshake upcall. This patch also acts as a template for adding handshake upcall support for other kernel transport layer security providers. Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- Documentation/netlink/specs/handshake.yaml | 4 +- Documentation/networking/index.rst | 1 + Documentation/networking/tls-handshake.rst | 217 +++++++++++++++ MAINTAINERS | 2 + include/net/handshake.h | 43 +++ include/uapi/linux/handshake.h | 2 + net/handshake/Makefile | 2 +- net/handshake/genl.c | 3 +- net/handshake/genl.h | 1 + net/handshake/tlshd.c | 417 +++++++++++++++++++++++++++++ 10 files changed, 689 insertions(+), 3 deletions(-) create mode 100644 Documentation/networking/tls-handshake.rst create mode 100644 include/net/handshake.h create mode 100644 net/handshake/tlshd.c (limited to 'include/uapi/linux') diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml index 0333d92b1438..614f1a585511 100644 --- a/Documentation/netlink/specs/handshake.yaml +++ b/Documentation/netlink/specs/handshake.yaml @@ -16,7 +16,7 @@ definitions: type: enum name: handler-class value-start: 0 - entries: [ none, max ] + entries: [ none, tlshd, max ] - type: enum name: msg-type @@ -120,3 +120,5 @@ mcast-groups: list: - name: none + - + name: tlshd diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 24bb256d6d53..a164ff074356 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -36,6 +36,7 @@ Contents: scaling tls tls-offload + tls-handshake nfc 6lowpan 6pack diff --git a/Documentation/networking/tls-handshake.rst b/Documentation/networking/tls-handshake.rst new file mode 100644 index 000000000000..a2817a88e905 --- /dev/null +++ b/Documentation/networking/tls-handshake.rst @@ -0,0 +1,217 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======================= +In-Kernel TLS Handshake +======================= + +Overview +======== + +Transport Layer Security (TLS) is a Upper Layer Protocol (ULP) that runs +over TCP. TLS provides end-to-end data integrity and confidentiality in +addition to peer authentication. + +The kernel's kTLS implementation handles the TLS record subprotocol, but +does not handle the TLS handshake subprotocol which is used to establish +a TLS session. Kernel consumers can use the API described here to +request TLS session establishment. + +There are several possible ways to provide a handshake service in the +kernel. The API described here is designed to hide the details of those +implementations so that in-kernel TLS consumers do not need to be +aware of how the handshake gets done. + + +User handshake agent +==================== + +As of this writing, there is no TLS handshake implementation in the +Linux kernel. To provide a handshake service, a handshake agent +(typically in user space) is started in each network namespace where a +kernel consumer might require a TLS handshake. Handshake agents listen +for events sent from the kernel that indicate a handshake request is +waiting. 
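+
+For illustration only, a user space agent might watch for those events
+through the generic netlink API. The sketch below uses the libnl-3
+library (an assumption of this example, not a requirement of the kernel
+API) and the "tlshd" handler class and multicast group defined by this
+netlink family; a reference agent such as the tlshd daemon from the
+ktls-utils project works along these lines:
+
+.. code-block:: c
+
+  #include <errno.h>
+  #include <netlink/netlink.h>
+  #include <netlink/genl/genl.h>
+  #include <netlink/genl/ctrl.h>
+
+  static int wait_for_handshake_request(void)
+  {
+        struct nl_sock *sock;
+        int group, err;
+
+        sock = nl_socket_alloc();
+        if (!sock)
+                return -ENOMEM;
+        err = genl_connect(sock);
+        if (err)
+                goto out;
+
+        /* Join the multicast group on which the kernel announces
+         * that a handshake request is waiting to be accepted. */
+        group = genl_ctrl_resolve_grp(sock, "handshake", "tlshd");
+        if (group < 0) {
+                err = group;
+                goto out;
+        }
+        err = nl_socket_add_membership(sock, group);
+        if (err)
+                goto out;
+
+        /* Block until a notification arrives; the agent then issues
+         * an ACCEPT command to retrieve the handshake parameters. */
+        err = nl_recvmsgs_default(sock);
+  out:
+        nl_socket_free(sock);
+        return err;
+  }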
+ +An open socket is passed to a handshake agent via a netlink operation, +which creates a socket descriptor in the agent's file descriptor table. +If the handshake completes successfully, the handshake agent promotes +the socket to use the TLS ULP and sets the session information using the +SOL_TLS socket options. The handshake agent returns the socket to the +kernel via a second netlink operation. + + +Kernel Handshake API +==================== + +A kernel TLS consumer initiates a client-side TLS handshake on an open +socket by invoking one of the tls_client_hello() functions. First, it +fills in a structure that contains the parameters of the request: + +.. code-block:: c + + struct tls_handshake_args { + struct socket *ta_sock; + tls_done_func_t ta_done; + void *ta_data; + unsigned int ta_timeout_ms; + key_serial_t ta_keyring; + key_serial_t ta_my_cert; + key_serial_t ta_my_privkey; + unsigned int ta_num_peerids; + key_serial_t ta_my_peerids[5]; + }; + +The @ta_sock field references an open and connected socket. The consumer +must hold a reference on the socket to prevent it from being destroyed +while the handshake is in progress. The consumer must also have +instantiated a struct file in sock->file. + + +@ta_done contains a callback function that is invoked when the handshake +has completed. Further explanation of this function is in the "Handshake +Completion" sesction below. + +The consumer can fill in the @ta_timeout_ms field to force the servicing +handshake agent to exit after a number of milliseconds. This enables the +socket to be fully closed once both the kernel and the handshake agent +have closed their endpoints. + +Authentication material such as x.509 certificates, private certificate +keys, and pre-shared keys are provided to the handshake agent in keys +that are instantiated by the consumer before making the handshake +request. The consumer can provide a private keyring that is linked into +the handshake agent's process keyring in the @ta_keyring field to prevent +access of those keys by other subsystems. + +To request an x.509-authenticated TLS session, the consumer fills in +the @ta_my_cert and @ta_my_privkey fields with the serial numbers of +keys containing an x.509 certificate and the private key for that +certificate. Then, it invokes this function: + +.. code-block:: c + + ret = tls_client_hello_x509(args, gfp_flags); + +The function returns zero when the handshake request is under way. A +zero return guarantees the callback function @ta_done will be invoked +for this socket. The function returns a negative errno if the handshake +could not be started. A negative errno guarantees the callback function +@ta_done will not be invoked on this socket. + + +To initiate a client-side TLS handshake with a pre-shared key, use: + +.. code-block:: c + + ret = tls_client_hello_psk(args, gfp_flags); + +However, in this case, the consumer fills in the @ta_my_peerids array +with serial numbers of keys containing the peer identities it wishes +to offer, and the @ta_num_peerids field with the number of array +entries it has filled in. The other fields are filled in as above. + + +To initiate an anonymous client-side TLS handshake use: + +.. code-block:: c + + ret = tls_client_hello_anon(args, gfp_flags); + +The handshake agent presents no peer identity information to the remote +during this type of handshake. Only server authentication (ie the client +verifies the server's identity) is performed during the handshake. Thus +the established session uses encryption only. 
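+
+Putting the client-side pieces together, a minimal sketch of an
+x.509-authenticated request might look like the following. The
+my_consumer structure, the function names, and the use of a completion
+to wait for the result are illustrative assumptions, not part of the
+API; only tls_handshake_args, tls_client_hello_x509(), and the
+tls_done_func_t signature come from this interface:
+
+.. code-block:: c
+
+  struct my_consumer {
+        struct completion done;
+        int status;
+        key_serial_t peerid;
+  };
+
+  static void my_handshake_done(void *data, int status, key_serial_t peerid)
+  {
+        struct my_consumer *c = data;
+
+        c->status = status;
+        c->peerid = peerid;
+        complete(&c->done);
+  }
+
+  /* The caller is assumed to hold a reference on @sock throughout. */
+  static int my_tls_connect(struct socket *sock, key_serial_t cert,
+                            key_serial_t privkey)
+  {
+        struct my_consumer c = { .status = -EIO };
+        struct tls_handshake_args args = {
+                .ta_sock        = sock,
+                .ta_done        = my_handshake_done,
+                .ta_data        = &c,
+                .ta_timeout_ms  = 10000,
+                .ta_keyring     = TLS_NO_KEYRING,
+                .ta_my_cert     = cert,
+                .ta_my_privkey  = privkey,
+        };
+        int ret;
+
+        init_completion(&c.done);
+        ret = tls_client_hello_x509(&args, GFP_KERNEL);
+        if (ret)
+                return ret;     /* @ta_done will not be invoked */
+
+        /* A zero return guarantees exactly one completion callback. */
+        wait_for_completion(&c.done);
+        return c.status;
+  }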
+ + +Consumers that are in-kernel servers use: + +.. code-block:: c + + ret = tls_server_hello_x509(args, gfp_flags); + +or + +.. code-block:: c + + ret = tls_server_hello_psk(args, gfp_flags); + +The argument structure is filled in as above. + + +If the consumer needs to cancel the handshake request, say, due to a ^C +or other exigent event, the consumer can invoke: + +.. code-block:: c + + bool tls_handshake_cancel(sock); + +This function returns true if the handshake request associated with +@sock has been canceled. The consumer's handshake completion callback +will not be invoked. If this function returns false, then the consumer's +completion callback has already been invoked. + + +Handshake Completion +==================== + +When the handshake agent has completed processing, it notifies the +kernel that the socket may be used by the consumer again. At this point, +the consumer's handshake completion callback, provided in the @ta_done +field in the tls_handshake_args structure, is invoked. + +The synopsis of this function is: + +.. code-block:: c + + typedef void (*tls_done_func_t)(void *data, int status, + key_serial_t peerid); + +The consumer provides a cookie in the @ta_data field of the +tls_handshake_args structure that is returned in the @data parameter of +this callback. The consumer uses the cookie to match the callback to the +thread waiting for the handshake to complete. + +The success status of the handshake is returned via the @status +parameter: + ++------------+----------------------------------------------+ +| status | meaning | ++============+==============================================+ +| 0 | TLS session established successfully | ++------------+----------------------------------------------+ +| -EACCESS | Remote peer rejected the handshake or | +| | authentication failed | ++------------+----------------------------------------------+ +| -ENOMEM | Temporary resource allocation failure | ++------------+----------------------------------------------+ +| -EINVAL | Consumer provided an invalid argument | ++------------+----------------------------------------------+ +| -ENOKEY | Missing authentication material | ++------------+----------------------------------------------+ +| -EIO | An unexpected fault occurred | ++------------+----------------------------------------------+ + +The @peerid parameter contains the serial number of a key containing the +remote peer's identity or the value TLS_NO_PEERID if the session is not +authenticated. + +A best practice is to close and destroy the socket immediately if the +handshake failed. + + +Other considerations +-------------------- + +While a handshake is under way, the kernel consumer must alter the +socket's sk_data_ready callback function to ignore all incoming data. +Once the handshake completion callback function has been invoked, normal +receive operation can be resumed. + +Once a TLS session is established, the consumer must provide a buffer +for and then examine the control message (CMSG) that is part of every +subsequent sock_recvmsg(). Each control message indicates whether the +received message data is TLS record data or session metadata. + +See tls.rst for details on how a kTLS consumer recognizes incoming +(decrypted) application data, alerts, and handshake packets once the +socket has been promoted to use the TLS ULP. 
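+
+As an illustration only (a sketch, not code lifted from an in-tree
+consumer), such a check might look like the following. SOL_TLS and
+TLS_GET_RECORD_TYPE come from the kTLS UAPI; the function name and its
+arguments are assumptions of this example:
+
+.. code-block:: c
+
+  static int my_recv_record(struct socket *sock, void *buf, size_t len,
+                            u8 *record_type)
+  {
+        union {
+                struct cmsghdr cmsg;
+                u8 buf[CMSG_SPACE(sizeof(u8))];
+        } u;
+        struct msghdr msg = { .msg_control = &u, .msg_controllen = sizeof(u) };
+        struct kvec iov = { .iov_base = buf, .iov_len = len };
+        int ret;
+
+        *record_type = 0x17;    /* TLS application_data */
+        iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
+        ret = sock_recvmsg(sock, &msg, 0);
+        if (ret > 0 && msg.msg_controllen != sizeof(u) &&
+            u.cmsg.cmsg_level == SOL_TLS &&
+            u.cmsg.cmsg_type == TLS_GET_RECORD_TYPE)
+                *record_type = *(u8 *)CMSG_DATA(&u.cmsg);
+
+        /* A record type other than application_data (0x17) indicates
+         * session metadata such as an alert or a handshake message. */
+        return ret;
+  }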
diff --git a/MAINTAINERS b/MAINTAINERS index cdc7748d15b8..04ebde8ccb75 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8953,6 +8953,8 @@ L: kernel-tls-handshake@lists.linux.dev L: netdev@vger.kernel.org S: Maintained F: Documentation/netlink/specs/handshake.yaml +F: Documentation/networking/tls-handshake.rst +F: include/net/handshake.h F: include/trace/events/handshake.h F: net/handshake/ diff --git a/include/net/handshake.h b/include/net/handshake.h new file mode 100644 index 000000000000..3352b1ab43b3 --- /dev/null +++ b/include/net/handshake.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Generic netlink HANDSHAKE service. + * + * Author: Chuck Lever + * + * Copyright (c) 2023, Oracle and/or its affiliates. + */ + +#ifndef _NET_HANDSHAKE_H +#define _NET_HANDSHAKE_H + +enum { + TLS_NO_KEYRING = 0, + TLS_NO_PEERID = 0, + TLS_NO_CERT = 0, + TLS_NO_PRIVKEY = 0, +}; + +typedef void (*tls_done_func_t)(void *data, int status, + key_serial_t peerid); + +struct tls_handshake_args { + struct socket *ta_sock; + tls_done_func_t ta_done; + void *ta_data; + unsigned int ta_timeout_ms; + key_serial_t ta_keyring; + key_serial_t ta_my_cert; + key_serial_t ta_my_privkey; + unsigned int ta_num_peerids; + key_serial_t ta_my_peerids[5]; +}; + +int tls_client_hello_anon(const struct tls_handshake_args *args, gfp_t flags); +int tls_client_hello_x509(const struct tls_handshake_args *args, gfp_t flags); +int tls_client_hello_psk(const struct tls_handshake_args *args, gfp_t flags); +int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags); +int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags); + +bool tls_handshake_cancel(struct sock *sk); + +#endif /* _NET_HANDSHAKE_H */ diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h index 7f66ff489b87..1de4d0b95325 100644 --- a/include/uapi/linux/handshake.h +++ b/include/uapi/linux/handshake.h @@ -11,6 +11,7 @@ enum handshake_handler_class { HANDSHAKE_HANDLER_CLASS_NONE, + HANDSHAKE_HANDLER_CLASS_TLSHD, HANDSHAKE_HANDLER_CLASS_MAX, }; @@ -67,5 +68,6 @@ enum { }; #define HANDSHAKE_MCGRP_NONE "none" +#define HANDSHAKE_MCGRP_TLSHD "tlshd" #endif /* _UAPI_LINUX_HANDSHAKE_H */ diff --git a/net/handshake/Makefile b/net/handshake/Makefile index d38736de45da..a089f7e3df24 100644 --- a/net/handshake/Makefile +++ b/net/handshake/Makefile @@ -8,4 +8,4 @@ # obj-y += handshake.o -handshake-y := genl.o netlink.o request.o trace.o +handshake-y := genl.o netlink.o request.o tlshd.o trace.o diff --git a/net/handshake/genl.c b/net/handshake/genl.c index 652f37d19bd6..9f29efb1493e 100644 --- a/net/handshake/genl.c +++ b/net/handshake/genl.c @@ -12,7 +12,7 @@ /* HANDSHAKE_CMD_ACCEPT - do */ static const struct nla_policy handshake_accept_nl_policy[HANDSHAKE_A_ACCEPT_HANDLER_CLASS + 1] = { - [HANDSHAKE_A_ACCEPT_HANDLER_CLASS] = NLA_POLICY_MAX(NLA_U32, 1), + [HANDSHAKE_A_ACCEPT_HANDLER_CLASS] = NLA_POLICY_MAX(NLA_U32, 2), }; /* HANDSHAKE_CMD_DONE - do */ @@ -42,6 +42,7 @@ static const struct genl_split_ops handshake_nl_ops[] = { static const struct genl_multicast_group handshake_nl_mcgrps[] = { [HANDSHAKE_NLGRP_NONE] = { "none", }, + [HANDSHAKE_NLGRP_TLSHD] = { "tlshd", }, }; struct genl_family handshake_nl_family __ro_after_init = { diff --git a/net/handshake/genl.h b/net/handshake/genl.h index a1eb7ccccc7f..2c1f1aa6a02a 100644 --- a/net/handshake/genl.h +++ b/net/handshake/genl.h @@ -16,6 +16,7 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info); enum { HANDSHAKE_NLGRP_NONE, 
+ HANDSHAKE_NLGRP_TLSHD, }; extern struct genl_family handshake_nl_family; diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c new file mode 100644 index 000000000000..1b8353296060 --- /dev/null +++ b/net/handshake/tlshd.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Establish a TLS session for a kernel socket consumer + * using the tlshd user space handler. + * + * Author: Chuck Lever + * + * Copyright (c) 2021-2023, Oracle and/or its affiliates. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include "handshake.h" + +struct tls_handshake_req { + void (*th_consumer_done)(void *data, int status, + key_serial_t peerid); + void *th_consumer_data; + + int th_type; + unsigned int th_timeout_ms; + int th_auth_mode; + key_serial_t th_keyring; + key_serial_t th_certificate; + key_serial_t th_privkey; + + unsigned int th_num_peerids; + key_serial_t th_peerid[5]; +}; + +static struct tls_handshake_req * +tls_handshake_req_init(struct handshake_req *req, + const struct tls_handshake_args *args) +{ + struct tls_handshake_req *treq = handshake_req_private(req); + + treq->th_timeout_ms = args->ta_timeout_ms; + treq->th_consumer_done = args->ta_done; + treq->th_consumer_data = args->ta_data; + treq->th_keyring = args->ta_keyring; + treq->th_num_peerids = 0; + treq->th_certificate = TLS_NO_CERT; + treq->th_privkey = TLS_NO_PRIVKEY; + return treq; +} + +static void tls_handshake_remote_peerids(struct tls_handshake_req *treq, + struct genl_info *info) +{ + struct nlattr *head = nlmsg_attrdata(info->nlhdr, GENL_HDRLEN); + int rem, len = nlmsg_attrlen(info->nlhdr, GENL_HDRLEN); + struct nlattr *nla; + unsigned int i; + + i = 0; + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) == HANDSHAKE_A_DONE_REMOTE_AUTH) + i++; + } + if (!i) + return; + treq->th_num_peerids = min_t(unsigned int, i, + ARRAY_SIZE(treq->th_peerid)); + + i = 0; + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) == HANDSHAKE_A_DONE_REMOTE_AUTH) + treq->th_peerid[i++] = nla_get_u32(nla); + if (i >= treq->th_num_peerids) + break; + } +} + +/** + * tls_handshake_done - callback to handle a CMD_DONE request + * @req: socket on which the handshake was performed + * @status: session status code + * @info: full results of session establishment + * + */ +static void tls_handshake_done(struct handshake_req *req, + unsigned int status, struct genl_info *info) +{ + struct tls_handshake_req *treq = handshake_req_private(req); + + treq->th_peerid[0] = TLS_NO_PEERID; + if (info) + tls_handshake_remote_peerids(treq, info); + + treq->th_consumer_done(treq->th_consumer_data, -status, + treq->th_peerid[0]); +} + +#if IS_ENABLED(CONFIG_KEYS) +static int tls_handshake_private_keyring(struct tls_handshake_req *treq) +{ + key_ref_t process_keyring_ref, keyring_ref; + int ret; + + if (treq->th_keyring == TLS_NO_KEYRING) + return 0; + + process_keyring_ref = lookup_user_key(KEY_SPEC_PROCESS_KEYRING, + KEY_LOOKUP_CREATE, + KEY_NEED_WRITE); + if (IS_ERR(process_keyring_ref)) { + ret = PTR_ERR(process_keyring_ref); + goto out; + } + + keyring_ref = lookup_user_key(treq->th_keyring, KEY_LOOKUP_CREATE, + KEY_NEED_LINK); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); + goto out_put_key; + } + + ret = key_link(key_ref_to_ptr(process_keyring_ref), + key_ref_to_ptr(keyring_ref)); + + key_ref_put(keyring_ref); +out_put_key: + key_ref_put(process_keyring_ref); +out: + return ret; +} +#else +static int 
tls_handshake_private_keyring(struct tls_handshake_req *treq) +{ + return 0; +} +#endif + +static int tls_handshake_put_peer_identity(struct sk_buff *msg, + struct tls_handshake_req *treq) +{ + unsigned int i; + + for (i = 0; i < treq->th_num_peerids; i++) + if (nla_put_u32(msg, HANDSHAKE_A_ACCEPT_PEER_IDENTITY, + treq->th_peerid[i]) < 0) + return -EMSGSIZE; + return 0; +} + +static int tls_handshake_put_certificate(struct sk_buff *msg, + struct tls_handshake_req *treq) +{ + struct nlattr *entry_attr; + + if (treq->th_certificate == TLS_NO_CERT && + treq->th_privkey == TLS_NO_PRIVKEY) + return 0; + + entry_attr = nla_nest_start(msg, HANDSHAKE_A_ACCEPT_CERTIFICATE); + if (!entry_attr) + return -EMSGSIZE; + + if (nla_put_u32(msg, HANDSHAKE_A_X509_CERT, + treq->th_certificate) || + nla_put_u32(msg, HANDSHAKE_A_X509_PRIVKEY, + treq->th_privkey)) { + nla_nest_cancel(msg, entry_attr); + return -EMSGSIZE; + } + + nla_nest_end(msg, entry_attr); + return 0; +} + +/** + * tls_handshake_accept - callback to construct a CMD_ACCEPT response + * @req: handshake parameters to return + * @info: generic netlink message context + * @fd: file descriptor to be returned + * + * Returns zero on success, or a negative errno on failure. + */ +static int tls_handshake_accept(struct handshake_req *req, + struct genl_info *info, int fd) +{ + struct tls_handshake_req *treq = handshake_req_private(req); + struct nlmsghdr *hdr; + struct sk_buff *msg; + int ret; + + ret = tls_handshake_private_keyring(treq); + if (ret < 0) + goto out; + + ret = -ENOMEM; + msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + goto out; + hdr = handshake_genl_put(msg, info); + if (!hdr) + goto out_cancel; + + ret = -EMSGSIZE; + ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_SOCKFD, fd); + if (ret < 0) + goto out_cancel; + ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type); + if (ret < 0) + goto out_cancel; + if (treq->th_timeout_ms) { + ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms); + if (ret < 0) + goto out_cancel; + } + + ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_AUTH_MODE, + treq->th_auth_mode); + if (ret < 0) + goto out_cancel; + switch (treq->th_auth_mode) { + case HANDSHAKE_AUTH_PSK: + ret = tls_handshake_put_peer_identity(msg, treq); + if (ret < 0) + goto out_cancel; + break; + case HANDSHAKE_AUTH_X509: + ret = tls_handshake_put_certificate(msg, treq); + if (ret < 0) + goto out_cancel; + break; + } + + genlmsg_end(msg, hdr); + return genlmsg_reply(msg, info); + +out_cancel: + genlmsg_cancel(msg, hdr); +out: + return ret; +} + +static const struct handshake_proto tls_handshake_proto = { + .hp_handler_class = HANDSHAKE_HANDLER_CLASS_TLSHD, + .hp_privsize = sizeof(struct tls_handshake_req), + + .hp_accept = tls_handshake_accept, + .hp_done = tls_handshake_done, +}; + +/** + * tls_client_hello_anon - request an anonymous TLS handshake on a socket + * @args: socket and handshake parameters for this request + * @flags: memory allocation control flags + * + * Return values: + * %0: Handshake request enqueue; ->done will be called when complete + * %-ESRCH: No user agent is available + * %-ENOMEM: Memory allocation failed + */ +int tls_client_hello_anon(const struct tls_handshake_args *args, gfp_t flags) +{ + struct tls_handshake_req *treq; + struct handshake_req *req; + + req = handshake_req_alloc(&tls_handshake_proto, flags); + if (!req) + return -ENOMEM; + treq = tls_handshake_req_init(req, args); + treq->th_type = HANDSHAKE_MSG_TYPE_CLIENTHELLO; + treq->th_auth_mode = 
HANDSHAKE_AUTH_UNAUTH; + + return handshake_req_submit(args->ta_sock, req, flags); +} +EXPORT_SYMBOL(tls_client_hello_anon); + +/** + * tls_client_hello_x509 - request an x.509-based TLS handshake on a socket + * @args: socket and handshake parameters for this request + * @flags: memory allocation control flags + * + * Return values: + * %0: Handshake request enqueue; ->done will be called when complete + * %-ESRCH: No user agent is available + * %-ENOMEM: Memory allocation failed + */ +int tls_client_hello_x509(const struct tls_handshake_args *args, gfp_t flags) +{ + struct tls_handshake_req *treq; + struct handshake_req *req; + + req = handshake_req_alloc(&tls_handshake_proto, flags); + if (!req) + return -ENOMEM; + treq = tls_handshake_req_init(req, args); + treq->th_type = HANDSHAKE_MSG_TYPE_CLIENTHELLO; + treq->th_auth_mode = HANDSHAKE_AUTH_X509; + treq->th_certificate = args->ta_my_cert; + treq->th_privkey = args->ta_my_privkey; + + return handshake_req_submit(args->ta_sock, req, flags); +} +EXPORT_SYMBOL(tls_client_hello_x509); + +/** + * tls_client_hello_psk - request a PSK-based TLS handshake on a socket + * @args: socket and handshake parameters for this request + * @flags: memory allocation control flags + * + * Return values: + * %0: Handshake request enqueue; ->done will be called when complete + * %-EINVAL: Wrong number of local peer IDs + * %-ESRCH: No user agent is available + * %-ENOMEM: Memory allocation failed + */ +int tls_client_hello_psk(const struct tls_handshake_args *args, gfp_t flags) +{ + struct tls_handshake_req *treq; + struct handshake_req *req; + unsigned int i; + + if (!args->ta_num_peerids || + args->ta_num_peerids > ARRAY_SIZE(treq->th_peerid)) + return -EINVAL; + + req = handshake_req_alloc(&tls_handshake_proto, flags); + if (!req) + return -ENOMEM; + treq = tls_handshake_req_init(req, args); + treq->th_type = HANDSHAKE_MSG_TYPE_CLIENTHELLO; + treq->th_auth_mode = HANDSHAKE_AUTH_PSK; + treq->th_num_peerids = args->ta_num_peerids; + for (i = 0; i < args->ta_num_peerids; i++) + treq->th_peerid[i] = args->ta_my_peerids[i]; + + return handshake_req_submit(args->ta_sock, req, flags); +} +EXPORT_SYMBOL(tls_client_hello_psk); + +/** + * tls_server_hello_x509 - request a server TLS handshake on a socket + * @args: socket and handshake parameters for this request + * @flags: memory allocation control flags + * + * Return values: + * %0: Handshake request enqueue; ->done will be called when complete + * %-ESRCH: No user agent is available + * %-ENOMEM: Memory allocation failed + */ +int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags) +{ + struct tls_handshake_req *treq; + struct handshake_req *req; + + req = handshake_req_alloc(&tls_handshake_proto, flags); + if (!req) + return -ENOMEM; + treq = tls_handshake_req_init(req, args); + treq->th_type = HANDSHAKE_MSG_TYPE_SERVERHELLO; + treq->th_auth_mode = HANDSHAKE_AUTH_X509; + treq->th_certificate = args->ta_my_cert; + treq->th_privkey = args->ta_my_privkey; + + return handshake_req_submit(args->ta_sock, req, flags); +} +EXPORT_SYMBOL(tls_server_hello_x509); + +/** + * tls_server_hello_psk - request a server TLS handshake on a socket + * @args: socket and handshake parameters for this request + * @flags: memory allocation control flags + * + * Return values: + * %0: Handshake request enqueue; ->done will be called when complete + * %-ESRCH: No user agent is available + * %-ENOMEM: Memory allocation failed + */ +int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags) +{ + 
struct tls_handshake_req *treq; + struct handshake_req *req; + + req = handshake_req_alloc(&tls_handshake_proto, flags); + if (!req) + return -ENOMEM; + treq = tls_handshake_req_init(req, args); + treq->th_type = HANDSHAKE_MSG_TYPE_SERVERHELLO; + treq->th_auth_mode = HANDSHAKE_AUTH_PSK; + treq->th_num_peerids = 1; + treq->th_peerid[0] = args->ta_my_peerids[0]; + + return handshake_req_submit(args->ta_sock, req, flags); +} +EXPORT_SYMBOL(tls_server_hello_psk); + +/** + * tls_handshake_cancel - cancel a pending handshake + * @sk: socket on which there is an ongoing handshake + * + * Request cancellation races with request completion. To determine + * who won, callers examine the return value from this function. + * + * Return values: + * %true - Uncompleted handshake request was canceled + * %false - Handshake request already completed or not found + */ +bool tls_handshake_cancel(struct sock *sk) +{ + return handshake_req_cancel(sk); +} +EXPORT_SYMBOL(tls_handshake_cancel); -- cgit v1.2.3 From 83f6d600796c65ab34b08dbddb5795099dfda4d1 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 19 Apr 2023 18:34:58 +0300 Subject: bridge: vlan: Allow setting VLAN neighbor suppression state Add a new VLAN attribute that allows user space to set the neighbor suppression state of the port VLAN. Example: # bridge -d -j -p vlan show dev swp1 vid 10 | jq '.[]["vlans"][]["neigh_suppress"]' false # bridge vlan set vid 10 dev swp1 neigh_suppress on # bridge -d -j -p vlan show dev swp1 vid 10 | jq '.[]["vlans"][]["neigh_suppress"]' true # bridge vlan set vid 10 dev swp1 neigh_suppress off # bridge -d -j -p vlan show dev swp1 vid 10 | jq '.[]["vlans"][]["neigh_suppress"]' false # bridge vlan set vid 10 dev br0 neigh_suppress on Error: bridge: Can't set neigh_suppress for non-port vlans. Signed-off-by: Ido Schimmel Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_bridge.h | 1 + net/bridge/br_vlan.c | 1 + net/bridge/br_vlan_options.c | 20 +++++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index c9d624f528c5..f95326fce6bb 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -525,6 +525,7 @@ enum { BRIDGE_VLANDB_ENTRY_MCAST_ROUTER, BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS, BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS, + BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS, __BRIDGE_VLANDB_ENTRY_MAX, }; #define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1) diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 8a3dbc09ba38..15f44d026e75 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -2134,6 +2134,7 @@ static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = [BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 }, [BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS] = { .type = NLA_REJECT }, [BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS] = { .type = NLA_U32 }, + [BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1), }; static int br_vlan_rtm_process_one(struct net_device *dev, diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c index e378c2f3a9e2..8fa89b04ee94 100644 --- a/net/bridge/br_vlan_options.c +++ b/net/bridge/br_vlan_options.c @@ -52,7 +52,9 @@ bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v, const struct net_bridge_port *p) { if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) || - !__vlan_tun_put(skb, v)) + !__vlan_tun_put(skb, v) || + nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS, + !!(v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED))) return false; #ifdef CONFIG_BRIDGE_IGMP_SNOOPING @@ -80,6 +82,7 @@ size_t br_vlan_opts_nl_size(void) + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS */ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS */ #endif + + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS */ + 0; } @@ -239,6 +242,21 @@ static int br_vlan_process_one_opts(const struct net_bridge *br, } #endif + if (tb[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS]) { + bool enabled = v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED; + bool val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS]); + + if (!p) { + NL_SET_ERR_MSG_MOD(extack, "Can't set neigh_suppress for non-port vlans"); + return -EINVAL; + } + + if (val != enabled) { + v->priv_flags ^= BR_VLFLAG_NEIGH_SUPPRESS_ENABLED; + *changed = true; + } + } + return 0; } -- cgit v1.2.3 From 160656d7201d861a1f2a0bf279a765e8cda2317a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 19 Apr 2023 18:34:59 +0300 Subject: bridge: Allow setting per-{Port, VLAN} neighbor suppression state Add a new bridge port attribute that allows user space to enable per-{Port, VLAN} neighbor suppression. Example: # bridge -d -j -p link show dev swp1 | jq '.[]["neigh_vlan_suppress"]' false # bridge link set dev swp1 neigh_vlan_suppress on # bridge -d -j -p link show dev swp1 | jq '.[]["neigh_vlan_suppress"]' true # bridge link set dev swp1 neigh_vlan_suppress off # bridge -d -j -p link show dev swp1 | jq '.[]["neigh_vlan_suppress"]' false Signed-off-by: Ido Schimmel Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_netlink.c | 8 +++++++- net/core/rtnetlink.c | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 8d679688efe0..4ac1000b0ef2 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -569,6 +569,7 @@ enum { IFLA_BRPORT_MAB, IFLA_BRPORT_MCAST_N_GROUPS, IFLA_BRPORT_MCAST_MAX_GROUPS, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index fefb1c0e248b..05c5863d2e20 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -189,6 +189,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */ + nla_total_size(1) /* IFLA_BRPORT_LOCKED */ + nla_total_size(1) /* IFLA_BRPORT_MAB */ + + nla_total_size(1) /* IFLA_BRPORT_NEIGH_VLAN_SUPPRESS */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */ @@ -278,7 +279,9 @@ static int br_port_fill_attrs(struct sk_buff *skb, !!(p->flags & BR_MRP_LOST_IN_CONT)) || nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) || nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) || - nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB))) + nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)) || + nla_put_u8(skb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + !!(p->flags & BR_NEIGH_VLAN_SUPPRESS))) return -EMSGSIZE; timerval = br_timer_value(&p->message_age_timer); @@ -891,6 +894,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 }, [IFLA_BRPORT_MCAST_N_GROUPS] = { .type = NLA_REJECT }, [IFLA_BRPORT_MCAST_MAX_GROUPS] = { .type = NLA_U32 }, + [IFLA_BRPORT_NEIGH_VLAN_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1), }; /* Change the state of the port and notify spanning tree */ @@ -957,6 +961,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[], br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED); br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED); br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB); + br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + BR_NEIGH_VLAN_SUPPRESS); if ((p->flags & BR_PORT_MAB) && (!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e844d75220fb..653901a1bf75 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -61,7 +61,7 @@ #include "dev.h" #define RTNL_MAX_TYPE 50 -#define RTNL_SLAVE_MAX_TYPE 42 +#define RTNL_SLAVE_MAX_TYPE 43 struct rtnl_link { rtnl_doit_func doit; -- cgit v1.2.3 From dfc39d4026fb2432363c0f77543c4cf3adca4c7b Mon Sep 17 00:00:00 2001 From: Jianfeng Tan Date: Wed, 19 Apr 2023 15:24:16 +0800 Subject: net/packet: support mergeable feature of virtio Packet sockets, like tap, can be used as the backend for kernel vhost. In packet sockets, virtio net header size is currently hardcoded to be the size of struct virtio_net_hdr, which is 10 bytes; however, it is not always the case: some virtio features, such as mrg_rxbuf, need virtio net header to be 12-byte long. 
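
For reference, the two header layouts from the virtio_net UAPI header
(an illustrative aside; the sizes quoted above follow directly from
these definitions):

  struct virtio_net_hdr {                 /* 10 bytes */
          __u8 flags;
          __u8 gso_type;
          __virtio16 hdr_len;
          __virtio16 gso_size;
          __virtio16 csum_start;
          __virtio16 csum_offset;
  };

  struct virtio_net_hdr_mrg_rxbuf {       /* 12 bytes */
          struct virtio_net_hdr hdr;
          __virtio16 num_buffers;         /* number of merged rx buffers */
  };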
Mergeable buffers, as a virtio feature, are worth supporting: packets larger than one mbuf are dropped in the vhost worker's handle_rx if the mrg_rxbuf feature is not used, yet large packets cannot be avoided and increasing the mbuf size is not economical. With this virtio feature enabled by virtio-user, packet sockets with a hardcoded 10-byte virtio net header parse the mac header incorrectly in packet_snd, taking the last two bytes of the virtio net header as part of the mac header. The mis-parsed mac header then fails the Ethernet header validation when the packet is later received by the underlying device, and the packet is dropped. Add an extra field, vnet_hdr_sz, in a hole of struct packet_sock to record the virtio net header size currently in use, and an extra sockopt, PACKET_VNET_HDR_SZ, to set it, so that packet sockets know the exact length of the virtio net header that virtio-user provides. In packet_snd, tpacket_snd and packet_recvmsg, instead of using a hardcoded virtio net header size, read the exact vnet_hdr_sz from the corresponding packet_sock and parse the mac header correctly based on it, so the packets are no longer mistakenly dropped. Signed-off-by: Jianfeng Tan Co-developed-by: Anqi Shen Signed-off-by: Anqi Shen Reviewed-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/uapi/linux/if_packet.h | 1 + net/packet/af_packet.c | 95 +++++++++++++++++++++++++----------------- net/packet/diag.c | 2 +- net/packet/internal.h | 2 +- 4 files changed, 60 insertions(+), 40 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index 78c981d6a9d4..9efc42382fdb 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -59,6 +59,7 @@ struct sockaddr_ll { #define PACKET_ROLLOVER_STATS 21 #define PACKET_FANOUT_DATA 22 #define PACKET_IGNORE_OUTGOING 23 +#define PACKET_VNET_HDR_SZ 24 #define PACKET_FANOUT_HASH 0 #define PACKET_FANOUT_LB 1 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 568f8d76e3c1..6080c0db0814 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2090,18 +2090,18 @@ static unsigned int run_filter(struct sk_buff *skb, } static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, - size_t *len) + size_t *len, int vnet_hdr_sz) { - struct virtio_net_hdr vnet_hdr; + struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 }; - if (*len < sizeof(vnet_hdr)) + if (*len < vnet_hdr_sz) return -EINVAL; - *len -= sizeof(vnet_hdr); + *len -= vnet_hdr_sz; - if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0)) + if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0)) return -EINVAL; - return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); + return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz); } /* @@ -2250,7 +2250,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, __u32 ts_status; bool is_drop_n_account = false; unsigned int slot_id = 0; - bool do_vnet = false; + int vnet_hdr_sz = 0; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing @@ -2308,10 +2308,9 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ?
16 : maclen)) + po->tp_reserve; - if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) { - netoff += sizeof(struct virtio_net_hdr); - do_vnet = true; - } + vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); + if (vnet_hdr_sz) + netoff += vnet_hdr_sz; macoff = netoff - maclen; } if (netoff > USHRT_MAX) { @@ -2337,7 +2336,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { snaplen = 0; - do_vnet = false; + vnet_hdr_sz = 0; } } } else if (unlikely(macoff + snaplen > @@ -2351,7 +2350,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; - do_vnet = false; + vnet_hdr_sz = 0; } } spin_lock(&sk->sk_receive_queue.lock); @@ -2367,7 +2366,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, __set_bit(slot_id, po->rx_ring.rx_owner_map); } - if (do_vnet && + if (vnet_hdr_sz && virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true, 0)) { @@ -2551,16 +2550,26 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) } static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, - struct virtio_net_hdr *vnet_hdr) + struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz) { - if (*len < sizeof(*vnet_hdr)) + int ret; + + if (*len < vnet_hdr_sz) return -EINVAL; - *len -= sizeof(*vnet_hdr); + *len -= vnet_hdr_sz; if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) return -EFAULT; - return __packet_snd_vnet_parse(vnet_hdr, *len); + ret = __packet_snd_vnet_parse(vnet_hdr, *len); + if (ret) + return ret; + + /* move iter to point to the start of mac header */ + if (vnet_hdr_sz != sizeof(struct virtio_net_hdr)) + iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr)); + + return 0; } static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, @@ -2722,6 +2731,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) void *ph; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); + int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); unsigned char *addr = NULL; int tp_len, size_max; void *data; @@ -2779,8 +2789,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); - if ((size_max > dev->mtu + reserve + VLAN_HLEN) && - !packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) + if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz) size_max = dev->mtu + reserve + VLAN_HLEN; reinit_completion(&po->skb_completion); @@ -2809,10 +2818,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) status = TP_STATUS_SEND_REQUEST; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; - if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) { + if (vnet_hdr_sz) { vnet_hdr = data; - data += sizeof(*vnet_hdr); - tp_len -= sizeof(*vnet_hdr); + data += vnet_hdr_sz; + tp_len -= vnet_hdr_sz; if (tp_len < 0 || __packet_snd_vnet_parse(vnet_hdr, tp_len)) { tp_len = -EINVAL; @@ -2837,7 +2846,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && - !packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR) && + !vnet_hdr_sz && !packet_extra_vlan_len_allowed(dev, skb)) tp_len = -EMSGSIZE; @@ -2856,7 +2865,7 @@ tpacket_error: } } - if 
(packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) { + if (vnet_hdr_sz) { if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { tp_len = -EINVAL; goto tpacket_error; @@ -2946,7 +2955,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; struct packet_sock *po = pkt_sk(sk); - bool has_vnet_hdr = false; + int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); int hlen, tlen, linear; int extra_len = 0; @@ -2990,11 +2999,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; - if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) { - err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); + if (vnet_hdr_sz) { + err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz); if (err) goto out_unlock; - has_vnet_hdr = true; } if (unlikely(sock_flag(sk, SOCK_NOFCS))) { @@ -3064,11 +3072,11 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) packet_parse_headers(skb, sock); - if (has_vnet_hdr) { + if (vnet_hdr_sz) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); if (err) goto out_free; - len += sizeof(vnet_hdr); + len += vnet_hdr_sz; virtio_net_hdr_set_proto(skb, &vnet_hdr); } @@ -3408,7 +3416,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; - int vnet_hdr_len = 0; + int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz); unsigned int origlen = 0; err = -EINVAL; @@ -3449,11 +3457,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, packet_rcv_try_clear_pressure(pkt_sk(sk)); - if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_HAS_VNET_HDR)) { - err = packet_rcv_vnet(msg, skb, &len); + if (vnet_hdr_len) { + err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len); if (err) goto out_free; - vnet_hdr_len = sizeof(struct virtio_net_hdr); } /* You lose any data beyond the buffer you gave. If it worries @@ -3915,8 +3922,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, return 0; } case PACKET_VNET_HDR: + case PACKET_VNET_HDR_SZ: { - int val; + int val, hdr_len; if (sock->type != SOCK_RAW) return -EINVAL; @@ -3925,11 +3933,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; + if (optname == PACKET_VNET_HDR_SZ) { + if (val && val != sizeof(struct virtio_net_hdr) && + val != sizeof(struct virtio_net_hdr_mrg_rxbuf)) + return -EINVAL; + hdr_len = val; + } else { + hdr_len = val ? 
sizeof(struct virtio_net_hdr) : 0; + } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { - packet_sock_flag_set(po, PACKET_SOCK_HAS_VNET_HDR, val); + WRITE_ONCE(po->vnet_hdr_sz, hdr_len); ret = 0; } release_sock(sk); @@ -4062,7 +4078,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV); break; case PACKET_VNET_HDR: - val = packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR); + val = !!READ_ONCE(po->vnet_hdr_sz); + break; + case PACKET_VNET_HDR_SZ: + val = READ_ONCE(po->vnet_hdr_sz); break; case PACKET_VERSION: val = po->tp_version; diff --git a/net/packet/diag.c b/net/packet/diag.c index de4ced5cf3e8..d0c4eda4cdc6 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -27,7 +27,7 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb) pinfo.pdi_flags |= PDI_AUXDATA; if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV)) pinfo.pdi_flags |= PDI_ORIGDEV; - if (packet_sock_flag(po, PACKET_SOCK_HAS_VNET_HDR)) + if (READ_ONCE(po->vnet_hdr_sz)) pinfo.pdi_flags |= PDI_VNETHDR; if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) pinfo.pdi_flags |= PDI_LOSS; diff --git a/net/packet/internal.h b/net/packet/internal.h index 27930f69f368..63f4865202c1 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -118,6 +118,7 @@ struct packet_sock { struct mutex pg_vec_lock; unsigned long flags; int ifindex; /* bound device */ + u8 vnet_hdr_sz; __be16 num; struct packet_rollover *rollover; struct packet_mclist *mclist; @@ -139,7 +140,6 @@ enum packet_sock_flags { PACKET_SOCK_AUXDATA, PACKET_SOCK_TX_HAS_OFF, PACKET_SOCK_TP_LOSS, - PACKET_SOCK_HAS_VNET_HDR, PACKET_SOCK_RUNNING, PACKET_SOCK_PRESSURE, PACKET_SOCK_QDISC_BYPASS, -- cgit v1.2.3 From 84601d6ee68ae820dec97450934797046d62db4b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 21 Apr 2023 19:02:54 +0200 Subject: bpf: add bpf_link support for BPF_NETFILTER programs Add bpf_link support skeleton. To keep this reviewable, no bpf program can be invoked yet, if a program is attached only a c-stub is called and not the actual bpf program. Defaults to 'y' if both netfilter and bpf syscall are enabled in kconfig. Uapi example usage: union bpf_attr attr = { }; attr.link_create.prog_fd = progfd; attr.link_create.attach_type = 0; /* unused */ attr.link_create.netfilter.pf = PF_INET; attr.link_create.netfilter.hooknum = NF_INET_LOCAL_IN; attr.link_create.netfilter.priority = -128; err = bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); ... this would attach progfd to ipv4:input hook. Such hook gets removed automatically if the calling program exits. BPF_NETFILTER program invocation is added in followup change. NF_HOOK_OP_BPF enum will eventually be read from nfnetlink_hook, it allows to tell userspace which program is attached at the given hook when user runs 'nft hook list' command rather than just the priority and not-very-helpful 'this hook runs a bpf prog but I can't tell which one'. Will also be used to disallow registration of two bpf programs with same priority in a followup patch. v4: arm32 cmpxchg only supports 32bit operand s/prio/priority/ v3: restrict prog attachment to ip/ip6 for now, lets lift restrictions if more use cases pop up (arptables, ebtables, netdev ingress/egress etc). 
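Expanding the uapi snippet above into a self-contained helper, as a sketch only: it assumes <linux/bpf.h> and <linux/netfilter.h> already carry BPF_PROG_TYPE_NETFILTER and the link_create.netfilter fields from this series, and that progfd refers to an already-loaded program of that type (loading it is not shown).

/* Attach progfd to the ipv4 LOCAL_IN hook at priority -128. Returns the
 * link fd on success; closing it, or the process exiting, removes the hook.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/netfilter.h>

static int attach_nf_link(int progfd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = progfd;
	attr.link_create.attach_type = 0;              /* unused */
	attr.link_create.netfilter.pf = NFPROTO_IPV4;  /* same value as PF_INET */
	attr.link_create.netfilter.hooknum = NF_INET_LOCAL_IN;
	attr.link_create.netfilter.priority = -128;    /* FIRST/LAST are rejected */

	return syscall(SYS_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}

At this point in the series the hook only runs the nf_hook_run_bpf() stub, which returns NF_ACCEPT; actual program invocation follows in the next change.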
Signed-off-by: Florian Westphal Link: https://lore.kernel.org/r/20230421170300.24115-2-fw@strlen.de Signed-off-by: Alexei Starovoitov --- include/linux/netfilter.h | 1 + include/net/netfilter/nf_bpf_link.h | 10 +++ include/uapi/linux/bpf.h | 14 ++++ kernel/bpf/syscall.c | 6 ++ net/netfilter/Kconfig | 3 + net/netfilter/Makefile | 1 + net/netfilter/nf_bpf_link.c | 159 ++++++++++++++++++++++++++++++++++++ 7 files changed, 194 insertions(+) create mode 100644 include/net/netfilter/nf_bpf_link.h create mode 100644 net/netfilter/nf_bpf_link.c (limited to 'include/uapi/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index c8e03bcaecaa..0762444e3767 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -80,6 +80,7 @@ typedef unsigned int nf_hookfn(void *priv, enum nf_hook_ops_type { NF_HOOK_OP_UNDEFINED, NF_HOOK_OP_NF_TABLES, + NF_HOOK_OP_BPF, }; struct nf_hook_ops { diff --git a/include/net/netfilter/nf_bpf_link.h b/include/net/netfilter/nf_bpf_link.h new file mode 100644 index 000000000000..eeaeaf3d15de --- /dev/null +++ b/include/net/netfilter/nf_bpf_link.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#if IS_ENABLED(CONFIG_NETFILTER_BPF_LINK) +int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +#else +static inline int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4b20a7269bee..1bb11a6ee667 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -986,6 +986,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ + BPF_PROG_TYPE_NETFILTER, }; enum bpf_attach_type { @@ -1050,6 +1051,7 @@ enum bpf_link_type { BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, MAX_BPF_LINK_TYPE, }; @@ -1560,6 +1562,12 @@ union bpf_attr { */ __u64 cookie; } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; }; } link_create; @@ -6410,6 +6418,12 @@ struct bpf_link_info { struct { __u32 map_id; } struct_ops; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; }; } __attribute__((aligned(8))); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index bcf1a1920ddd..14f39c1e573e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -35,6 +35,7 @@ #include #include #include +#include #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@ -2462,6 +2463,7 @@ static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_EXT: /* extends any prog */ + case BPF_PROG_TYPE_NETFILTER: return true; case BPF_PROG_TYPE_CGROUP_SKB: /* always unpriv */ @@ -4588,6 +4590,7 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) switch (prog->type) { case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_NETFILTER: break; case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_TRACEPOINT: @@ -4654,6 +4657,9 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) case BPF_PROG_TYPE_XDP: ret = bpf_xdp_link_attach(attr, prog); break; + case BPF_PROG_TYPE_NETFILTER: + ret = bpf_nf_link_attach(attr, prog); + break; #endif case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_TRACEPOINT: diff --git 
a/net/netfilter/Kconfig b/net/netfilter/Kconfig index d0bf630482c1..441d1f134110 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -30,6 +30,9 @@ config NETFILTER_FAMILY_BRIDGE config NETFILTER_FAMILY_ARP bool +config NETFILTER_BPF_LINK + def_bool BPF_SYSCALL + config NETFILTER_NETLINK_HOOK tristate "Netfilter base hook dump support" depends on NETFILTER_ADVANCED diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 5ffef1cd6143..d4958e7e7631 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -22,6 +22,7 @@ nf_conntrack-$(CONFIG_DEBUG_INFO_BTF) += nf_conntrack_bpf.o endif obj-$(CONFIG_NETFILTER) = netfilter.o +obj-$(CONFIG_NETFILTER_BPF_LINK) += nf_bpf_link.o obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c new file mode 100644 index 000000000000..efa4f3390742 --- /dev/null +++ b/net/netfilter/nf_bpf_link.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include +#include + +static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb, + const struct nf_hook_state *s) +{ + return NF_ACCEPT; +} + +struct bpf_nf_link { + struct bpf_link link; + struct nf_hook_ops hook_ops; + struct net *net; + u32 dead; +}; + +static void bpf_nf_link_release(struct bpf_link *link) +{ + struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); + + if (nf_link->dead) + return; + + /* prevent hook-not-found warning splat from netfilter core when + * .detach was already called + */ + if (!cmpxchg(&nf_link->dead, 0, 1)) + nf_unregister_net_hook(nf_link->net, &nf_link->hook_ops); +} + +static void bpf_nf_link_dealloc(struct bpf_link *link) +{ + struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); + + kfree(nf_link); +} + +static int bpf_nf_link_detach(struct bpf_link *link) +{ + bpf_nf_link_release(link); + return 0; +} + +static void bpf_nf_link_show_info(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); + + seq_printf(seq, "pf:\t%u\thooknum:\t%u\tprio:\t%d\n", + nf_link->hook_ops.pf, nf_link->hook_ops.hooknum, + nf_link->hook_ops.priority); +} + +static int bpf_nf_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); + + info->netfilter.pf = nf_link->hook_ops.pf; + info->netfilter.hooknum = nf_link->hook_ops.hooknum; + info->netfilter.priority = nf_link->hook_ops.priority; + info->netfilter.flags = 0; + + return 0; +} + +static int bpf_nf_link_update(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + return -EOPNOTSUPP; +} + +static const struct bpf_link_ops bpf_nf_link_lops = { + .release = bpf_nf_link_release, + .dealloc = bpf_nf_link_dealloc, + .detach = bpf_nf_link_detach, + .show_fdinfo = bpf_nf_link_show_info, + .fill_link_info = bpf_nf_link_fill_link_info, + .update_prog = bpf_nf_link_update, +}; + +static int bpf_nf_check_pf_and_hooks(const union bpf_attr *attr) +{ + switch (attr->link_create.netfilter.pf) { + case NFPROTO_IPV4: + case NFPROTO_IPV6: + if (attr->link_create.netfilter.hooknum >= NF_INET_NUMHOOKS) + return -EPROTO; + break; + default: + return -EAFNOSUPPORT; + } + + if (attr->link_create.netfilter.flags) + return -EOPNOTSUPP; + + /* make sure conntrack confirm is always last. + * + * In the future, if userspace can e.g. 
request defrag, then + * "defrag_requested && prio before NF_IP_PRI_CONNTRACK_DEFRAG" + * should fail. + */ + switch (attr->link_create.netfilter.priority) { + case NF_IP_PRI_FIRST: return -ERANGE; /* sabotage_in and other warts */ + case NF_IP_PRI_LAST: return -ERANGE; /* e.g. conntrack confirm */ + } + + return 0; +} + +int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct net *net = current->nsproxy->net_ns; + struct bpf_link_primer link_primer; + struct bpf_nf_link *link; + int err; + + if (attr->link_create.flags) + return -EINVAL; + + err = bpf_nf_check_pf_and_hooks(attr); + if (err) + return err; + + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) + return -ENOMEM; + + bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog); + + link->hook_ops.hook = nf_hook_run_bpf; + link->hook_ops.hook_ops_type = NF_HOOK_OP_BPF; + link->hook_ops.priv = prog; + + link->hook_ops.pf = attr->link_create.netfilter.pf; + link->hook_ops.priority = attr->link_create.netfilter.priority; + link->hook_ops.hooknum = attr->link_create.netfilter.hooknum; + + link->net = net; + link->dead = false; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + return err; + } + + err = nf_register_net_hook(net, &link->hook_ops); + if (err) { + bpf_link_cleanup(&link_primer); + return err; + } + + return bpf_link_settle(&link_primer); +} -- cgit v1.2.3 From 506a74db7e019a277e987fa65654bdd953859d5b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 21 Apr 2023 19:02:56 +0200 Subject: netfilter: nfnetlink hook: dump bpf prog id This allows userspace ("nft list hooks") to show which bpf program is attached to which hook. Without this, user only knows bpf prog is attached at prio x, y, z at INPUT and FORWARD, but can't tell which program is where. 
v4: kdoc fixups (Simon Horman) Link: https://lore.kernel.org/bpf/ZEELzpNCnYJuZyod@corigine.com/ Signed-off-by: Florian Westphal Link: https://lore.kernel.org/r/20230421170300.24115-4-fw@strlen.de Signed-off-by: Alexei Starovoitov --- include/uapi/linux/netfilter/nfnetlink_hook.h | 24 +++++++- net/netfilter/nfnetlink_hook.c | 81 ++++++++++++++++++++++----- 2 files changed, 89 insertions(+), 16 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h index bbcd285b22e1..84a561a74b98 100644 --- a/include/uapi/linux/netfilter/nfnetlink_hook.h +++ b/include/uapi/linux/netfilter/nfnetlink_hook.h @@ -32,8 +32,12 @@ enum nfnl_hook_attributes { /** * enum nfnl_hook_chain_info_attributes - chain description * - * NFNLA_HOOK_INFO_DESC: nft chain and table name (enum nft_table_attributes) (NLA_NESTED) - * NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32) + * @NFNLA_HOOK_INFO_DESC: nft chain and table name (NLA_NESTED) + * @NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32) + * + * NFNLA_HOOK_INFO_DESC depends on NFNLA_HOOK_INFO_TYPE value: + * NFNL_HOOK_TYPE_NFTABLES: enum nft_table_attributes + * NFNL_HOOK_TYPE_BPF: enum nfnl_hook_bpf_attributes */ enum nfnl_hook_chain_info_attributes { NFNLA_HOOK_INFO_UNSPEC, @@ -55,10 +59,24 @@ enum nfnl_hook_chain_desc_attributes { /** * enum nfnl_hook_chaintype - chain type * - * @NFNL_HOOK_TYPE_NFTABLES nf_tables base chain + * @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain + * @NFNL_HOOK_TYPE_BPF: bpf program */ enum nfnl_hook_chaintype { NFNL_HOOK_TYPE_NFTABLES = 0x1, + NFNL_HOOK_TYPE_BPF, +}; + +/** + * enum nfnl_hook_bpf_attributes - bpf prog description + * + * @NFNLA_HOOK_BPF_ID: bpf program id (NLA_U32) + */ +enum nfnl_hook_bpf_attributes { + NFNLA_HOOK_BPF_UNSPEC, + NFNLA_HOOK_BPF_ID, + __NFNLA_HOOK_BPF_MAX, }; +#define NFNLA_HOOK_BPF_MAX (__NFNLA_HOOK_BPF_MAX - 1) #endif /* _NFNL_HOOK_H */ diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c index 8120aadf6a0f..ade8ee1988b1 100644 --- a/net/netfilter/nfnetlink_hook.c +++ b/net/netfilter/nfnetlink_hook.c @@ -5,6 +5,7 @@ * Author: Florian Westphal */ +#include #include #include #include @@ -57,35 +58,76 @@ struct nfnl_dump_hook_data { u8 hook; }; +static struct nlattr *nfnl_start_info_type(struct sk_buff *nlskb, enum nfnl_hook_chaintype t) +{ + struct nlattr *nest = nla_nest_start(nlskb, NFNLA_HOOK_CHAIN_INFO); + int ret; + + if (!nest) + return NULL; + + ret = nla_put_be32(nlskb, NFNLA_HOOK_INFO_TYPE, htonl(t)); + if (ret == 0) + return nest; + + nla_nest_cancel(nlskb, nest); + return NULL; +} + +static int nfnl_hook_put_bpf_prog_info(struct sk_buff *nlskb, + const struct nfnl_dump_hook_data *ctx, + unsigned int seq, + const struct bpf_prog *prog) +{ + struct nlattr *nest, *nest2; + int ret; + + if (!IS_ENABLED(CONFIG_NETFILTER_BPF_LINK)) + return 0; + + if (WARN_ON_ONCE(!prog)) + return 0; + + nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_BPF); + if (!nest) + return -EMSGSIZE; + + nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC); + if (!nest2) + goto cancel_nest; + + ret = nla_put_be32(nlskb, NFNLA_HOOK_BPF_ID, htonl(prog->aux->id)); + if (ret) + goto cancel_nest; + + nla_nest_end(nlskb, nest2); + nla_nest_end(nlskb, nest); + return 0; + +cancel_nest: + nla_nest_cancel(nlskb, nest); + return -EMSGSIZE; +} + static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb, const struct nfnl_dump_hook_data *ctx, unsigned int seq, - const struct 
nf_hook_ops *ops) + struct nft_chain *chain) { struct net *net = sock_net(nlskb->sk); struct nlattr *nest, *nest2; - struct nft_chain *chain; int ret = 0; - if (ops->hook_ops_type != NF_HOOK_OP_NF_TABLES) - return 0; - - chain = ops->priv; if (WARN_ON_ONCE(!chain)) return 0; if (!nft_is_active(net, chain)) return 0; - nest = nla_nest_start(nlskb, NFNLA_HOOK_CHAIN_INFO); + nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_NFTABLES); if (!nest) return -EMSGSIZE; - ret = nla_put_be32(nlskb, NFNLA_HOOK_INFO_TYPE, - htonl(NFNL_HOOK_TYPE_NFTABLES)); - if (ret) - goto cancel_nest; - nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC); if (!nest2) goto cancel_nest; @@ -171,7 +213,20 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb, if (ret) goto nla_put_failure; - ret = nfnl_hook_put_nft_chain_info(nlskb, ctx, seq, ops); + switch (ops->hook_ops_type) { + case NF_HOOK_OP_NF_TABLES: + ret = nfnl_hook_put_nft_chain_info(nlskb, ctx, seq, ops->priv); + break; + case NF_HOOK_OP_BPF: + ret = nfnl_hook_put_bpf_prog_info(nlskb, ctx, seq, ops->priv); + break; + case NF_HOOK_OP_UNDEFINED: + break; + default: + WARN_ON_ONCE(1); + break; + } + if (ret) goto nla_put_failure; -- cgit v1.2.3
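For the dump side, a hypothetical userspace helper (assuming libmnl and uapi headers that already contain the nfnetlink_hook attributes above) could pull the program id out of a hook entry's NFNLA_HOOK_CHAIN_INFO nest; the values are emitted big-endian (nla_put_be32), hence the ntohl() calls:

/* Return the attached BPF program id for one dumped hook, or 0 when the
 * hook is not of type NFNL_HOOK_TYPE_BPF.
 */
#include <stdint.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink_hook.h>

static uint32_t hook_info_bpf_id(const struct nlattr *info_nest)
{
	const struct nlattr *attr, *desc = NULL;
	uint32_t type = 0, id = 0;

	mnl_attr_for_each_nested(attr, info_nest) {
		switch (mnl_attr_get_type(attr)) {
		case NFNLA_HOOK_INFO_TYPE:
			type = ntohl(mnl_attr_get_u32(attr));
			break;
		case NFNLA_HOOK_INFO_DESC:
			desc = attr;
			break;
		}
	}

	if (type != NFNL_HOOK_TYPE_BPF || !desc)
		return 0;

	mnl_attr_for_each_nested(attr, desc) {
		if (mnl_attr_get_type(attr) == NFNLA_HOOK_BPF_ID)
			id = ntohl(mnl_attr_get_u32(attr));
	}
	return id;
}

The kernel emits this nest only when ops->hook_ops_type is NF_HOOK_OP_BPF; an nf_tables base chain is still described via NFNL_HOOK_TYPE_NFTABLES as before.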