summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2026-04-11 07:22:32 +0300
committerAlexei Starovoitov <ast@kernel.org>2026-04-11 07:22:32 +0300
commitd6bae125f21f74201a3eab0996b2f27be3abc4bf (patch)
treeb3366ddffd45d0d817355f50bc2df98844a79f79 /include/linux
parent497fa510ee46af2bf04991cd64cb919266d0ca5a (diff)
parent136deea435dc83d7fe2304303bb9bccb54f69bb0 (diff)
downloadlinux-d6bae125f21f74201a3eab0996b2f27be3abc4bf.tar.xz
Merge branch 'use-kmalloc_nolock-universally-in-bpf-local-storage'
Amery Hung says: ==================== Use kmalloc_nolock() universally in BPF local storage Socket local storage did not convert to use kmalloc_nolock() since there was observable performance degradation due to kfree_nolock() hitting the slow path and the lack of kfree_rcu()-like batched freeing. Now that these concerns were addressed in slub, convert all remaining local storage flavors to use kmalloc_nolock(). v1 -> v2: - Fix build (CI, Alexei) ==================== Link: https://patch.msgid.link/20260411015419.114016-1-ameryhung@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bpf_local_storage.h15
1 files changed, 5 insertions, 10 deletions
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 8157e8da61d4..9e4f5c45c974 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -54,7 +54,6 @@ struct bpf_local_storage_map {
u32 bucket_log;
u16 elem_size;
u16 cache_idx;
- bool use_kmalloc_nolock;
};
struct bpf_local_storage_data {
@@ -86,8 +85,7 @@ struct bpf_local_storage_elem {
*/
};
atomic_t state;
- bool use_kmalloc_nolock;
- /* 3 bytes hole */
+ /* 4 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
@@ -104,7 +102,6 @@ struct bpf_local_storage {
rqspinlock_t lock; /* Protect adding/removing from the "list" */
u64 mem_charge; /* Copy of mem charged to owner. Protected by "lock" */
refcount_t owner_refcnt;/* Used to pin owner when map_free is uncharging */
- bool use_kmalloc_nolock;
};
/* U16_MAX is much more than enough for sk local storage
@@ -137,8 +134,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
- struct bpf_local_storage_cache *cache,
- bool use_kmalloc_nolock);
+ struct bpf_local_storage_cache *cache);
void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
struct bpf_local_storage_map *smap,
@@ -192,7 +188,7 @@ int bpf_selem_link_map(struct bpf_local_storage_map *smap,
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
- bool swap_uptrs, gfp_t gfp_flags);
+ bool swap_uptrs);
void bpf_selem_free(struct bpf_local_storage_elem *selem,
bool reuse_now);
@@ -200,12 +196,11 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem,
int
bpf_local_storage_alloc(void *owner,
struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *first_selem,
- gfp_t gfp_flags);
+ struct bpf_local_storage_elem *first_selem);
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
- void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
+ void *value, u64 map_flags, bool swap_uptrs);
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);