author    Joel Stanley <joel@jms.id.au>  2019-12-12 01:32:01 +0300
committer Joel Stanley <joel@jms.id.au>  2019-12-12 01:32:06 +0300
commit    fdc60468f3e452364d432f1a7c3f83d58bba1b84 (patch)
tree      de7197f150958ae72cb87b6cf4885610f3edd704 /kernel/bpf/syscall.c
parent    20572eecd7248b66d855a8e4812debd9f828ccba (diff)
parent    8539dfa4fcbcf58c3c2f92ac57b964add884d12b (diff)
download  linux-dev-5.3.tar.xz
Merge tag 'v5.3.15' into dev-5.3

This is the 5.3.15 stable release

Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c  |  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index aac966b32c42..ee3087462bc9 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
 	/* We really just want to fail instead of triggering OOM killer
 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 	void *area;
 
+	if (size >= SIZE_MAX)
+		return NULL;
+
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
 				    numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 	atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 	struct user_struct *user;
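
Why the type change matters: on a 32-bit kernel, sizeof(size_t) is 4, so a 64-bit map size computed from userspace-controlled attributes would be silently truncated at these call boundaries, turning an oversized request into a small allocation that later code indexes past. Widening the parameters to u64 and rejecting any size at or above SIZE_MAX fails such requests up front, which makes the later narrowing conversions safe. Below is a minimal user-space sketch of the hazard and the guard; it assumes a 32-bit size_t (modeled here as uint32_t), and alloc_truncating()/alloc_checked() are hypothetical names for illustration, not kernel functions.

/*
 * Stand-alone sketch (not kernel code) of the truncation bug this
 * patch prevents. size32_t models size_t on a 32-bit kernel.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t size32_t;	/* stand-in for a 32-bit size_t */

static void alloc_truncating(size32_t size)	/* old signature: size_t */
{
	printf("allocating %u bytes\n", (unsigned)size);
}

static int alloc_checked(uint64_t size)		/* new signature: u64 */
{
	if (size >= UINT32_MAX)			/* plays the role of SIZE_MAX */
		return -1;			/* fail instead of truncating */
	alloc_truncating((size32_t)size);	/* narrowing cast is now safe */
	return 0;
}

int main(void)
{
	uint64_t huge = (1ULL << 32) + 4096;	/* 4 GiB plus one page */

	alloc_truncating((size32_t)huge);	/* truncates to 4096: the bug */
	if (alloc_checked(huge) != 0)		/* rejected up front: the fix */
		printf("oversized request refused\n");
	return 0;
}

Run as written, the first call reports a 4096-byte allocation for a request of over 4 GiB, while the second refuses it, which is the behavior the SIZE_MAX check gives bpf_map_area_alloc().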