author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-02-06 11:36:10 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-02-06 11:36:10 +0300
commit    a769f30c7bd7cf917eea05a2b448f3588a87d767 (patch)
tree      10a06089a9525926e23675528dd8190d05849143 /kernel/bpf/hashtab.c
parent    307a082484eb0201ad123f39f95510a9da11985b (diff)
parent    d5adbfcd5f7bcc6fa58a41c5c5ada0e5c826ce2c (diff)
download  linux-a769f30c7bd7cf917eea05a2b448f3588a87d767.tar.xz
Merge 4.10-rc7 into staging-next
This resolves the merge errors that were reported in linux-next and it picks up the staging and IIO fixes that we need/want in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--    kernel/bpf/hashtab.c    24
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 34debc1a9641..a753bbe7df0a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
                free_percpu(pptr);
        }
 free_elems:
-       vfree(htab->elems);
+       bpf_map_area_free(htab->elems);
 }
 
 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
 {
        int err = -ENOMEM, i;
 
-       htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+       htab->elems = bpf_map_area_alloc(htab->elem_size *
+                                        htab->map.max_entries);
        if (!htab->elems)
                return -ENOMEM;
 
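For context: bpf_map_area_alloc() is the helper these call sites migrate to, added in kernel/bpf/syscall.c by the series merged here. A minimal sketch of its shape, reconstructed from the behavior the merged commits describe rather than copied verbatim from the tree:

/* Sketch: try kmalloc() first with flags that fail fast instead of
 * waking the OOM killer, then fall back to vmalloc(). __GFP_ZERO
 * preserves the zeroing that the old vzalloc() call site relied on.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *bpf_map_area_alloc_sketch(size_t size)
{
        const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
        void *area;

        /* Small enough for the buddy allocator? Try kmalloc() first. */
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                area = kmalloc(size, GFP_USER | flags);
                if (area)
                        return area;
        }
        /* Otherwise stitch the area together from order-0 pages. */
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
                         PAGE_KERNEL);
}

(The three-argument __vmalloc() matches the 4.10-era signature; later kernels dropped the pgprot_t argument.)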
@@ -274,7 +274,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
-       if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+       if (htab->map.value_size >= KMALLOC_MAX_SIZE -
            MAX_BPF_STACK - sizeof(struct htab_elem))
                /* if value_size is bigger, the user space won't be able to
                 * access the elements via bpf syscall. This check also makes
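Why the bound changed: <linux/slab.h> defines KMALLOC_MAX_SIZE as (1UL << KMALLOC_SHIFT_MAX), so the replaced expression, (1 << (KMALLOC_SHIFT_MAX - 1)), named only half of the largest allocation kmalloc() can serve, and computed it as a signed int. A hypothetical compile-time check of that relationship:

#include <linux/bug.h>
#include <linux/slab.h>

/* Illustrative only: the old bound equals exactly half the slab
 * maximum, so switching to KMALLOC_MAX_SIZE both documents intent
 * and roughly doubles the permissible value_size.
 */
static inline void kmalloc_bound_check_sketch(void)
{
        BUILD_BUG_ON((1UL << (KMALLOC_SHIFT_MAX - 1)) != KMALLOC_MAX_SIZE / 2);
}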
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                goto free_htab;
 
        err = -ENOMEM;
-       htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
-                                     GFP_USER | __GFP_NOWARN);
-
-       if (!htab->buckets) {
-               htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
-               if (!htab->buckets)
-                       goto free_htab;
-       }
+       htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+                                          sizeof(struct bucket));
+       if (!htab->buckets)
+               goto free_htab;
 
        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_extra_elems:
        free_percpu(htab->extra_elems);
 free_buckets:
-       kvfree(htab->buckets);
+       bpf_map_area_free(htab->buckets);
 free_htab:
        kfree(htab);
        return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
                prealloc_destroy(htab);
 
        free_percpu(htab->extra_elems);
-       kvfree(htab->buckets);
+       bpf_map_area_free(htab->buckets);
        kfree(htab);
 }
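On the free side the diff can be uniform because every map area, however it was allocated, goes back through one helper. A sketch of the matching free path, with the same caveat as above (a reconstruction, not verbatim source):

#include <linux/mm.h>

/* kvfree() inspects the address and dispatches to vfree() for
 * vmalloc()ed areas and kfree() for slab objects, so callers need
 * not track which allocator actually backed the map.
 */
static void bpf_map_area_free_sketch(void *area)
{
        kvfree(area);
}

That is why vfree(htab->elems) and both kvfree(htab->buckets) call sites above collapse into the same bpf_map_area_free() call.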