author    Joel Stanley <joel@jms.id.au>  2020-08-10 07:07:46 +0300
committer Joel Stanley <joel@jms.id.au>  2020-08-10 07:07:49 +0300
commit    4789fd48a313d36fe6b8fc1da5e0788f5ea074cb (patch)
tree      2c8c27b52f0e38abb7416ce81b04c3b85511b694 /kernel
parent    8a9b346382056b52cd7ff141ae9f15a0fcfeb13d (diff)
parent    d9939285fc818425ae92bd99f8c97b6b9ef3bb88 (diff)
Merge tag 'v5.4.57' into dev-5.4

This is the 5.4.57 stable release

Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/hashtab.c     | 12
-rw-r--r--  kernel/bpf/syscall.c     |  4
-rw-r--r--  kernel/events/uprobes.c  |  2
-rw-r--r--  kernel/time/timer.c      |  8
4 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 22066a62c8c9..039d64b1bfb7 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -675,15 +675,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
preempt_enable();
}
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
struct bpf_map *map = &htab->map;
+ void *ptr;
if (map->ops->map_fd_put_ptr) {
- void *ptr = fd_htab_map_get_ptr(map, l);
-
+ ptr = fd_htab_map_get_ptr(map, l);
map->ops->map_fd_put_ptr(ptr);
}
+}
+
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+{
+ htab_put_fd_value(htab, l);
if (htab_is_prealloc(htab)) {
__pcpu_freelist_push(&htab->freelist, &l->fnode);
@@ -735,6 +740,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
*/
pl_new = this_cpu_ptr(htab->extra_elems);
l_new = *pl_new;
+ htab_put_fd_value(htab, old_elem);
*pl_new = old_elem;
} else {
struct pcpu_freelist_node *l;
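Note on the hashtab.c hunks: the backport factors the map_fd_put_ptr() call out of free_htab_elem() into htab_put_fd_value(), and additionally calls it when alloc_htab_elem() recycles a preallocated element via extra_elems. Without that call, an update that reuses old_elem overwrote the old fd-backed value (for example an inner map reference in an fd htab) without releasing it. The sketch below is not kernel code; it illustrates the same "release on reuse" pattern with a hypothetical refcounted slot table.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for an fd-backed value (e.g. an inner map). */
    struct obj { int refcnt; };

    static void obj_put(struct obj *o)
    {
        if (o && --o->refcnt == 0) {
            printf("releasing object %p\n", (void *)o);
            free(o);
        }
    }

    struct slot { struct obj *val; };

    /* Mirrors the role of htab_put_fd_value(): drop whatever is held. */
    static void slot_put_value(struct slot *s)
    {
        obj_put(s->val);
        s->val = NULL;
    }

    /* Free path: always releases the value, as free_htab_elem() does. */
    static void slot_free(struct slot *s)
    {
        slot_put_value(s);
    }

    /* Reuse path: before the fix, code like this simply overwrote
     * s->val, leaking one reference per update. */
    static void slot_reuse(struct slot *s, struct obj *new_val)
    {
        slot_put_value(s);   /* counterpart of the added call */
        s->val = new_val;
    }

    int main(void)
    {
        struct slot s = { NULL };
        struct obj *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

        a->refcnt = b->refcnt = 1;
        slot_reuse(&s, a);   /* install a */
        slot_reuse(&s, b);   /* replace a with b; a is released here */
        slot_free(&s);       /* b is released here */
        return 0;
    }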
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 8bc904f9badb..bf03d04a9e2f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2029,10 +2029,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
case BPF_SK_MSG_VERDICT:
- return sock_map_get_from_fd(attr, NULL);
+ return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_MSG);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- return sock_map_get_from_fd(attr, NULL);
+ return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_SKB);
case BPF_LIRC_MODE2:
return lirc_prog_detach(attr);
case BPF_FLOW_DISSECTOR:
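Note on the syscall.c hunk: bpf_prog_detach() now routes BPF_SK_MSG_VERDICT and the BPF_SK_SKB_* attach types through sock_map_prog_detach() with the expected program type, rather than the generic sock_map_get_from_fd(attr, NULL) call, letting the sockmap code check what is actually being detached. From user space the request looks the same; a minimal libbpf-based sketch, assuming prog_fd and map_fd were obtained earlier from program loading and map creation, might be:

    #include <bpf/bpf.h>
    #include <stdio.h>

    /* Detach a previously attached SK_MSG verdict program from a sockmap.
     * prog_fd and map_fd are assumed to come from earlier libbpf calls. */
    static int detach_sk_msg(int prog_fd, int map_fd)
    {
        int err = bpf_prog_detach2(prog_fd, map_fd, BPF_SK_MSG_VERDICT);

        if (err)
            fprintf(stderr, "detach failed: %d\n", err);
        return err;
    }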
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 291680ba8504..aa83538efc23 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2205,7 +2205,7 @@ static void handle_swbp(struct pt_regs *regs)
if (!uprobe) {
if (is_swbp > 0) {
/* No matching uprobe; signal SIGTRAP. */
- send_sig(SIGTRAP, current, 0);
+ force_sig(SIGTRAP);
} else {
/*
* Either we raced with uprobe_unregister() or we can't
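Note on the uprobes.c hunk: when a breakpoint trap has no matching uprobe, handle_swbp() now uses force_sig(SIGTRAP) instead of send_sig(). force_sig() targets the current task, makes sure the signal is actually delivered even if SIGTRAP was ignored or blocked, and marks it as kernel-originated rather than user-sent, which is what debuggers inspecting the siginfo expect. The observable behaviour from user space is simply a SIGTRAP on an unclaimed breakpoint; the x86-only test below is illustrative and not part of the patch.

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_trap(int sig)
    {
        /* Runs when the kernel turns the unclaimed breakpoint into SIGTRAP. */
        write(STDOUT_FILENO, "got SIGTRAP\n", 12);
    }

    int main(void)
    {
        signal(SIGTRAP, on_trap);
    #if defined(__x86_64__) || defined(__i386__)
        __asm__ volatile("int3");   /* breakpoint with no uprobe consumer */
    #else
        raise(SIGTRAP);             /* portable stand-in on other architectures */
    #endif
        puts("resumed after the trap");
        return 0;
    }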
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 1e9b81a930c0..a3ae244b1bcd 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -43,6 +43,7 @@
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/random.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -1742,6 +1743,13 @@ void update_process_times(int user_tick)
scheduler_tick();
if (IS_ENABLED(CONFIG_POSIX_TIMERS))
run_posix_cpu_timers();
+
+ /* The current CPU might make use of net randoms without receiving IRQs
+ * to renew them often enough. Let's update the net_rand_state from a
+ * non-constant value that's not affine to the number of calls to make
+ * sure it's updated when there's some activity (we don't care in idle).
+ */
+ this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
}
/**
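Note on the timer.c hunk: this backports the change that keeps the per-CPU net_rand_state moving from update_process_times(), folding rol32(jiffies, 24) + user_tick into the s1 word so the prandom state is perturbed even on CPUs that rarely take the interrupts which would otherwise refresh it. A small user-space sketch of the mixing arithmetic, with rol32() reimplemented and jiffies and the state word replaced by stand-in variables:

    #include <stdint.h>
    #include <stdio.h>

    /* The rotate-left performed by the kernel's rol32() helper. */
    static uint32_t rol32(uint32_t word, unsigned int shift)
    {
        return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
    }

    int main(void)
    {
        uint32_t s1 = 0x12345678u;      /* stand-in for this CPU's net_rand_state.s1 */
        uint32_t jiffies = 4294937296u; /* stand-in for the kernel tick counter */
        int user_tick = 1;              /* 1 if the tick interrupted user mode */

        /* Equivalent of:
         * this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); */
        s1 += rol32(jiffies, 24) + (uint32_t)user_tick;

        printf("perturbed s1 = 0x%08x\n", s1);
        return 0;
    }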