author | Liu Shixin <liushixin2@huawei.com> | 2023-11-15 11:21:37 +0300
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-12-07 03:12:44 +0300
commit | 4eff7d62abdeb293233fdda2a2ecc4e0907a9a30 (patch)
tree | 9a7644fd46a84cae8c022cc751dcdf6ca31ecdc0 /mm/kmemleak.c
parent | 727d16f1993bcf46ee2888c13e3fc1463babed8d (diff)
download | linux-4eff7d62abdeb293233fdda2a2ecc4e0907a9a30.tar.xz
Revert "mm/kmemleak: move the initialisation of object to __link_object"
Patch series "Fix invalid wait context of set_track_prepare()".
Geert reported an invalid wait context [1] caused by moving
set_track_prepare() inside kmemleak_lock. This is not allowed because in
RT mode spinlocks can be preempted but raw spinlocks cannot, so it is not
allowed to acquire a spinlock while holding a raw spinlock. The second
patch fixes the same problem in kmemleak_update_trace().
This patch (of 2):
Move the initialisation of the object back to __alloc_object(), because
set_track_prepare() attempts to acquire zone->lock (a spinlock) while
__link_object() holds kmemleak_lock (a raw spinlock). This is not allowed
in RT mode.
This reverts commit 245245c2fffd00 ("mm/kmemleak: move the initialisation
of object to __link_object").
Link: https://lkml.kernel.org/r/20231115082138.2649870-1-liushixin2@huawei.com
Link: https://lkml.kernel.org/r/20231115082138.2649870-2-liushixin2@huawei.com
Fixes: 245245c2fffd ("mm/kmemleak: move the initialisation of object to __link_object")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
Closes: https://lore.kernel.org/linux-mm/CAMuHMdWj0UzwNaxUvcocTfh481qRJpOWwXxsJCTJfu1oCqvgdA@mail.gmail.com/ [1]
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Patrick Wang <patrick.wang.shcn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/kmemleak.c')
-rw-r--r-- | mm/kmemleak.c | 36
1 file changed, 19 insertions, 17 deletions
```diff
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1eacca03bedd..22bab3738a9e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -642,32 +642,16 @@ static struct kmemleak_object *__alloc_object(gfp_t gfp)
 	if (!object) {
 		pr_warn("Cannot allocate a kmemleak_object structure\n");
 		kmemleak_disable();
+		return NULL;
 	}
 
-	return object;
-}
-
-static int __link_object(struct kmemleak_object *object, unsigned long ptr,
-			 size_t size, int min_count, bool is_phys)
-{
-
-	struct kmemleak_object *parent;
-	struct rb_node **link, *rb_parent;
-	unsigned long untagged_ptr;
-	unsigned long untagged_objp;
-
 	INIT_LIST_HEAD(&object->object_list);
 	INIT_LIST_HEAD(&object->gray_list);
 	INIT_HLIST_HEAD(&object->area_list);
 	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
-	object->pointer = ptr;
-	object->size = kfence_ksize((void *)ptr) ?: size;
 	object->excess_ref = 0;
-	object->min_count = min_count;
 	object->count = 0;			/* white color initially */
-	object->jiffies = jiffies;
 	object->checksum = 0;
 	object->del_state = 0;
 
@@ -692,6 +676,24 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
 	/* kernel backtrace */
 	object->trace_handle = set_track_prepare();
 
+	return object;
+}
+
+static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+			 size_t size, int min_count, bool is_phys)
+{
+
+	struct kmemleak_object *parent;
+	struct rb_node **link, *rb_parent;
+	unsigned long untagged_ptr;
+	unsigned long untagged_objp;
+
+	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
+	object->pointer = ptr;
+	object->size = kfence_ksize((void *)ptr) ?: size;
+	object->min_count = min_count;
+	object->jiffies = jiffies;
+
 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 	/*
 	 * Only update min_addr and max_addr with object
```
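For context, a simplified, hypothetical sketch of the caller ordering this revert restores (the real caller is __create_object() in mm/kmemleak.c, which has more parameters and error handling; create_object_sketch() below is an invented name). With the split restored, __alloc_object() does all field initialisation, including set_track_prepare(), before the caller takes the raw kmemleak_lock around __link_object().

```c
/*
 * Simplified sketch of the restored ordering; not the actual
 * mm/kmemleak.c code.
 */
static void create_object_sketch(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	struct kmemleak_object *object;
	unsigned long flags;

	/*
	 * All initialisation, including set_track_prepare(), happens here
	 * with no raw spinlock held, so zone->lock may be taken safely.
	 */
	object = __alloc_object(gfp);
	if (!object)
		return;

	/* Only the rbtree/list insertion runs under the raw kmemleak_lock. */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	__link_object(object, ptr, size, min_count, false);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}
```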