author    Rodrigo Vivi <rodrigo.vivi@intel.com>    2020-02-25 20:29:58 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>   2020-02-25 20:39:23 +0300
commit    ff36e78fdb251b9fa65028554689806961e011eb (patch)
tree      f5af925d509224e06a10936196be6c06bcbdc6ae /lib/debugobjects.c
parent    143d9c3e7b6aa2b785abba04266ed75f2b52e94a (diff)
parent    1b245ec5b685ebf8e6e5d1e6b5bcc03b6608e8b0 (diff)
download  linux-ff36e78fdb251b9fa65028554689806961e011eb.tar.xz
Merge drm/drm-next into drm-intel-next-queued
Some DSI and VBT pending patches from Hans will apply
cleanly and with less ugly conflicts if they are rebuilt
on top of other patches that recently landed on drm-next.
Reference: https://patchwork.freedesktop.org/series/70952/
Cc: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c  46
1 file changed, 25 insertions, 21 deletions
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 61261195f5b6..48054dbf1b51 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -132,14 +132,18 @@ static void fill_pool(void)
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (likely(obj_pool_free >= debug_objects_pool_min_level))
+	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
 		return;
 
 	/*
 	 * Reuse objs from the global free list; they will be reinitialized
 	 * when allocating.
+	 *
+	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+	 * sections.
 	 */
-	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		/*
 		 * Recheck with the lock held as the worker thread might have
@@ -148,9 +152,9 @@ static void fill_pool(void)
 		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
-			obj_nr_tofree--;
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 			hlist_add_head(&obj->node, &obj_pool);
-			obj_pool_free++;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -158,7 +162,7 @@ static void fill_pool(void)
 	if (unlikely(!obj_cache))
 		return;
 
-	while (obj_pool_free < debug_objects_pool_min_level) {
+	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 		struct debug_obj *new[ODEBUG_BATCH_SIZE];
 		int cnt;
 
@@ -174,7 +178,7 @@ static void fill_pool(void)
 		while (cnt) {
 			hlist_add_head(&new[--cnt]->node, &obj_pool);
 			debug_objects_allocated++;
-			obj_pool_free++;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 	obj = __alloc_object(&obj_pool);
 	if (obj) {
 		obj_pool_used++;
-		obj_pool_free--;
+		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 
 		/*
 		 * Looking ahead, allocate one batch of debug objects and
@@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 					       &percpu_pool->free_objs);
 				percpu_pool->obj_free++;
 				obj_pool_used++;
-				obj_pool_free--;
+				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 			}
 		}
 
@@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work)
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
 			hlist_add_head(&obj->node, &obj_pool);
-			obj_pool_free++;
-			obj_nr_tofree--;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 		return;
@@ -324,7 +328,7 @@ free_objs:
 	if (obj_nr_tofree) {
 		hlist_move_list(&obj_to_free, &tofree);
 		debug_objects_freed += obj_nr_tofree;
-		obj_nr_tofree = 0;
+		WRITE_ONCE(obj_nr_tofree, 0);
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
 
@@ -375,10 +379,10 @@ free_to_obj_pool:
 	obj_pool_used--;
 
 	if (work) {
-		obj_nr_tofree++;
+		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 		hlist_add_head(&obj->node, &obj_to_free);
 		if (lookahead_count) {
-			obj_nr_tofree += lookahead_count;
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
 			obj_pool_used -= lookahead_count;
 			while (lookahead_count) {
 				hlist_add_head(&objs[--lookahead_count]->node,
@@ -396,15 +400,15 @@ free_to_obj_pool:
 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 				obj = __alloc_object(&obj_pool);
 				hlist_add_head(&obj->node, &obj_to_free);
-				obj_pool_free--;
-				obj_nr_tofree++;
+				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 			}
 		}
 	} else {
-		obj_pool_free++;
+		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		hlist_add_head(&obj->node, &obj_pool);
 		if (lookahead_count) {
-			obj_pool_free += lookahead_count;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
 			obj_pool_used -= lookahead_count;
 			while (lookahead_count) {
 				hlist_add_head(&objs[--lookahead_count]->node,
@@ -423,7 +427,7 @@ free_to_obj_pool:
 static void free_object(struct debug_obj *obj)
 {
 	__free_object(obj);
-	if (!obj_freeing && obj_nr_tofree) {
+	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 		WRITE_ONCE(obj_freeing, true);
 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 	}
@@ -982,7 +986,7 @@ repeat:
 		debug_objects_maxchecked = objs_checked;
 
 	/* Schedule work to actually kmem_cache_free() objects */
-	if (!obj_freeing && obj_nr_tofree) {
+	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 		WRITE_ONCE(obj_freeing, true);
 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 	}
@@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
+	seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-	seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
+	seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
 	return 0;
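For readers unfamiliar with the pattern the diff applies, the following is a minimal, self-contained userspace sketch of the same idea: the hot path reads a shared counter locklessly with READ_ONCE(), while every update happens under a lock and is marked with WRITE_ONCE() so the compiler cannot tear, fuse, or re-load the access. The simplified READ_ONCE()/WRITE_ONCE() macros and the names pool_free, pool_min_level, and refill_one() are illustrative stand-ins for the kernel's macros and for obj_pool_free / debug_objects_pool_min_level, not the kernel's definitions.

	/*
	 * Userspace sketch of a lockless fast-path check paired with
	 * locked, WRITE_ONCE()-marked updates. Builds with gcc or clang
	 * (uses the GNU typeof extension): cc -pthread sketch.c
	 */
	#include <pthread.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
	#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static int pool_free;               /* written under pool_lock only */
	static const int pool_min_level = 256;

	static void refill_one(void)
	{
		pthread_mutex_lock(&pool_lock);
		/* Writer holds the lock; WRITE_ONCE pairs with lockless readers. */
		WRITE_ONCE(pool_free, pool_free + 1);
		pthread_mutex_unlock(&pool_lock);
	}

	static void fill_pool(void)
	{
		/* Lockless fast path: a racy but tear-free read of the counter. */
		if (READ_ONCE(pool_free) >= pool_min_level)
			return;

		while (READ_ONCE(pool_free) < pool_min_level)
			refill_one();
	}

	int main(void)
	{
		fill_pool();
		printf("pool_free = %d\n", READ_ONCE(pool_free));
		return 0;
	}

The lockless read is intentionally racy: a stale value costs at most a harmless extra loop iteration, and the kernel code rechecks the counters under pool_lock before actually moving objects. What the annotations buy is a guarantee that each marked access is a single, untorn load or store, which is exactly the property whose absence gets flagged as a data race in the unannotated code.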