author		Hui Zhu <zhuhui@kylinos.cn>	2026-03-10 04:56:57 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2026-04-05 23:53:25 +0300
commit		86e69c020b62ee109e10db0ae53ba97f3465df8e (patch)
tree		af3ed6b8d9bfb59e1655a2c3669cfc0818b89cf2
parent		d239462787b072c78eb19fc1f155c3d411256282 (diff)
download	linux-86e69c020b62ee109e10db0ae53ba97f3465df8e.tar.xz
mm/swap: strengthen locking assertions and invariants in cluster allocation
swap_cluster_alloc_table() requires several locks to be held by its callers:
ci->lock, the per-CPU swap_cluster lock, and, for non-solid-state devices
(non-SWP_SOLIDSTATE), the si->global_cluster_lock.

While most call paths (e.g., via cluster_alloc_swap_entry() or
alloc_swap_scan_list()) correctly acquire these locks before invocation, the
path through swap_reclaim_work() -> swap_reclaim_full_clusters() ->
isolate_lock_cluster() is distinct.  This path operates exclusively on
si->full_clusters, where the swap allocation tables are guaranteed to be
already allocated.  Consequently, isolate_lock_cluster() should never trigger
a call to swap_cluster_alloc_table() for these clusters.

Strengthen the locking and state assertions to formalize these invariants:

1. Add a lockdep_assert_held() for si->global_cluster_lock in
   swap_cluster_alloc_table() for non-SWP_SOLIDSTATE devices.

2. Reorder existing lockdep assertions in swap_cluster_alloc_table() to
   match the actual lock acquisition order (per-CPU lock, then global lock,
   then cluster lock).

3. Add a VM_WARN_ON_ONCE() in isolate_lock_cluster() to ensure that table
   allocations are only attempted for clusters being isolated from the free
   list.  Attempting to allocate a table for a cluster from other lists
   (like the full list during reclaim) indicates a violation of subsystem
   invariants.

These changes ensure locking consistency and help catch potential
synchronization or logic issues during development.

[zhuhui@kylinos.cn: remove redundant comment, per Barry]
  Link: https://lkml.kernel.org/r/20260311022241.177801-1-hui.zhu@linux.dev
[zhuhui@kylinos.cn: initialize `flags', per Chris]
  Link: https://lkml.kernel.org/r/20260312023024.903143-1-hui.zhu@linux.dev
Link: https://lkml.kernel.org/r/20260310015657.42395-1-hui.zhu@linux.dev
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
Reviewed-by: Youngjun Park <youngjun.park@lge.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
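As a rough illustration of the nesting described above, the following is a
minimal user-space sketch of the lock order that swap_cluster_alloc_table()
now asserts: per-CPU cluster lock first, then si->global_cluster_lock on
non-SWP_SOLIDSTATE devices, then ci->lock.  It is not the kernel code:
pthread mutexes, boolean "held" flags and assert() stand in for the kernel's
local lock, spinlocks and lockdep_assert_held(), and every type and flag
value below is a simplified stand-in.

/*
 * User-space model only: simplified stand-ins for the kernel's
 * percpu_swap_cluster lock, si->global_cluster_lock and ci->lock.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SWP_SOLIDSTATE	0x1	/* stand-in value, not the kernel's definition */

struct cluster {
	pthread_mutex_t lock;
	bool lock_held;
};

struct swap_info {
	unsigned int flags;
	pthread_mutex_t global_cluster_lock;	/* used only when !SWP_SOLIDSTATE */
	bool global_lock_held;
};

/* The per-CPU cluster lock, modelled here as a single process-wide mutex. */
static pthread_mutex_t percpu_cluster_lock = PTHREAD_MUTEX_INITIALIZER;
static bool percpu_lock_held;

/* Mirrors the assertion order the patch establishes:
 * per-CPU lock, then global lock (rotational devices only), then ci->lock.
 */
static void alloc_table(struct swap_info *si, struct cluster *ci)
{
	assert(percpu_lock_held);
	if (!(si->flags & SWP_SOLIDSTATE))
		assert(si->global_lock_held);
	assert(ci->lock_held);
	printf("table allocated with all required locks held\n");
}

int main(void)
{
	struct swap_info si = {
		.flags = 0,	/* rotational device: global lock required */
		.global_cluster_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct cluster ci = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Acquire in the documented order before calling alloc_table(). */
	pthread_mutex_lock(&percpu_cluster_lock);
	percpu_lock_held = true;
	pthread_mutex_lock(&si.global_cluster_lock);
	si.global_lock_held = true;
	pthread_mutex_lock(&ci.lock);
	ci.lock_held = true;

	alloc_table(&si, &ci);

	/* Release in the reverse order of acquisition. */
	ci.lock_held = false;
	pthread_mutex_unlock(&ci.lock);
	si.global_lock_held = false;
	pthread_mutex_unlock(&si.global_cluster_lock);
	percpu_lock_held = false;
	pthread_mutex_unlock(&percpu_cluster_lock);
	return 0;
}

Built with `cc -pthread`, the model runs cleanly when the locks are taken in
the documented order and trips the corresponding assertion if one of them is
skipped.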
-rw-r--r--	mm/swapfile.c	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 915bc93964db..71a7d6959f3e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -498,8 +498,10 @@ swap_cluster_alloc_table(struct swap_info_struct *si,
 	 * Only cluster isolation from the allocator does table allocation.
 	 * Swap allocator uses percpu clusters and holds the local lock.
 	 */
-	lockdep_assert_held(&ci->lock);
 	lockdep_assert_held(&this_cpu_ptr(&percpu_swap_cluster)->lock);
+	if (!(si->flags & SWP_SOLIDSTATE))
+		lockdep_assert_held(&si->global_cluster_lock);
+	lockdep_assert_held(&ci->lock);
 
 	/* The cluster must be free and was just isolated from the free list. */
 	VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci));
@@ -600,6 +602,7 @@ static struct swap_cluster_info *isolate_lock_cluster(
 		struct swap_info_struct *si, struct list_head *list)
 {
 	struct swap_cluster_info *ci, *found = NULL;
+	u8 flags = CLUSTER_FLAG_NONE;
 
 	spin_lock(&si->lock);
 	list_for_each_entry(ci, list, list) {
@@ -612,6 +615,7 @@ static struct swap_cluster_info *isolate_lock_cluster(
 				ci->flags != CLUSTER_FLAG_FULL);
 
 		list_del(&ci->list);
+		flags = ci->flags;
 		ci->flags = CLUSTER_FLAG_NONE;
 		found = ci;
 		break;
@@ -620,6 +624,7 @@ static struct swap_cluster_info *isolate_lock_cluster(
 
 	if (found && !cluster_table_is_alloced(found)) {
 		/* Only an empty free cluster's swap table can be freed. */
+		VM_WARN_ON_ONCE(flags != CLUSTER_FLAG_FREE);
 		VM_WARN_ON_ONCE(list != &si->free_clusters);
 		VM_WARN_ON_ONCE(!cluster_is_empty(found));
 		return swap_cluster_alloc_table(si, found);
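To show why ci->flags is snapshotted into `flags` before being cleared, here
is a small user-space model of the isolate-time invariant, again with
simplified stand-in types rather than the kernel's: a cluster that still has
no swap table must have been isolated from the free list, which is what the
new VM_WARN_ON_ONCE() (modelled below as assert()) checks.

/* User-space model only: stand-in types, not the kernel's definitions. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum cluster_flag { CLUSTER_FLAG_NONE, CLUSTER_FLAG_FREE, CLUSTER_FLAG_FULL };

struct cluster {
	enum cluster_flag flags;
	bool table_alloced;
};

/* Mirrors isolate_lock_cluster(): snapshot flags before clearing them,
 * then use the snapshot to validate a table allocation attempt.
 */
static void isolate(struct cluster *ci)
{
	enum cluster_flag flags = ci->flags;	/* snapshot before clearing */

	ci->flags = CLUSTER_FLAG_NONE;		/* the cluster is now isolated */

	if (!ci->table_alloced) {
		/* Only a cluster taken off the free list may lack a table. */
		assert(flags == CLUSTER_FLAG_FREE);
		ci->table_alloced = true;
		printf("allocated table for a cluster from the free list\n");
	}
}

int main(void)
{
	struct cluster free_ci = { .flags = CLUSTER_FLAG_FREE };
	struct cluster full_ci = { .flags = CLUSTER_FLAG_FULL, .table_alloced = true };

	isolate(&free_ci);	/* allowed: needs a table, came from the free list */
	isolate(&full_ci);	/* table already present, nothing to check */
	return 0;
}

The snapshot matters because ci->flags is reset to CLUSTER_FLAG_NONE as part
of taking the cluster off its list, so the later check cannot rely on the
live field.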