author	Alexander Aring <aahringo@redhat.com>	2024-04-15 21:39:42 +0300
committer	David Teigland <teigland@redhat.com>	2024-04-16 22:45:31 +0300
commit	e91313591b29ce724fe2f1bdf29f2482878fc275 (patch)
tree	f3058f289d907f09b98a946527b388e6e6f57a93	/fs/dlm/debug_fs.c
parent	b1f2381c1a8d52b973944090ed8b42c750152533 (diff)
dlm: use rwlock for rsb hash table
The conversion to rhashtable introduced a hash table lock per lockspace,
in place of per bucket locks. To make this more scalable, switch to using
a rwlock for hash table access. The common case fast path uses it as a
read lock.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
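As context for the pattern the message describes, a minimal sketch of a read-mostly rwlock
guarding an rhashtable might look like the following. This is not the actual dlm code; the
names (my_table, my_obj, my_lookup, my_insert, params) are hypothetical and only illustrate
the read-lock fast path versus the write-lock modification path.

	/*
	 * Sketch only, not dlm code: one rwlock guards an rhashtable.
	 * Lookups (the common case) take it for reading so they can run
	 * in parallel; inserts and removals take it for writing.
	 */
	#include <linux/spinlock.h>
	#include <linux/rhashtable.h>
	#include <linux/kref.h>

	struct my_obj {
		struct rhash_head node;	/* linkage into the rhashtable */
		struct kref ref;
	};

	struct my_table {
		rwlock_t lock;		/* was a spinlock before the conversion */
		struct rhashtable ht;
	};

	/* Fast path: shared (read) lock, concurrent lookups allowed. */
	static struct my_obj *my_lookup(struct my_table *t, const void *key,
					const struct rhashtable_params params)
	{
		struct my_obj *obj;

		read_lock_bh(&t->lock);
		obj = rhashtable_lookup_fast(&t->ht, key, params);
		if (obj)
			kref_get(&obj->ref);	/* pin it before dropping the lock */
		read_unlock_bh(&t->lock);
		return obj;
	}

	/* Slow path: exclusive (write) lock for modifications. */
	static int my_insert(struct my_table *t, struct my_obj *obj,
			     const struct rhashtable_params params)
	{
		int rv;

		write_lock_bh(&t->lock);
		rv = rhashtable_insert_fast(&t->ht, &obj->node, params);
		write_unlock_bh(&t->lock);
		return rv;
	}

The diff below applies the read-side half of this pattern to the debugfs dump path,
which only reads the rsb lists.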
Diffstat (limited to 'fs/dlm/debug_fs.c')
-rw-r--r--	fs/dlm/debug_fs.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 70567919f1b7..6ab3ed4074c6 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -413,7 +413,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
else
list = &ls->ls_keep;
- spin_lock_bh(&ls->ls_rsbtbl_lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
return seq_list_start(list, *pos);
}
@@ -434,7 +434,7 @@ static void table_seq_stop(struct seq_file *seq, void *iter_ptr)
{
struct dlm_ls *ls = seq->private;
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
static const struct seq_operations format1_seq_ops = {
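For context (not part of the commit): with the seq_file interface, the lock taken in the
->start callback stays held while each entry is shown and is only released in ->stop, so
the whole debugfs dump runs under one read-side critical section. That is why the change
touches both table_seq_start() and table_seq_stop(). A generic sketch of that pairing,
with hypothetical names (my_data, my_seq_*), could look like this:

	/* Sketch only: generic seq_file pattern with a lock held across the dump. */
	#include <linux/seq_file.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct my_data {
		rwlock_t lock;
		struct list_head list;
	};

	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	{
		struct my_data *d = seq->private;

		read_lock_bh(&d->lock);		/* taken here ... */
		return seq_list_start(&d->list, *pos);
	}

	static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		struct my_data *d = seq->private;

		return seq_list_next(v, &d->list, pos);
	}

	static void my_seq_stop(struct seq_file *seq, void *v)
	{
		struct my_data *d = seq->private;

		read_unlock_bh(&d->lock);	/* ... released here, after the dump */
	}

	static int my_seq_show(struct seq_file *seq, void *v)
	{
		seq_puts(seq, "entry\n");	/* print one list entry */
		return 0;
	}

	static const struct seq_operations my_seq_ops = {
		.start = my_seq_start,
		.next  = my_seq_next,
		.stop  = my_seq_stop,
		.show  = my_seq_show,
	};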