From ad191e0eeebf64a60ca2d16ca01a223d2b1dd25e Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:33 -0400 Subject: dlm: fix user space lock decision to copy lvb This patch fixes the copy lvb decision for user space lock requests. Checking dlm_lvb_operations is done earlier, where granted/requested lock modes are available to use in the matrix. The decision had been moved to the wrong location, where granted mode and requested mode were the same, which caused the dlm_lvb_operations matrix to produce the wrong copy decision. For PW or EX requests, the caller could get invalid lvb data. Fixes: 61bed0baa4db ("fs: dlm: use a non-static queue for callbacks") Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 14 ++++++++++++++ fs/dlm/dlm_internal.h | 1 + fs/dlm/user.c | 15 ++------------- 3 files changed, 17 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 1f2f70a1b824..decedc4ee15f 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -12,6 +12,7 @@ #include #include "dlm_internal.h" +#include "lvb_table.h" #include "memory.h" #include "lock.h" #include "user.h" @@ -42,6 +43,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, struct dlm_ls *ls = lkb->lkb_resource->res_ls; int rv = DLM_ENQUEUE_CALLBACK_SUCCESS; struct dlm_callback *cb; + int copy_lvb = 0; int prev_mode; if (flags & DLM_CB_BAST) { @@ -73,6 +75,17 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, goto out; } } + } else if (flags & DLM_CB_CAST) { + if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { + if (lkb->lkb_last_cast) + prev_mode = lkb->lkb_last_cb->mode; + else + prev_mode = -1; + + if (!status && lkb->lkb_lksb->sb_lvbptr && + dlm_lvb_operations[prev_mode + 1][mode + 1]) + copy_lvb = 1; + } } cb = dlm_allocate_cb(); @@ -85,6 +98,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, cb->mode = mode; cb->sb_status = status; cb->sb_flags = (sbflags & 0x000000FF); + cb->copy_lvb = copy_lvb; kref_init(&cb->ref); if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags)) rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 3b4dbce849f0..a9137c90f348 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -222,6 +222,7 @@ struct dlm_callback { int sb_status; /* copy to lksb status */ uint8_t sb_flags; /* copy to lksb flags */ int8_t mode; /* rq mode of bast, gr mode of cast */ + int copy_lvb; struct list_head list; struct kref ref; diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 9f9b68448830..12a483deeef5 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -21,7 +21,6 @@ #include "dlm_internal.h" #include "lockspace.h" #include "lock.h" -#include "lvb_table.h" #include "user.h" #include "ast.h" #include "config.h" @@ -806,8 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, struct dlm_lkb *lkb; DECLARE_WAITQUEUE(wait, current); struct dlm_callback *cb; - int rv, ret, copy_lvb = 0; - int old_mode, new_mode; + int rv, ret; if (count == sizeof(struct dlm_device_version)) { rv = copy_version_to_user(buf, count); @@ -864,9 +862,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list); - /* rem_lkb_callback sets a new lkb_last_cast */ - old_mode = lkb->lkb_last_cast->mode; - rv = dlm_dequeue_lkb_callback(lkb, &cb); switch (rv) { case DLM_DEQUEUE_CALLBACK_EMPTY: @@ -895,12
+890,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, if (cb->flags & DLM_CB_BAST) { trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode); } else if (cb->flags & DLM_CB_CAST) { - new_mode = cb->mode; - - if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr && - dlm_lvb_operations[old_mode + 1][new_mode + 1]) - copy_lvb = 1; - lkb->lkb_lksb->sb_status = cb->sb_status; lkb->lkb_lksb->sb_flags = cb->sb_flags; trace_dlm_ast(lkb->lkb_resource->res_ls, lkb); @@ -908,7 +897,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, ret = copy_result_to_user(lkb->lkb_ua, test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), - cb->flags, cb->mode, copy_lvb, buf, count); + cb->flags, cb->mode, cb->copy_lvb, buf, count); kref_put(&cb->ref, dlm_release_callback); -- cgit v1.2.3 From 609ed5bde2bbe55b78142740de1451ece9896a84 Mon Sep 17 00:00:00 2001 From: Kunwu Chan Date: Thu, 28 Mar 2024 11:48:34 -0400 Subject: dlm: Simplify the allocation of slab caches in dlm_midcomms_cache_create Use the new KMEM_CACHE() macro instead of direct kmem_cache_create to simplify the creation of SLAB caches. Signed-off-by: Kunwu Chan Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/midcomms.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index 2247ebb61be1..8e9920f1b48b 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c @@ -226,8 +226,7 @@ static DEFINE_MUTEX(close_lock); struct kmem_cache *dlm_midcomms_cache_create(void) { - return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle), - 0, 0, NULL); + return KMEM_CACHE(dlm_mhandle, 0); } static inline const char *dlm_state_str(int state) -- cgit v1.2.3 From 1131f339089bdf7ef7aa0a026bdefe1c9a22e8a1 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:37 -0400 Subject: dlm: remove lkb from callback tracepoints Stop using lkb structs in the callback tracepoints so that lkb references are not needed. This prepares for separating lkb structs from callbacks. 
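The underlying pattern here is lifetime decoupling: every field the tracepoint needs is copied out of the lkb at the call site, so the tracing machinery never dereferences the lkb and never needs to hold a reference on it. Below is a minimal stand-alone C sketch of that idea; the struct and function names are invented for illustration and are not the dlm code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lock { unsigned int id; char res_name[16]; };

struct trace_event {
	unsigned int lkb_id;	/* copied, not referenced */
	char res_name[16];	/* copied, not referenced */
};

/* snapshot the fields at the call site; keep no pointer back to *lkb */
static struct trace_event *snapshot(const struct lock *lkb)
{
	struct trace_event *ev = malloc(sizeof(*ev));

	if (ev) {
		ev->lkb_id = lkb->id;
		memcpy(ev->res_name, lkb->res_name, sizeof(ev->res_name));
	}
	return ev;
}

int main(void)
{
	struct lock lkb = { .id = 0x1a2b, .res_name = "testres" };
	struct trace_event *ev = snapshot(&lkb);

	if (!ev)
		return 1;
	/* the lock could be long gone by now; the event is self-contained */
	printf("lkb_id=%x res_name=%s\n", ev->lkb_id, ev->res_name);
	free(ev);
	return 0;
}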
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 9 +++++++-- fs/dlm/user.c | 9 +++++++-- include/trace/events/dlm.h | 46 +++++++++++++++++++--------------------------- 3 files changed, 33 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index decedc4ee15f..dd7cca3c1472 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -171,6 +171,7 @@ void dlm_callback_work(struct work_struct *work) { struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work); struct dlm_ls *ls = lkb->lkb_resource->res_ls; + struct dlm_rsb *rsb = lkb->lkb_resource; void (*castfn) (void *astparam); void (*bastfn) (void *astparam, int mode); struct dlm_callback *cb; @@ -190,14 +191,18 @@ void dlm_callback_work(struct work_struct *work) bastfn = lkb->lkb_bastfn; if (cb->flags & DLM_CB_BAST) { - trace_dlm_bast(ls, lkb, cb->mode); + trace_dlm_bast(ls->ls_global_id, lkb->lkb_id, + cb->mode, rsb->res_name, + rsb->res_length); lkb->lkb_last_bast_time = ktime_get(); lkb->lkb_last_bast_mode = cb->mode; bastfn(lkb->lkb_astparam, cb->mode); } else if (cb->flags & DLM_CB_CAST) { lkb->lkb_lksb->sb_status = cb->sb_status; lkb->lkb_lksb->sb_flags = cb->sb_flags; - trace_dlm_ast(ls, lkb); + trace_dlm_ast(ls->ls_global_id, lkb->lkb_id, + cb->sb_flags, cb->sb_status, + rsb->res_name, rsb->res_length); lkb->lkb_last_cast_time = ktime_get(); castfn(lkb->lkb_astparam); } diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 12a483deeef5..6f99bbeeac9b 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -805,6 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, struct dlm_lkb *lkb; DECLARE_WAITQUEUE(wait, current); struct dlm_callback *cb; + struct dlm_rsb *rsb; int rv, ret; if (count == sizeof(struct dlm_device_version)) { @@ -887,12 +888,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, } spin_unlock(&proc->asts_spin); + rsb = lkb->lkb_resource; if (cb->flags & DLM_CB_BAST) { - trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode); + trace_dlm_bast(rsb->res_ls->ls_global_id, lkb->lkb_id, + cb->mode, rsb->res_name, rsb->res_length); } else if (cb->flags & DLM_CB_CAST) { lkb->lkb_lksb->sb_status = cb->sb_status; lkb->lkb_lksb->sb_flags = cb->sb_flags; - trace_dlm_ast(lkb->lkb_resource->res_ls, lkb); + trace_dlm_ast(rsb->res_ls->ls_global_id, lkb->lkb_id, + cb->sb_flags, cb->sb_status, rsb->res_name, + rsb->res_length); } ret = copy_result_to_user(lkb->lkb_ua, diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h index c1a146f9fc91..af160082c9e3 100644 --- a/include/trace/events/dlm.h +++ b/include/trace/events/dlm.h @@ -189,29 +189,25 @@ TRACE_EVENT(dlm_lock_end, TRACE_EVENT(dlm_bast, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode), + TP_PROTO(__u32 ls_id, __u32 lkb_id, int mode, + const char *res_name, size_t res_length), - TP_ARGS(ls, lkb, mode), + TP_ARGS(ls_id, lkb_id, mode, res_name, res_length), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) __field(int, mode) - __dynamic_array(unsigned char, res_name, - lkb->lkb_resource ? 
lkb->lkb_resource->res_length : 0) + __dynamic_array(unsigned char, res_name, res_length) ), TP_fast_assign( - struct dlm_rsb *r; - - __entry->ls_id = ls->ls_global_id; - __entry->lkb_id = lkb->lkb_id; + __entry->ls_id = ls_id; + __entry->lkb_id = lkb_id; __entry->mode = mode; - r = lkb->lkb_resource; - if (r) - memcpy(__get_dynamic_array(res_name), r->res_name, - __get_dynamic_array_len(res_name)); + memcpy(__get_dynamic_array(res_name), res_name, + __get_dynamic_array_len(res_name)); ), TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s", @@ -224,31 +220,27 @@ TRACE_EVENT(dlm_bast, TRACE_EVENT(dlm_ast, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb), + TP_PROTO(__u32 ls_id, __u32 lkb_id, __u8 sb_flags, int sb_status, + const char *res_name, size_t res_length), - TP_ARGS(ls, lkb), + TP_ARGS(ls_id, lkb_id, sb_flags, sb_status, res_name, res_length), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) - __field(u8, sb_flags) + __field(__u8, sb_flags) __field(int, sb_status) - __dynamic_array(unsigned char, res_name, - lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) + __dynamic_array(unsigned char, res_name, res_length) ), TP_fast_assign( - struct dlm_rsb *r; - - __entry->ls_id = ls->ls_global_id; - __entry->lkb_id = lkb->lkb_id; - __entry->sb_flags = lkb->lkb_lksb->sb_flags; - __entry->sb_status = lkb->lkb_lksb->sb_status; + __entry->ls_id = ls_id; + __entry->lkb_id = lkb_id; + __entry->sb_flags = sb_flags; + __entry->sb_status = sb_status; - r = lkb->lkb_resource; - if (r) - memcpy(__get_dynamic_array(res_name), r->res_name, - __get_dynamic_array_len(res_name)); + memcpy(__get_dynamic_array(res_name), res_name, + __get_dynamic_array_len(res_name)); ), TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s", -- cgit v1.2.3 From 16e98462b764002b0f747dc93e5d03bd65d0b019 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:38 -0400 Subject: dlm: remove callback queue debugfs functionality Remove the ability to dump pending lkb callbacks from debugfs. This prepares for separating lkb structs from callbacks.
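For readers who have not used the interface being removed: each dump format is a seq_file iterator registered as a file in the lockspace's debugfs directory. The following is a condensed, generic sketch of that wiring with invented names, assuming standard seq_file semantics; it is not the removed code itself.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* iterator stubs: a real dump would walk the lockspace's rsb buckets */
static void *dump_start(struct seq_file *s, loff_t *pos) { return NULL; }
static void *dump_next(struct seq_file *s, void *v, loff_t *pos) { return NULL; }
static void dump_stop(struct seq_file *s, void *v) { }
static int dump_show(struct seq_file *s, void *v) { return 0; }

static const struct seq_operations dump_seq_ops = {
	.start = dump_start,
	.next  = dump_next,
	.stop  = dump_stop,
	.show  = dump_show,
};

static int dump_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dump_seq_ops);

	if (!ret)	/* hand the per-lockspace data to the iterator */
		((struct seq_file *)file->private_data)->private = inode->i_private;
	return ret;
}

static const struct file_operations dump_fops = {
	.owner   = THIS_MODULE,
	.open    = dump_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/* registered (and, in this patch, unregistered) per lockspace with:
 * debugfs_create_file(name, 0644, dlm_root, ls, &dump_fops); */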
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 96 ------------------------------------------------------- 1 file changed, 96 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 4fa11d9ddbb6..289d959c7700 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -366,52 +366,6 @@ static void print_format4(struct dlm_rsb *r, struct seq_file *s) unlock_rsb(r); } -static void print_format5_lock(struct seq_file *s, struct dlm_lkb *lkb) -{ - struct dlm_callback *cb; - - /* lkb_id lkb_flags mode flags sb_status sb_flags */ - - spin_lock(&lkb->lkb_cb_lock); - list_for_each_entry(cb, &lkb->lkb_callbacks, list) { - seq_printf(s, "%x %x %d %x %d %x\n", - lkb->lkb_id, - dlm_iflags_val(lkb), - cb->mode, - cb->flags, - cb->sb_status, - cb->sb_flags); - } - spin_unlock(&lkb->lkb_cb_lock); -} - -static void print_format5(struct dlm_rsb *r, struct seq_file *s) -{ - struct dlm_lkb *lkb; - - lock_rsb(r); - - list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { - print_format5_lock(s, lkb); - if (seq_has_overflowed(s)) - goto out; - } - - list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - print_format5_lock(s, lkb); - if (seq_has_overflowed(s)) - goto out; - } - - list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) { - print_format5_lock(s, lkb); - if (seq_has_overflowed(s)) - goto out; - } - out: - unlock_rsb(r); -} - struct rsbtbl_iter { struct dlm_rsb *rsb; unsigned bucket; @@ -455,13 +409,6 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr) } print_format4(ri->rsb, seq); break; - case 5: - if (ri->header) { - seq_puts(seq, "lkb_id lkb_flags mode flags sb_status sb_flags\n"); - ri->header = 0; - } - print_format5(ri->rsb, seq); - break; } return 0; @@ -471,7 +418,6 @@ static const struct seq_operations format1_seq_ops; static const struct seq_operations format2_seq_ops; static const struct seq_operations format3_seq_ops; static const struct seq_operations format4_seq_ops; -static const struct seq_operations format5_seq_ops; static void *table_seq_start(struct seq_file *seq, loff_t *pos) { @@ -503,8 +449,6 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) ri->format = 3; if (seq->op == &format4_seq_ops) ri->format = 4; - if (seq->op == &format5_seq_ops) - ri->format = 5; tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; @@ -659,18 +603,10 @@ static const struct seq_operations format4_seq_ops = { .show = table_seq_show, }; -static const struct seq_operations format5_seq_ops = { - .start = table_seq_start, - .next = table_seq_next, - .stop = table_seq_stop, - .show = table_seq_show, -}; - static const struct file_operations format1_fops; static const struct file_operations format2_fops; static const struct file_operations format3_fops; static const struct file_operations format4_fops; -static const struct file_operations format5_fops; static int table_open1(struct inode *inode, struct file *file) { @@ -757,20 +693,6 @@ static int table_open4(struct inode *inode, struct file *file) return 0; } -static int table_open5(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - int ret; - - ret = seq_open(file, &format5_seq_ops); - if (ret) - return ret; - - seq = file->private_data; - seq->private = inode->i_private; /* the dlm_ls */ - return 0; -} - static const struct file_operations format1_fops = { .owner = THIS_MODULE, .open = table_open1, @@ -804,14 +726,6 @@ static const struct file_operations format4_fops = { .release = seq_release }; -static const struct file_operations format5_fops = { - .owner = THIS_MODULE, - .open = table_open5, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - /* * dump lkb's on the ls_waiters list */ @@ -1021,16 +935,6 @@ void dlm_create_debug_file(struct dlm_ls *ls) dlm_root, ls, &waiters_fops); - - /* format 5 */ - - snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name); - - ls->ls_debug_queued_asts_dentry = debugfs_create_file(name, - 0644, - dlm_root, - ls, - &format5_fops); } void __init dlm_register_debugfs(void) -- cgit v1.2.3 From 4ed424280942f40b033d669eb9204a52e6d41639 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:39 -0400 Subject: dlm: save callback debug info earlier Save lkb callback info when queueing the callback so that the lkb struct is not needed in the callback workqueue processing. 
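The move is mechanical: the last-bast and last-cast bookkeeping that dlm_callback_work() previously did at delivery time is now done at enqueue time, while the lkb is necessarily in hand. Condensed from the patch that follows:

	/* enqueue side: the lkb is available here anyway */
	if (flags & DLM_CB_BAST) {
		lkb->lkb_last_bast_time = ktime_get();
		lkb->lkb_last_bast_mode = cb->mode;
	} else if (flags & DLM_CB_CAST) {
		lkb->lkb_last_cast_time = ktime_get();
	}

	/* delivery side (workqueue): no lkb bookkeeping remains, which is
	 * what later lets the worker run from the callback struct alone */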
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index dd7cca3c1472..cadbcbe0786b 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -105,8 +105,13 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, list_add_tail(&cb->list, &lkb->lkb_callbacks); - if (flags & DLM_CB_CAST) + if (flags & DLM_CB_BAST) { + lkb->lkb_last_bast_time = ktime_get(); + lkb->lkb_last_bast_mode = cb->mode; + } else if (flags & DLM_CB_CAST) { dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb); + lkb->lkb_last_cast_time = ktime_get(); + } dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb); @@ -194,8 +199,6 @@ void dlm_callback_work(struct work_struct *work) trace_dlm_bast(ls->ls_global_id, lkb->lkb_id, cb->mode, rsb->res_name, rsb->res_length); - lkb->lkb_last_bast_time = ktime_get(); - lkb->lkb_last_bast_mode = cb->mode; bastfn(lkb->lkb_astparam, cb->mode); } else if (cb->flags & DLM_CB_CAST) { lkb->lkb_lksb->sb_status = cb->sb_status; @@ -203,7 +206,6 @@ void dlm_callback_work(struct work_struct *work) trace_dlm_ast(ls->ls_global_id, lkb->lkb_id, cb->sb_flags, cb->sb_status, rsb->res_name, rsb->res_length); - lkb->lkb_last_cast_time = ktime_get(); castfn(lkb->lkb_astparam); } -- cgit v1.2.3 From 0175e51b5134b55c89364aae68ec16271c67e472 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:40 -0400 Subject: dlm: combine switch case fail and default statements This patch combines the failure and default cases for enqueueing and dequeueing a callback on the lkb callback queue; neither case should ever happen, so both now end in the same WARN_ON_ONCE path. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 5 ++--- fs/dlm/user.c | 9 ++++----- 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index cadbcbe0786b..5ea0b62f276b 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -160,11 +160,10 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, } spin_unlock(&ls->ls_cb_lock); break; - case DLM_ENQUEUE_CALLBACK_FAILURE: - WARN_ON_ONCE(1); - break; case DLM_ENQUEUE_CALLBACK_SUCCESS: break; + case DLM_ENQUEUE_CALLBACK_FAILURE: + fallthrough; default: WARN_ON_ONCE(1); break; diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 6f99bbeeac9b..fa99b6074e5c 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -231,10 +231,6 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags); switch (rv) { - case DLM_ENQUEUE_CALLBACK_FAILURE: - spin_unlock(&proc->asts_spin); - WARN_ON_ONCE(1); - goto out; case DLM_ENQUEUE_CALLBACK_NEED_SCHED: kref_get(&lkb->lkb_ref); list_add_tail(&lkb->lkb_cb_list, &proc->asts); @@ -242,9 +238,12 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, break; case DLM_ENQUEUE_CALLBACK_SUCCESS: break; + case DLM_ENQUEUE_CALLBACK_FAILURE: + fallthrough; default: + spin_unlock(&proc->asts_spin); WARN_ON_ONCE(1); - break; + goto out; } spin_unlock(&proc->asts_spin); -- cgit v1.2.3 From 986ae3c2a8dfc1e229cabe9cc2e0b01b721c8980 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:41 -0400 Subject: dlm: fix race between final callback and remove This patch fixes the following issue:

node 1 is dir
node 2 is master
node 3 is other

1->2: unlock
2: put final lkb, rsb moved to toss
2->1: unlock_reply
1: queue lkb callback with EUNLOCK
2->1: remove
1: receive_remove ignored (rsb on keep because of queued lkb callback)
1: complete lkb callback, put_lkb, move rsb to toss
3->1: lookup
1->3: lookup_reply master=2
3->2: request
2->3: request_reply EBADR

In summary: An unexpected lkb reference causes the rsb to remain on the wrong list. The rsb being on the wrong list causes receive_remove to be ignored. An ignored receive_remove causes inconsistent dir and master state. This sequence requires an unusually long delay in delivering the unlock callback, because the remove message from 2->1 usually happens after some seconds. So, it's not known exactly how frequently this sequence occurs in practice. It's possible that the same end result could also have another unknown cause. The solution for this issue is to further separate callback state from the lkb, so that an lkb reference (and from that, an rsb ref) are not held while a callback remains queued. Then, within the unlock_reply, the lkb will be freed and the rsb moved to the toss list. So, the receive_remove will not be ignored. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 166 +++++++++++++++++++------------------------------- fs/dlm/ast.h | 10 +-- fs/dlm/dlm_internal.h | 60 ++++++++++-------- fs/dlm/lock.c | 20 +++--- fs/dlm/user.c | 86 +++++++------------------- 5 files changed, 129 insertions(+), 213 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 5ea0b62f276b..e9da812352b4 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -37,12 +37,32 @@ void dlm_callback_set_last_ptr(struct dlm_callback **from, *from = to; } -int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, - int status, uint32_t sbflags) +static void dlm_callback_work(struct work_struct *work) { - struct dlm_ls *ls = lkb->lkb_resource->res_ls; + struct dlm_callback *cb = container_of(work, struct dlm_callback, work); + + if (cb->flags & DLM_CB_BAST) { + trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name, + cb->res_length); + cb->bastfn(cb->astparam, cb->mode); + } else if (cb->flags & DLM_CB_CAST) { + trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status, + cb->sb_flags, cb->res_name, cb->res_length); + cb->lkb_lksb->sb_status = cb->sb_status; + cb->lkb_lksb->sb_flags = cb->sb_flags; + cb->astfn(cb->astparam); + } + + kref_put(&cb->ref, dlm_release_callback); +} + +int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, + int status, uint32_t sbflags, + struct dlm_callback **cb) +{ + struct dlm_rsb *rsb = lkb->lkb_resource; int rv = DLM_ENQUEUE_CALLBACK_SUCCESS; - struct dlm_callback *cb; + struct dlm_ls *ls = rsb->res_ls; int copy_lvb = 0; int prev_mode; @@ -88,57 +108,46 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, } } - cb = dlm_allocate_cb(); - if (!cb) { + *cb = dlm_allocate_cb(); + if (!*cb) { rv = DLM_ENQUEUE_CALLBACK_FAILURE; goto out; } - cb->flags = flags; - cb->mode = mode; - cb->sb_status = status; - cb->sb_flags = (sbflags & 0x000000FF); - cb->copy_lvb = copy_lvb; - kref_init(&cb->ref); - if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags)) - rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED; + /* for tracing */ + (*cb)->lkb_id = lkb->lkb_id; + (*cb)->ls_id = ls->ls_global_id; + memcpy((*cb)->res_name, rsb->res_name, rsb->res_length); + (*cb)->res_length = rsb->res_length; - list_add_tail(&cb->list, &lkb->lkb_callbacks); + (*cb)->flags = flags; + (*cb)->mode = mode; + (*cb)->sb_status = status; + (*cb)->sb_flags = (sbflags & 0x000000FF); + (*cb)->copy_lvb = copy_lvb;
+ (*cb)->lkb_lksb = lkb->lkb_lksb; + kref_init(&(*cb)->ref); if (flags & DLM_CB_BAST) { lkb->lkb_last_bast_time = ktime_get(); - lkb->lkb_last_bast_mode = cb->mode; + lkb->lkb_last_bast_mode = mode; } else if (flags & DLM_CB_CAST) { - dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb); + dlm_callback_set_last_ptr(&lkb->lkb_last_cast, *cb); lkb->lkb_last_cast_time = ktime_get(); } - dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb); + dlm_callback_set_last_ptr(&lkb->lkb_last_cb, *cb); + rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED; - out: +out: return rv; } -int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb) -{ - /* oldest undelivered cb is callbacks first entry */ - *cb = list_first_entry_or_null(&lkb->lkb_callbacks, - struct dlm_callback, list); - if (!*cb) - return DLM_DEQUEUE_CALLBACK_EMPTY; - - /* remove it from callbacks so shift others down */ - list_del(&(*cb)->list); - if (list_empty(&lkb->lkb_callbacks)) - return DLM_DEQUEUE_CALLBACK_LAST; - - return DLM_DEQUEUE_CALLBACK_SUCCESS; -} - void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, - uint32_t sbflags) + uint32_t sbflags) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; + struct dlm_callback *cb; int rv; if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { @@ -146,18 +155,20 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, return; } - spin_lock(&lkb->lkb_cb_lock); - rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags); + rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, + &cb); switch (rv) { case DLM_ENQUEUE_CALLBACK_NEED_SCHED: - kref_get(&lkb->lkb_ref); + cb->astfn = lkb->lkb_astfn; + cb->bastfn = lkb->lkb_bastfn; + cb->astparam = lkb->lkb_astparam; + INIT_WORK(&cb->work, dlm_callback_work); spin_lock(&ls->ls_cb_lock); - if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) { - list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay); - } else { - queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); - } + if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) + list_add(&cb->list, &ls->ls_cb_delay); + else + queue_work(ls->ls_callback_wq, &cb->work); spin_unlock(&ls->ls_cb_lock); break; case DLM_ENQUEUE_CALLBACK_SUCCESS: @@ -168,67 +179,12 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, WARN_ON_ONCE(1); break; } - spin_unlock(&lkb->lkb_cb_lock); -} - -void dlm_callback_work(struct work_struct *work) -{ - struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work); - struct dlm_ls *ls = lkb->lkb_resource->res_ls; - struct dlm_rsb *rsb = lkb->lkb_resource; - void (*castfn) (void *astparam); - void (*bastfn) (void *astparam, int mode); - struct dlm_callback *cb; - int rv; - - spin_lock(&lkb->lkb_cb_lock); - rv = dlm_dequeue_lkb_callback(lkb, &cb); - if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY)) { - clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); - spin_unlock(&lkb->lkb_cb_lock); - goto out; - } - spin_unlock(&lkb->lkb_cb_lock); - - for (;;) { - castfn = lkb->lkb_astfn; - bastfn = lkb->lkb_bastfn; - - if (cb->flags & DLM_CB_BAST) { - trace_dlm_bast(ls->ls_global_id, lkb->lkb_id, - cb->mode, rsb->res_name, - rsb->res_length); - bastfn(lkb->lkb_astparam, cb->mode); - } else if (cb->flags & DLM_CB_CAST) { - lkb->lkb_lksb->sb_status = cb->sb_status; - lkb->lkb_lksb->sb_flags = cb->sb_flags; - trace_dlm_ast(ls->ls_global_id, lkb->lkb_id, - cb->sb_flags, cb->sb_status, - rsb->res_name, rsb->res_length); - castfn(lkb->lkb_astparam); - } - - kref_put(&cb->ref, dlm_release_callback); - - spin_lock(&lkb->lkb_cb_lock); - rv = 
dlm_dequeue_lkb_callback(lkb, &cb); - if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) { - clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); - spin_unlock(&lkb->lkb_cb_lock); - break; - } - spin_unlock(&lkb->lkb_cb_lock); - } - -out: - /* undo kref_get from dlm_add_callback, may cause lkb to be freed */ - dlm_put_lkb(lkb); } int dlm_callback_start(struct dlm_ls *ls) { - ls->ls_callback_wq = alloc_workqueue("dlm_callback", - WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); + ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!ls->ls_callback_wq) { log_print("can't start dlm_callback workqueue"); return -ENOMEM; @@ -257,7 +213,7 @@ void dlm_callback_suspend(struct dlm_ls *ls) void dlm_callback_resume(struct dlm_ls *ls) { - struct dlm_lkb *lkb, *safe; + struct dlm_callback *cb, *safe; int count = 0, sum = 0; bool empty; @@ -266,9 +222,9 @@ void dlm_callback_resume(struct dlm_ls *ls) more: spin_lock(&ls->ls_cb_lock); - list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) { - list_del_init(&lkb->lkb_cb_list); - queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); + list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) { + list_del(&cb->list); + queue_work(ls->ls_callback_wq, &cb->work); count++; if (count == MAX_CB_QUEUE) break; diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h index ce007892dc2d..9bd12409e1ee 100644 --- a/fs/dlm/ast.h +++ b/fs/dlm/ast.h @@ -14,19 +14,15 @@ #define DLM_ENQUEUE_CALLBACK_NEED_SCHED 1 #define DLM_ENQUEUE_CALLBACK_SUCCESS 0 #define DLM_ENQUEUE_CALLBACK_FAILURE -1 -int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, - int status, uint32_t sbflags); -#define DLM_DEQUEUE_CALLBACK_EMPTY 2 -#define DLM_DEQUEUE_CALLBACK_LAST 1 -#define DLM_DEQUEUE_CALLBACK_SUCCESS 0 -int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb); +int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, + int status, uint32_t sbflags, + struct dlm_callback **cb); void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags); void dlm_callback_set_last_ptr(struct dlm_callback **from, struct dlm_callback *to); void dlm_release_callback(struct kref *ref); -void dlm_callback_work(struct work_struct *work); int dlm_callback_start(struct dlm_ls *ls); void dlm_callback_stop(struct dlm_ls *ls); void dlm_callback_suspend(struct dlm_ls *ls); diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index a9137c90f348..cda1526fd15b 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -16,6 +16,7 @@ * This is the main header file to be included in each DLM source file. 
*/ +#include #include #include #include @@ -204,8 +205,7 @@ struct dlm_args { #define DLM_IFL_OVERLAP_CANCEL_BIT 20 #define DLM_IFL_ENDOFLIFE_BIT 21 #define DLM_IFL_DEADLOCK_CANCEL_BIT 24 -#define DLM_IFL_CB_PENDING_BIT 25 -#define __DLM_IFL_MAX_BIT DLM_IFL_CB_PENDING_BIT +#define __DLM_IFL_MAX_BIT DLM_IFL_DEADLOCK_CANCEL_BIT /* lkb_dflags */ @@ -217,12 +217,45 @@ struct dlm_args { #define DLM_CB_CAST 0x00000001 #define DLM_CB_BAST 0x00000002 +/* much of this is just saving user space pointers associated with the + * lock that we pass back to the user lib with an ast + */ + +struct dlm_user_args { + struct dlm_user_proc *proc; /* each process that opens the lockspace + * device has private data + * (dlm_user_proc) on the struct file, + * the process's locks point back to it + */ + struct dlm_lksb lksb; + struct dlm_lksb __user *user_lksb; + void __user *castparam; + void __user *castaddr; + void __user *bastparam; + void __user *bastaddr; + uint64_t xid; +}; + struct dlm_callback { uint32_t flags; /* DLM_CBF_ */ int sb_status; /* copy to lksb status */ uint8_t sb_flags; /* copy to lksb flags */ int8_t mode; /* rq mode of bast, gr mode of cast */ - int copy_lvb; + bool copy_lvb; + struct dlm_lksb *lkb_lksb; + unsigned char lvbptr[DLM_USER_LVB_LEN]; + + union { + void *astparam; /* caller's ast arg */ + struct dlm_user_args ua; + }; + struct work_struct work; + void (*bastfn)(void *astparam, int mode); + void (*astfn)(void *astparam); + char res_name[DLM_RESNAME_MAXLEN]; + size_t res_length; + uint32_t ls_id; + uint32_t lkb_id; struct list_head list; struct kref ref; @@ -256,10 +289,6 @@ struct dlm_lkb { struct list_head lkb_ownqueue; /* list of locks for a process */ ktime_t lkb_timestamp; - spinlock_t lkb_cb_lock; - struct work_struct lkb_cb_work; - struct list_head lkb_cb_list; /* for ls_cb_delay or proc->asts */ - struct list_head lkb_callbacks; struct dlm_callback *lkb_last_cast; struct dlm_callback *lkb_last_cb; int lkb_last_bast_mode; @@ -688,23 +717,6 @@ struct dlm_ls { #define LSFL_CB_DELAY 9 #define LSFL_NODIR 10 -/* much of this is just saving user space pointers associated with the - lock that we pass back to the user lib with an ast */ - -struct dlm_user_args { - struct dlm_user_proc *proc; /* each process that opens the lockspace - device has private data - (dlm_user_proc) on the struct file, - the process's locks point back to it*/ - struct dlm_lksb lksb; - struct dlm_lksb __user *user_lksb; - void __user *castparam; - void __user *castaddr; - void __user *bastparam; - void __user *bastaddr; - uint64_t xid; -}; - #define DLM_PROC_FLAGS_CLOSING 1 #define DLM_PROC_FLAGS_COMPAT 2 diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index fd752dd03896..198672446dcd 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1203,10 +1203,6 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, kref_init(&lkb->lkb_ref); INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); - INIT_LIST_HEAD(&lkb->lkb_cb_list); - INIT_LIST_HEAD(&lkb->lkb_callbacks); - spin_lock_init(&lkb->lkb_cb_lock); - INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); idr_preload(GFP_NOFS); spin_lock(&ls->ls_lkbidr_spin); @@ -6003,6 +5999,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) { + struct dlm_callback *cb, *cb_safe; struct dlm_lkb *lkb, *safe; dlm_lock_recovery(ls); @@ -6032,10 +6029,9 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) dlm_put_lkb(lkb); } - 
list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { - dlm_purge_lkb_callbacks(lkb); - list_del_init(&lkb->lkb_cb_list); - dlm_put_lkb(lkb); + list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { + list_del(&cb->list); + kref_put(&cb->ref, dlm_release_callback); } spin_unlock(&ls->ls_clear_proc_locks); @@ -6044,6 +6040,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) { + struct dlm_callback *cb, *cb_safe; struct dlm_lkb *lkb, *safe; while (1) { @@ -6073,10 +6070,9 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) spin_unlock(&proc->locks_spin); spin_lock(&proc->asts_spin); - list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { - dlm_purge_lkb_callbacks(lkb); - list_del_init(&lkb->lkb_cb_list); - dlm_put_lkb(lkb); + list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { + list_del(&cb->list); + kref_put(&cb->ref, dlm_release_callback); } spin_unlock(&proc->asts_spin); } diff --git a/fs/dlm/user.c b/fs/dlm/user.c index fa99b6074e5c..334a6d64d413 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -21,6 +21,7 @@ #include "dlm_internal.h" #include "lockspace.h" #include "lock.h" +#include "lvb_table.h" #include "user.h" #include "ast.h" #include "config.h" @@ -144,24 +145,6 @@ static void compat_output(struct dlm_lock_result *res, } #endif -/* should held proc->asts_spin lock */ -void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb) -{ - struct dlm_callback *cb, *safe; - - list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) { - list_del(&cb->list); - kref_put(&cb->ref, dlm_release_callback); - } - - clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); - - /* invalidate */ - dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL); - dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL); - lkb->lkb_last_bast_mode = -1; -} - /* Figure out if this lock is at the end of its life and no longer available for the application to use. The lkb still exists until the final ast is read. A lock becomes EOL in three situations: @@ -198,6 +181,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, struct dlm_ls *ls; struct dlm_user_args *ua; struct dlm_user_proc *proc; + struct dlm_callback *cb; int rv; if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) || @@ -229,11 +213,18 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, spin_lock(&proc->asts_spin); - rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags); + rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb); switch (rv) { case DLM_ENQUEUE_CALLBACK_NEED_SCHED: - kref_get(&lkb->lkb_ref); - list_add_tail(&lkb->lkb_cb_list, &proc->asts); + cb->ua = *ua; + cb->lkb_lksb = &cb->ua.lksb; + if (cb->copy_lvb) { + memcpy(cb->lvbptr, ua->lksb.sb_lvbptr, + DLM_USER_LVB_LEN); + cb->lkb_lksb->sb_lvbptr = cb->lvbptr; + } + + list_add_tail(&cb->list, &proc->asts); wake_up_interruptible(&proc->wait); break; case DLM_ENQUEUE_CALLBACK_SUCCESS: @@ -801,10 +792,8 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dlm_user_proc *proc = file->private_data; - struct dlm_lkb *lkb; DECLARE_WAITQUEUE(wait, current); struct dlm_callback *cb; - struct dlm_rsb *rsb; int rv, ret; if (count == sizeof(struct dlm_device_version)) { @@ -824,8 +813,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, #endif return -EINVAL; - try_another: - /* do we really need this? 
can a read happen after a close? */ if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)) return -EINVAL; @@ -860,55 +847,24 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, without removing lkb_cb_list; so empty lkb_cb_list is always consistent with empty lkb_callbacks */ - lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list); - - rv = dlm_dequeue_lkb_callback(lkb, &cb); - switch (rv) { - case DLM_DEQUEUE_CALLBACK_EMPTY: - /* this shouldn't happen; lkb should have been removed from - * list when last item was dequeued - */ - log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id); - list_del_init(&lkb->lkb_cb_list); - spin_unlock(&proc->asts_spin); - /* removes ref for proc->asts, may cause lkb to be freed */ - dlm_put_lkb(lkb); - WARN_ON_ONCE(1); - goto try_another; - case DLM_DEQUEUE_CALLBACK_LAST: - list_del_init(&lkb->lkb_cb_list); - clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); - break; - case DLM_DEQUEUE_CALLBACK_SUCCESS: - break; - default: - WARN_ON_ONCE(1); - break; - } + cb = list_first_entry(&proc->asts, struct dlm_callback, list); + list_del(&cb->list); spin_unlock(&proc->asts_spin); - rsb = lkb->lkb_resource; if (cb->flags & DLM_CB_BAST) { - trace_dlm_bast(rsb->res_ls->ls_global_id, lkb->lkb_id, - cb->mode, rsb->res_name, rsb->res_length); + trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name, + cb->res_length); } else if (cb->flags & DLM_CB_CAST) { - lkb->lkb_lksb->sb_status = cb->sb_status; - lkb->lkb_lksb->sb_flags = cb->sb_flags; - trace_dlm_ast(rsb->res_ls->ls_global_id, lkb->lkb_id, - cb->sb_flags, cb->sb_status, rsb->res_name, - rsb->res_length); + cb->lkb_lksb->sb_status = cb->sb_status; + cb->lkb_lksb->sb_flags = cb->sb_flags; + trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status, + cb->sb_flags, cb->res_name, cb->res_length); } - ret = copy_result_to_user(lkb->lkb_ua, + ret = copy_result_to_user(&cb->ua, test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), cb->flags, cb->mode, cb->copy_lvb, buf, count); - kref_put(&cb->ref, dlm_release_callback); - - /* removes ref for proc->asts, may cause lkb to be freed */ - if (rv == DLM_DEQUEUE_CALLBACK_LAST) - dlm_put_lkb(lkb); - return ret; } -- cgit v1.2.3 From 2bec1bbd55cf96b313566d2e1b9ff54451685e18 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 28 Mar 2024 11:48:42 -0400 Subject: dlm: remove callback reference counting Get rid of the unnecessary refcounting on callback structs. Copy interesting callback info into the lkb struct rather than maintaining pointers to callback structs from the lkb. This goes back to the way things were done prior to commit 61bed0baa4db ("fs: dlm: use a non-static queue for callbacks"). 
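The shape of the refactor: rather than the lkb holding kref-counted pointers to its most recent callback structs, it keeps plain copies of the few scalar fields those pointers were consulted for. Each callback then has a single owner, the queue it sits on, and can be freed unconditionally after delivery. A schematic sketch, with the struct grouping invented for illustration:

#include <stdint.h>

/* before: shared ownership forced refcounting
 *	struct dlm_lkb { struct dlm_callback *lkb_last_cast, *lkb_last_cb; };
 *	updated via dlm_callback_set_last_ptr(), i.e. kref_get()/kref_put()
 *
 * after: plain copies of the fields that were actually consulted */
struct last_cb_info {			/* hypothetical grouping; in the patch
					 * these fields sit in struct dlm_lkb */
	int8_t	last_cast_cb_mode;
	int8_t	last_bast_cb_mode;
	int8_t	last_cb_mode;
	uint8_t	last_cb_flags;
};

static inline void remember_cb(struct last_cb_info *info,
			       int8_t mode, uint8_t flags)
{
	/* copy at queue time; no pointer to the callback is kept, so the
	 * callback has exactly one owner and needs no kref */
	info->last_cb_mode = mode;
	info->last_cb_flags = flags;
}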
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 56 ++++++++++++++++----------------------------------- fs/dlm/ast.h | 3 --- fs/dlm/debug_fs.c | 2 +- fs/dlm/dlm_internal.h | 8 ++++---- fs/dlm/lock.c | 8 +++++--- fs/dlm/memory.c | 4 ---- fs/dlm/user.c | 2 +- 7 files changed, 28 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index e9da812352b4..03879c94fadb 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -18,25 +18,6 @@ #include "user.h" #include "ast.h" -void dlm_release_callback(struct kref *ref) -{ - struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref); - - dlm_free_cb(cb); -} - -void dlm_callback_set_last_ptr(struct dlm_callback **from, - struct dlm_callback *to) -{ - if (*from) - kref_put(&(*from)->ref, dlm_release_callback); - - if (to) - kref_get(&to->ref); - - *from = to; -} - static void dlm_callback_work(struct work_struct *work) { struct dlm_callback *cb = container_of(work, struct dlm_callback, work); @@ -53,7 +34,7 @@ static void dlm_callback_work(struct work_struct *work) cb->astfn(cb->astparam); } - kref_put(&cb->ref, dlm_release_callback); + dlm_free_cb(cb); } int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, @@ -70,11 +51,11 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, /* if cb is a bast, it should be skipped if the blocking mode is * compatible with the last granted mode */ - if (lkb->lkb_last_cast) { - if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) { + if (lkb->lkb_last_cast_cb_mode != -1) { + if (dlm_modes_compat(mode, lkb->lkb_last_cast_cb_mode)) { log_debug(ls, "skip %x bast mode %d for cast mode %d", lkb->lkb_id, mode, - lkb->lkb_last_cast->mode); + lkb->lkb_last_cast_cb_mode); goto out; } } @@ -85,8 +66,9 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, * is a bast for the same mode or a more restrictive mode. 
* (the addional > PR check is needed for PR/CW inversion) */ - if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) { - prev_mode = lkb->lkb_last_cb->mode; + if (lkb->lkb_last_cb_mode != -1 && + lkb->lkb_last_cb_flags & DLM_CB_BAST) { + prev_mode = lkb->lkb_last_cb_mode; if ((prev_mode == mode) || (prev_mode > mode && prev_mode > DLM_LOCK_PR)) { @@ -95,19 +77,25 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, goto out; } } + + lkb->lkb_last_bast_time = ktime_get(); + lkb->lkb_last_bast_cb_mode = mode; } else if (flags & DLM_CB_CAST) { if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { - if (lkb->lkb_last_cast) - prev_mode = lkb->lkb_last_cb->mode; - else - prev_mode = -1; + prev_mode = lkb->lkb_last_cast_cb_mode; if (!status && lkb->lkb_lksb->sb_lvbptr && dlm_lvb_operations[prev_mode + 1][mode + 1]) copy_lvb = 1; } + + lkb->lkb_last_cast_cb_mode = mode; + lkb->lkb_last_cast_time = ktime_get(); } + lkb->lkb_last_cb_mode = mode; + lkb->lkb_last_cb_flags = flags; + *cb = dlm_allocate_cb(); if (!*cb) { rv = DLM_ENQUEUE_CALLBACK_FAILURE; @@ -126,17 +114,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, (*cb)->sb_flags = (sbflags & 0x000000FF); (*cb)->copy_lvb = copy_lvb; (*cb)->lkb_lksb = lkb->lkb_lksb; - kref_init(&(*cb)->ref); - - if (flags & DLM_CB_BAST) { - lkb->lkb_last_bast_time = ktime_get(); - lkb->lkb_last_bast_mode = mode; - } else if (flags & DLM_CB_CAST) { - dlm_callback_set_last_ptr(&lkb->lkb_last_cast, *cb); - lkb->lkb_last_cast_time = ktime_get(); - } - dlm_callback_set_last_ptr(&lkb->lkb_last_cb, *cb); rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED; out: diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h index 9bd12409e1ee..9093ff043bee 100644 --- a/fs/dlm/ast.h +++ b/fs/dlm/ast.h @@ -19,10 +19,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, struct dlm_callback **cb); void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags); -void dlm_callback_set_last_ptr(struct dlm_callback **from, - struct dlm_callback *to); -void dlm_release_callback(struct kref *ref); int dlm_callback_start(struct dlm_ls *ls); void dlm_callback_stop(struct dlm_ls *ls); void dlm_callback_suspend(struct dlm_ls *ls); diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 289d959c7700..19cdedd56629 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -247,7 +247,7 @@ static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb, lkb->lkb_status, lkb->lkb_grmode, lkb->lkb_rqmode, - lkb->lkb_last_bast_mode, + lkb->lkb_last_bast_cb_mode, rsb_lookup, lkb->lkb_wait_type, lkb->lkb_lvbseq, diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index cda1526fd15b..1d2ee5c2d23d 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -258,7 +258,6 @@ struct dlm_callback { uint32_t lkb_id; struct list_head list; - struct kref ref; }; struct dlm_lkb { @@ -289,9 +288,10 @@ struct dlm_lkb { struct list_head lkb_ownqueue; /* list of locks for a process */ ktime_t lkb_timestamp; - struct dlm_callback *lkb_last_cast; - struct dlm_callback *lkb_last_cb; - int lkb_last_bast_mode; + int8_t lkb_last_cast_cb_mode; + int8_t lkb_last_bast_cb_mode; + int8_t lkb_last_cb_mode; + uint8_t lkb_last_cb_flags; ktime_t lkb_last_cast_time; /* for debugging */ ktime_t lkb_last_bast_time; /* for debugging */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 198672446dcd..c8426f6f518c 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1197,7 +1197,9 @@ static int _create_lkb(struct dlm_ls *ls, struct 
dlm_lkb **lkb_ret, if (!lkb) return -ENOMEM; - lkb->lkb_last_bast_mode = -1; + lkb->lkb_last_bast_cb_mode = DLM_LOCK_IV; + lkb->lkb_last_cast_cb_mode = DLM_LOCK_IV; + lkb->lkb_last_cb_mode = DLM_LOCK_IV; lkb->lkb_nodeid = -1; lkb->lkb_grmode = DLM_LOCK_IV; kref_init(&lkb->lkb_ref); @@ -6031,7 +6033,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { list_del(&cb->list); - kref_put(&cb->ref, dlm_release_callback); + dlm_free_cb(cb); } spin_unlock(&ls->ls_clear_proc_locks); @@ -6072,7 +6074,7 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) spin_lock(&proc->asts_spin); list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { list_del(&cb->list); - kref_put(&cb->ref, dlm_release_callback); + dlm_free_cb(cb); } spin_unlock(&proc->asts_spin); } diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index 64f212a066cf..be9398ddf357 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -127,10 +127,6 @@ void dlm_free_lkb(struct dlm_lkb *lkb) } } - /* drop references if they are set */ - dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL); - dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL); - kmem_cache_free(lkb_cache, lkb); } diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 334a6d64d413..b4971ba4bdd6 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -864,7 +864,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, ret = copy_result_to_user(&cb->ua, test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), cb->flags, cb->mode, cb->copy_lvb, buf, count); - kref_put(&cb->ref, dlm_release_callback); + dlm_free_cb(cb); return ret; } -- cgit v1.2.3 From ca0dcef7cf6c2f746403eac4ac427bd2115e07a8 Mon Sep 17 00:00:00 2001 From: Kunwu Chan Date: Tue, 2 Apr 2024 15:17:56 -0400 Subject: dlm: Simplify the allocation of slab caches in dlm_lowcomms_msg_cache_create Use the new KMEM_CACHE() macro instead of direct kmem_cache_create to simplify the creation of SLAB caches. Signed-off-by: Kunwu Chan Acked-by: Alexander Aring Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 6296c62c10fa..712165a1e567 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -248,7 +248,7 @@ struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void) struct kmem_cache *dlm_lowcomms_msg_cache_create(void) { - return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL); + return KMEM_CACHE(dlm_msg, 0); } /* need to held writequeue_lock */ -- cgit v1.2.3 From 98808644b920ed7bb33fe7b33d8f09d4e392e6c2 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:17:57 -0400 Subject: dlm: remove allocation parameter in msg allocation Remove the context parameter for message allocations and always use GFP_ATOMIC. This prepares for softirq message processing. 
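The rationale: these paths are being prepared to run in softirq context, where sleeping allocation modes such as GFP_NOFS are not allowed, so GFP_ATOMIC becomes the only valid choice and a gfp_t parameter threaded through every level of the call chain is dead weight. Condensed from the patch that follows:

	/* before:
	 *	create_message(..., GFP_NOFS)
	 *	  -> dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb)
	 *	    -> dlm_lowcomms_new_msg(nodeid, len, GFP_NOFS, ...)
	 *	      -> dlm_allocate_msg(GFP_NOFS)
	 *
	 * after: the parameter disappears from the whole chain and the
	 * leaf allocation is pinned to the one mode that never sleeps */
	struct dlm_msg *dlm_allocate_msg(void)
	{
		return kmem_cache_alloc(msg_cache, GFP_ATOMIC);
	}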
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lock.c | 31 ++++++++++++------------------- fs/dlm/lowcomms.c | 16 +++++++--------- fs/dlm/lowcomms.h | 5 ++--- fs/dlm/memory.c | 8 ++++---- fs/dlm/memory.h | 4 ++-- fs/dlm/midcomms.c | 24 ++++++++++-------------- fs/dlm/midcomms.h | 3 +-- fs/dlm/rcom.c | 7 +++---- 8 files changed, 41 insertions(+), 57 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index c8426f6f518c..2f94ffc3cf82 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -3330,8 +3330,7 @@ int dlm_unlock(dlm_lockspace_t *lockspace, static int _create_message(struct dlm_ls *ls, int mb_len, int to_nodeid, int mstype, struct dlm_message **ms_ret, - struct dlm_mhandle **mh_ret, - gfp_t allocation) + struct dlm_mhandle **mh_ret) { struct dlm_message *ms; struct dlm_mhandle *mh; @@ -3341,7 +3340,7 @@ static int _create_message(struct dlm_ls *ls, int mb_len, pass into midcomms_commit and a message buffer (mb) that we write our data into */ - mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, allocation, &mb); + mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb); if (!mh) return -ENOBUFS; @@ -3363,8 +3362,7 @@ static int _create_message(struct dlm_ls *ls, int mb_len, static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb, int to_nodeid, int mstype, struct dlm_message **ms_ret, - struct dlm_mhandle **mh_ret, - gfp_t allocation) + struct dlm_mhandle **mh_ret) { int mb_len = sizeof(struct dlm_message); @@ -3385,7 +3383,7 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb, } return _create_message(r->res_ls, mb_len, to_nodeid, mstype, - ms_ret, mh_ret, allocation); + ms_ret, mh_ret); } /* further lowcomms enhancements or alternate implementations may make @@ -3454,7 +3452,7 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype) if (error) return error; - error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS); + error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh); if (error) goto fail; @@ -3514,8 +3512,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb) to_nodeid = lkb->lkb_nodeid; - error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh, - GFP_NOFS); + error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh); if (error) goto out; @@ -3536,8 +3533,7 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode) to_nodeid = lkb->lkb_nodeid; - error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh, - GFP_NOFS); + error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh); if (error) goto out; @@ -3562,8 +3558,7 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb) if (error) return error; - error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh, - GFP_NOFS); + error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh); if (error) goto fail; @@ -3587,8 +3582,7 @@ static int send_remove(struct dlm_rsb *r) to_nodeid = dlm_dir_nodeid(r); - error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh, - GFP_ATOMIC); + error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh); if (error) goto out; @@ -3609,7 +3603,7 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, to_nodeid = lkb->lkb_nodeid; - error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS); + error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh); if (error) goto out; @@ -3651,8 +3645,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_mhandle *mh; int 
error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid); - error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh, - GFP_NOFS); + error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh); if (error) goto out; @@ -6103,7 +6096,7 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid) int error; error = _create_message(ls, sizeof(struct dlm_message), nodeid, - DLM_MSG_PURGE, &ms, &mh, GFP_NOFS); + DLM_MSG_PURGE, &ms, &mh); if (error) return error; ms->m_nodeid = cpu_to_le32(nodeid); diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 712165a1e567..ab2cfbd2ea77 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1229,14 +1229,13 @@ out: }; static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len, - gfp_t allocation, char **ppc, - void (*cb)(void *data), + char **ppc, void (*cb)(void *data), void *data) { struct writequeue_entry *e; struct dlm_msg *msg; - msg = dlm_allocate_msg(allocation); + msg = dlm_allocate_msg(); if (!msg) return NULL; @@ -1261,9 +1260,8 @@ static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len, * dlm_lowcomms_commit_msg which is a must call if success */ #ifndef __CHECKER__ -struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation, - char **ppc, void (*cb)(void *data), - void *data) +struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc, + void (*cb)(void *data), void *data) { struct connection *con; struct dlm_msg *msg; @@ -1284,7 +1282,7 @@ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation, return NULL; } - msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data); + msg = dlm_lowcomms_new_msg_con(con, len, ppc, cb, data); if (!msg) { srcu_read_unlock(&connections_srcu, idx); return NULL; @@ -1348,8 +1346,8 @@ int dlm_lowcomms_resend_msg(struct dlm_msg *msg) if (msg->retransmit) return 1; - msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, - GFP_ATOMIC, &ppc, NULL, NULL); + msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, &ppc, + NULL, NULL); if (!msg_resend) return -ENOMEM; diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h index 3e8dca66183b..8deb16f8f620 100644 --- a/fs/dlm/lowcomms.h +++ b/fs/dlm/lowcomms.h @@ -39,9 +39,8 @@ void dlm_lowcomms_stop(void); void dlm_lowcomms_init(void); void dlm_lowcomms_exit(void); int dlm_lowcomms_close(int nodeid); -struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation, - char **ppc, void (*cb)(void *data), - void *data); +struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc, + void (*cb)(void *data), void *data); void dlm_lowcomms_commit_msg(struct dlm_msg *msg); void dlm_lowcomms_put_msg(struct dlm_msg *msg); int dlm_lowcomms_resend_msg(struct dlm_msg *msg); diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index be9398ddf357..ab663ca66aca 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -130,9 +130,9 @@ void dlm_free_lkb(struct dlm_lkb *lkb) kmem_cache_free(lkb_cache, lkb); } -struct dlm_mhandle *dlm_allocate_mhandle(gfp_t allocation) +struct dlm_mhandle *dlm_allocate_mhandle(void) { - return kmem_cache_alloc(mhandle_cache, allocation); + return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC); } void dlm_free_mhandle(struct dlm_mhandle *mhandle) @@ -150,9 +150,9 @@ void dlm_free_writequeue(struct writequeue_entry *writequeue) kmem_cache_free(writequeue_cache, writequeue); } -struct dlm_msg *dlm_allocate_msg(gfp_t allocation) +struct dlm_msg *dlm_allocate_msg(void) { - return 
kmem_cache_alloc(msg_cache, allocation); + return kmem_cache_alloc(msg_cache, GFP_ATOMIC); } void dlm_free_msg(struct dlm_msg *msg) diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h index 6b29563d24f7..15198d46b42a 100644 --- a/fs/dlm/memory.h +++ b/fs/dlm/memory.h @@ -20,11 +20,11 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls); void dlm_free_lkb(struct dlm_lkb *l); char *dlm_allocate_lvb(struct dlm_ls *ls); void dlm_free_lvb(char *l); -struct dlm_mhandle *dlm_allocate_mhandle(gfp_t allocation); +struct dlm_mhandle *dlm_allocate_mhandle(void); void dlm_free_mhandle(struct dlm_mhandle *mhandle); struct writequeue_entry *dlm_allocate_writequeue(void); void dlm_free_writequeue(struct writequeue_entry *writequeue); -struct dlm_msg *dlm_allocate_msg(gfp_t allocation); +struct dlm_msg *dlm_allocate_msg(void); void dlm_free_msg(struct dlm_msg *msg); struct dlm_callback *dlm_allocate_cb(void); void dlm_free_cb(struct dlm_callback *cb); diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index 8e9920f1b48b..ed6fb9b9a582 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c @@ -379,8 +379,7 @@ static int dlm_send_ack(int nodeid, uint32_t seq) struct dlm_msg *msg; char *ppc; - msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc, - NULL, NULL); + msg = dlm_lowcomms_new_msg(nodeid, mb_len, &ppc, NULL, NULL); if (!msg) return -ENOMEM; @@ -428,7 +427,7 @@ static int dlm_send_fin(struct midcomms_node *node, struct dlm_mhandle *mh; char *ppc; - mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc); + mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc); if (!mh) return -ENOMEM; @@ -976,13 +975,13 @@ static void midcomms_new_msg_cb(void *data) } static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid, - int len, gfp_t allocation, char **ppc) + int len, char **ppc) { struct dlm_opts *opts; struct dlm_msg *msg; msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN, - allocation, ppc, midcomms_new_msg_cb, mh); + ppc, midcomms_new_msg_cb, mh); if (!msg) return NULL; @@ -1001,8 +1000,7 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node * dlm_midcomms_commit_mhandle which is a must call if success */ #ifndef __CHECKER__ -struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, - gfp_t allocation, char **ppc) +struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc) { struct midcomms_node *node; struct dlm_mhandle *mh; @@ -1017,7 +1015,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, /* this is a bug, however we going on and hope it will be resolved */ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags)); - mh = dlm_allocate_mhandle(allocation); + mh = dlm_allocate_mhandle(); if (!mh) goto err; @@ -1028,8 +1026,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, switch (node->version) { case DLM_VERSION_3_1: - msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc, - NULL, NULL); + msg = dlm_lowcomms_new_msg(nodeid, len, ppc, NULL, NULL); if (!msg) { dlm_free_mhandle(mh); goto err; @@ -1040,8 +1037,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, /* send ack back if necessary */ dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD); - msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation, - ppc); + msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, ppc); if (!msg) { dlm_free_mhandle(mh); goto err; @@ -1501,8 +1497,8 @@ int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf, rd.node = node; 
rd.buf = buf; - msg = dlm_lowcomms_new_msg(node->nodeid, buflen, GFP_NOFS, - &msgbuf, midcomms_new_rawmsg_cb, &rd); + msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf, + midcomms_new_rawmsg_cb, &rd); if (!msg) return -ENOMEM; diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h index e7246fb3ef57..278d26fdeb2c 100644 --- a/fs/dlm/midcomms.h +++ b/fs/dlm/midcomms.h @@ -16,8 +16,7 @@ struct midcomms_node; int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len); int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen); -struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, - gfp_t allocation, char **ppc); +struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc); void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name, int namelen); int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len); diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index 3b734aed26b5..2e3f529f3ff2 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c @@ -55,7 +55,7 @@ static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, struct dlm_mhandle *mh; char *mb; - mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb); + mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb); if (!mh) { log_print("%s to %d type %d len %d ENOBUFS", __func__, to_nodeid, type, len); @@ -75,8 +75,7 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type, struct dlm_msg *msg; char *mb; - msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, GFP_NOFS, &mb, - NULL, NULL); + msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, &mb, NULL, NULL); if (!msg) { log_print("create_rcom to %d type %d len %d ENOBUFS", to_nodeid, type, len); @@ -510,7 +509,7 @@ int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in) char *mb; int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config); - mh = dlm_midcomms_get_mhandle(nodeid, mb_len, GFP_NOFS, &mb); + mh = dlm_midcomms_get_mhandle(nodeid, mb_len, &mb); if (!mh) return -ENOBUFS; -- cgit v1.2.3 From 455597a55f402e52e1c577c921bf5fe3aa4d2281 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:17:58 -0400 Subject: dlm: switch to GFP_ATOMIC in dlm allocations Replace GFP_NOFS with GFP_ATOMIC. Also stop using idr_preload which uses a non-bh spin_lock. This is further preparation for softirq message processing. 
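For illustration only (not part of the patch), the resulting allocation pattern can be sketched as follows, mirroring the _create_lkb() hunk below: GFP_NOWAIT never sleeps, so the idr slot is taken directly under the spinlock and no idr_preload()/idr_preload_end() pair is needed around the critical section.

	/* sketch: allocate an lkb id directly under the spinlock,
	 * with no preload step; GFP_NOWAIT cannot sleep here
	 */
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);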
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lock.c | 2 -- fs/dlm/memory.c | 6 +++--- fs/dlm/recover.c | 2 -- fs/dlm/requestqueue.c | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 2f94ffc3cf82..d87464614bc5 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1206,13 +1206,11 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); - idr_preload(GFP_NOFS); spin_lock(&ls->ls_lkbidr_spin); rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); if (rv >= 0) lkb->lkb_id = rv; spin_unlock(&ls->ls_lkbidr_spin); - idr_preload_end(); if (rv < 0) { log_error(ls, "create_lkb idr error %d", rv); diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index ab663ca66aca..15a8b1cee433 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -84,7 +84,7 @@ char *dlm_allocate_lvb(struct dlm_ls *ls) { char *p; - p = kzalloc(ls->ls_lvblen, GFP_NOFS); + p = kzalloc(ls->ls_lvblen, GFP_ATOMIC); return p; } @@ -97,7 +97,7 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls) { struct dlm_rsb *r; - r = kmem_cache_zalloc(rsb_cache, GFP_NOFS); + r = kmem_cache_zalloc(rsb_cache, GFP_ATOMIC); return r; } @@ -112,7 +112,7 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls) { struct dlm_lkb *lkb; - lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS); + lkb = kmem_cache_zalloc(lkb_cache, GFP_ATOMIC); return lkb; } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 53917c0aa3c0..ce6dc914cb86 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -310,7 +310,6 @@ static int recover_idr_add(struct dlm_rsb *r) struct dlm_ls *ls = r->res_ls; int rv; - idr_preload(GFP_NOFS); spin_lock(&ls->ls_recover_idr_lock); if (r->res_id) { rv = -1; @@ -326,7 +325,6 @@ static int recover_idr_add(struct dlm_rsb *r) rv = 0; out_unlock: spin_unlock(&ls->ls_recover_idr_lock); - idr_preload_end(); return rv; } diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index 892d6ca21e74..c05940afd063 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c @@ -37,7 +37,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, int length = le16_to_cpu(ms->m_header.h_length) - sizeof(struct dlm_message); - e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS); + e = kmalloc(sizeof(struct rq_entry) + length, GFP_ATOMIC); if (!e) { log_print("dlm_add_requestqueue: out of memory len %d", length); return; -- cgit v1.2.3 From 29e345f3c68e2bcf094162fc36394d348ccfb9ff Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:17:59 -0400 Subject: dlm: move root_list functionality to recoverd.c Move dlm_create_root_list() and dlm_release_root_list() to recoverd.c and declare them static because they are only used there.
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/recover.c | 42 ------------------------------------------ fs/dlm/recover.h | 2 -- fs/dlm/recoverd.c | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index ce6dc914cb86..6abc283f8f36 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -889,48 +889,6 @@ void dlm_recover_rsbs(struct dlm_ls *ls) /* Create a single list of all root rsb's to be used during recovery */ -int dlm_create_root_list(struct dlm_ls *ls) -{ - struct rb_node *n; - struct dlm_rsb *r; - int i, error = 0; - - down_write(&ls->ls_root_sem); - if (!list_empty(&ls->ls_root_list)) { - log_error(ls, "root list not empty"); - error = -EINVAL; - goto out; - } - - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock(&ls->ls_rsbtbl[i].lock); - for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - list_add(&r->res_root_list, &ls->ls_root_list); - dlm_hold_rsb(r); - } - - if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) - log_error(ls, "dlm_create_root_list toss not empty"); - spin_unlock(&ls->ls_rsbtbl[i].lock); - } - out: - up_write(&ls->ls_root_sem); - return error; -} - -void dlm_release_root_list(struct dlm_ls *ls) -{ - struct dlm_rsb *r, *safe; - - down_write(&ls->ls_root_sem); - list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { - list_del_init(&r->res_root_list); - dlm_put_rsb(r); - } - up_write(&ls->ls_root_sem); -} - void dlm_clear_toss(struct dlm_ls *ls) { struct rb_node *n, *next; diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h index dbc51013ecad..0b54550ee055 100644 --- a/fs/dlm/recover.h +++ b/fs/dlm/recover.h @@ -23,8 +23,6 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq); int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc); int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq); void dlm_recovered_lock(struct dlm_rsb *r); -int dlm_create_root_list(struct dlm_ls *ls); -void dlm_release_root_list(struct dlm_ls *ls); void dlm_clear_toss(struct dlm_ls *ls); void dlm_recover_rsbs(struct dlm_ls *ls); diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 4d17491dea2f..8eb42554ccb0 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -20,6 +20,45 @@ #include "requestqueue.h" #include "recoverd.h" +static void dlm_create_root_list(struct dlm_ls *ls) +{ + struct rb_node *n; + struct dlm_rsb *r; + int i; + + down_write(&ls->ls_root_sem); + if (!list_empty(&ls->ls_root_list)) { + log_error(ls, "root list not empty"); + goto out; + } + + for (i = 0; i < ls->ls_rsbtbl_size; i++) { + spin_lock_bh(&ls->ls_rsbtbl[i].lock); + for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { + r = rb_entry(n, struct dlm_rsb, res_hashnode); + list_add(&r->res_root_list, &ls->ls_root_list); + dlm_hold_rsb(r); + } + + if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) + log_error(ls, "%s toss not empty", __func__); + spin_unlock_bh(&ls->ls_rsbtbl[i].lock); + } + out: + up_write(&ls->ls_root_sem); +} + +static void dlm_release_root_list(struct dlm_ls *ls) +{ + struct dlm_rsb *r, *safe; + + down_write(&ls->ls_root_sem); + list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { + list_del_init(&r->res_root_list); + dlm_put_rsb(r); + } + up_write(&ls->ls_root_sem); +} /* If the start for which we're re-enabling locking (seq) has been superseded by a newer stop (ls_recover_seq), we need to leave locking disabled. 
-- cgit v1.2.3 From aff46e0f24cd3adc54ec83f4cf834ff9ccb69307 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:00 -0400 Subject: dlm: use a new list for recovery of master rsb names Add a new "masters_list" for master rsb structs, with a new rwlock. The new list is created and used during the recovery process to send the master rsb names to new nodes. With this change, the current "root_list" can be used without locking. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dir.c | 22 +++++++----------- fs/dlm/dlm_internal.h | 3 +++ fs/dlm/lock.c | 2 ++ fs/dlm/lockspace.c | 2 ++ fs/dlm/recoverd.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 79 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index f6acba4310a7..10753486049a 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -216,16 +216,13 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, if (!rv) return r; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, &ls->ls_masters_list, res_masters_list) { if (len == r->res_length && !memcmp(name, r->res_name, len)) { - up_read(&ls->ls_root_sem); log_debug(ls, "find_rsb_root revert to root_list %s", r->res_name); return r; } } - up_read(&ls->ls_root_sem); return NULL; } @@ -241,7 +238,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, int offset = 0, dir_nodeid; __be16 be_namelen; - down_read(&ls->ls_root_sem); + read_lock(&ls->ls_masters_lock); if (inlen > 1) { r = find_rsb_root(ls, inbuf, inlen); @@ -250,16 +247,13 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, nodeid, inlen, inlen, inbuf); goto out; } - list = r->res_root_list.next; + list = r->res_masters_list.next; } else { - list = ls->ls_root_list.next; + list = ls->ls_masters_list.next; } - for (offset = 0; list != &ls->ls_root_list; list = list->next) { - r = list_entry(list, struct dlm_rsb, res_root_list); - if (r->res_nodeid) - continue; - + for (offset = 0; list != &ls->ls_masters_list; list = list->next) { + r = list_entry(list, struct dlm_rsb, res_masters_list); dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid != nodeid) continue; @@ -294,7 +288,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, * terminating record. 
*/ - if ((list == &ls->ls_root_list) && + if ((list == &ls->ls_masters_list) && (offset + sizeof(uint16_t) <= outlen)) { be_namelen = cpu_to_be16(0xFFFF); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); @@ -302,6 +296,6 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, ls->ls_recover_dir_sent_msg++; } out: - up_read(&ls->ls_root_sem); + read_unlock(&ls->ls_masters_lock); } diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 1d2ee5c2d23d..3524f2b33f2c 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -342,6 +342,7 @@ struct dlm_rsb { struct list_head res_waitqueue; struct list_head res_root_list; /* used for recovery */ + struct list_head res_masters_list; /* used for recovery */ struct list_head res_recover_list; /* used for recovery */ int res_recover_locks_count; @@ -675,6 +676,8 @@ struct dlm_ls { struct list_head ls_root_list; /* root resources */ struct rw_semaphore ls_root_sem; /* protect root_list */ + struct list_head ls_masters_list; /* root resources */ + rwlock_t ls_masters_lock; /* protect root_list */ const struct dlm_lockspace_ops *ls_ops; void *ls_ops_arg; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index d87464614bc5..e0ab7432ca4d 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -423,6 +423,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, INIT_LIST_HEAD(&r->res_waitqueue); INIT_LIST_HEAD(&r->res_root_list); INIT_LIST_HEAD(&r->res_recover_list); + INIT_LIST_HEAD(&r->res_masters_list); *r_ret = r; return 0; @@ -1168,6 +1169,7 @@ static void kill_rsb(struct kref *kref) DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); + DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r);); } /* Attaching/detaching lkb's from rsb's is for rsb reference counting. 
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 0455dddb0797..c427c76b5f07 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -582,6 +582,8 @@ static int new_lockspace(const char *name, const char *cluster, init_waitqueue_head(&ls->ls_wait_general); INIT_LIST_HEAD(&ls->ls_root_list); init_rwsem(&ls->ls_root_sem); + INIT_LIST_HEAD(&ls->ls_masters_list); + rwlock_init(&ls->ls_masters_lock); spin_lock(&lslist_lock); ls->ls_create_count = 1; diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 8eb42554ccb0..dfce8fc6a783 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -20,6 +20,48 @@ #include "requestqueue.h" #include "recoverd.h" +static int dlm_create_masters_list(struct dlm_ls *ls) +{ + struct rb_node *n; + struct dlm_rsb *r; + int i, error = 0; + + write_lock(&ls->ls_masters_lock); + if (!list_empty(&ls->ls_masters_list)) { + log_error(ls, "root list not empty"); + error = -EINVAL; + goto out; + } + + for (i = 0; i < ls->ls_rsbtbl_size; i++) { + spin_lock_bh(&ls->ls_rsbtbl[i].lock); + for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { + r = rb_entry(n, struct dlm_rsb, res_hashnode); + if (r->res_nodeid) + continue; + + list_add(&r->res_masters_list, &ls->ls_masters_list); + dlm_hold_rsb(r); + } + spin_unlock_bh(&ls->ls_rsbtbl[i].lock); + } + out: + write_unlock(&ls->ls_masters_lock); + return error; +} + +static void dlm_release_masters_list(struct dlm_ls *ls) +{ + struct dlm_rsb *r, *safe; + + write_lock(&ls->ls_masters_lock); + list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) { + list_del_init(&r->res_masters_list); + dlm_put_rsb(r); + } + write_unlock(&ls->ls_masters_lock); +} + static void dlm_create_root_list(struct dlm_ls *ls) { struct rb_node *n; @@ -123,6 +165,23 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_recover_dir_nodeid(ls); + /* Create a snapshot of all active rsbs were we are the master of. + * During the barrier between dlm_recover_members_wait() and + * dlm_recover_directory() other nodes can dump their necessary + * directory dlm_rsb (r->res_dir_nodeid == nodeid) in rcom + * communication dlm_copy_master_names() handling. + * + * TODO We should create a per lockspace list that contains rsbs + * that we are the master of. Instead of creating this list while + * recovery we keep track of those rsbs while locking handling and + * recovery can use it when necessary. 
+ */ + error = dlm_create_masters_list(ls); + if (error) { + log_rinfo(ls, "dlm_create_masters_list error %d", error); + goto fail; + } + ls->ls_recover_dir_sent_res = 0; ls->ls_recover_dir_sent_msg = 0; ls->ls_recover_locks_in = 0; @@ -132,6 +191,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) error = dlm_recover_members_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_members_wait error %d", error); + dlm_release_masters_list(ls); goto fail; } @@ -145,6 +205,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) error = dlm_recover_directory(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_directory error %d", error); + dlm_release_masters_list(ls); goto fail; } @@ -153,9 +214,12 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) error = dlm_recover_directory_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_directory_wait error %d", error); + dlm_release_masters_list(ls); goto fail; } + dlm_release_masters_list(ls); + log_rinfo(ls, "dlm_recover_directory %u out %u messages", ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg); -- cgit v1.2.3 From 3a747f4a2ee85d51b905e2df940de4a924f8060a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:01 -0400 Subject: dlm: move rsb root_list to ls_recover() stack Move the rsb root_list from the lockspace to a stack variable since it is now only used by the ls_recover() function. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dir.c | 6 ++---- fs/dlm/dir.h | 3 ++- fs/dlm/dlm_internal.h | 6 ++---- fs/dlm/lock.c | 6 ++---- fs/dlm/lock.h | 2 +- fs/dlm/lockspace.c | 2 -- fs/dlm/recover.c | 30 ++++++++++------------------ fs/dlm/recover.h | 8 +++++--- fs/dlm/recoverd.c | 54 ++++++++++++++++++++++----------------------------- 9 files changed, 47 insertions(+), 70 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 10753486049a..3da00c46cbb3 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -47,15 +47,13 @@ int dlm_dir_nodeid(struct dlm_rsb *r) return r->res_dir_nodeid; } -void dlm_recover_dir_nodeid(struct dlm_ls *ls) +void dlm_recover_dir_nodeid(struct dlm_ls *ls, const struct list_head *root_list) { struct dlm_rsb *r; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash); } - up_read(&ls->ls_root_sem); } int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq) diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h index 39ecb69d7ef3..5b2a7ee3762d 100644 --- a/fs/dlm/dir.h +++ b/fs/dlm/dir.h @@ -14,7 +14,8 @@ int dlm_dir_nodeid(struct dlm_rsb *rsb); int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash); -void dlm_recover_dir_nodeid(struct dlm_ls *ls); +void dlm_recover_dir_nodeid(struct dlm_ls *ls, + const struct list_head *root_list); int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq); void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, char *outbuf, int outlen, int nodeid); diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 3524f2b33f2c..f434325d5bc8 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -674,10 +674,8 @@ struct dlm_ls { wait_queue_head_t ls_recover_lock_wait; spinlock_t ls_clear_proc_locks; - struct list_head ls_root_list; /* root resources */ - struct rw_semaphore ls_root_sem; /* protect root_list */ - struct list_head ls_masters_list; /* root resources */ - rwlock_t ls_masters_lock; /* protect 
root_list */ + struct list_head ls_masters_list; /* root resources */ + rwlock_t ls_masters_lock; /* protect root_list */ const struct dlm_lockspace_ops *ls_ops; void *ls_ops_arg; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index e0ab7432ca4d..43a2f4d0af53 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -5227,7 +5227,7 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r, /* Get rid of locks held by nodes that are gone. */ -void dlm_recover_purge(struct dlm_ls *ls) +void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list) { struct dlm_rsb *r; struct dlm_member *memb; @@ -5246,8 +5246,7 @@ void dlm_recover_purge(struct dlm_ls *ls) if (!nodes_count) return; - down_write(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { hold_rsb(r); lock_rsb(r); if (is_master(r)) { @@ -5262,7 +5261,6 @@ void dlm_recover_purge(struct dlm_ls *ls) unhold_rsb(r); cond_resched(); } - up_write(&ls->ls_root_sem); if (lkb_count) log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes", diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index b54e2cbbe6e2..c8ff7780d3cc 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -31,7 +31,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len, struct dlm_rsb **r_ret); -void dlm_recover_purge(struct dlm_ls *ls); +void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list); void dlm_purge_mstcpy_locks(struct dlm_rsb *r); void dlm_recover_grant(struct dlm_ls *ls); int dlm_recover_waiters_post(struct dlm_ls *ls); diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index c427c76b5f07..da756e5c0f6c 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -580,8 +580,6 @@ static int new_lockspace(const char *name, const char *cluster, ls->ls_recover_list_count = 0; ls->ls_local_handle = ls; init_waitqueue_head(&ls->ls_wait_general); - INIT_LIST_HEAD(&ls->ls_root_list); - init_rwsem(&ls->ls_root_sem); INIT_LIST_HEAD(&ls->ls_masters_list); rwlock_init(&ls->ls_masters_lock); diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 6abc283f8f36..172c6b73f37a 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -519,7 +519,8 @@ static int recover_master_static(struct dlm_rsb *r, unsigned int *count) * the correct dir node. 
*/ -int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq) +int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list) { struct dlm_rsb *r; unsigned int total = 0; @@ -529,10 +530,8 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq) log_rinfo(ls, "dlm_recover_masters"); - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { if (dlm_recovery_stopped(ls)) { - up_read(&ls->ls_root_sem); error = -EINTR; goto out; } @@ -546,12 +545,9 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq) cond_resched(); total++; - if (error) { - up_read(&ls->ls_root_sem); + if (error) goto out; - } } - up_read(&ls->ls_root_sem); log_rinfo(ls, "dlm_recover_masters %u of %u", count, total); @@ -656,13 +652,13 @@ static int recover_locks(struct dlm_rsb *r, uint64_t seq) return error; } -int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq) +int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list) { struct dlm_rsb *r; int error, count = 0; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { if (is_master(r)) { rsb_clear_flag(r, RSB_NEW_MASTER); continue; @@ -673,19 +669,15 @@ int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq) if (dlm_recovery_stopped(ls)) { error = -EINTR; - up_read(&ls->ls_root_sem); goto out; } error = recover_locks(r, seq); - if (error) { - up_read(&ls->ls_root_sem); + if (error) goto out; - } count += r->res_recover_locks_count; } - up_read(&ls->ls_root_sem); log_rinfo(ls, "dlm_recover_locks %d out", count); @@ -854,13 +846,12 @@ static void recover_grant(struct dlm_rsb *r) rsb_set_flag(r, RSB_RECOVER_GRANT); } -void dlm_recover_rsbs(struct dlm_ls *ls) +void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list) { struct dlm_rsb *r; unsigned int count = 0; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { lock_rsb(r); if (is_master(r)) { if (rsb_flag(r, RSB_RECOVER_CONVERT)) @@ -881,7 +872,6 @@ void dlm_recover_rsbs(struct dlm_ls *ls) rsb_clear_flag(r, RSB_NEW_MASTER2); unlock_rsb(r); } - up_read(&ls->ls_root_sem); if (count) log_rinfo(ls, "dlm_recover_rsbs %d done", count); diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h index 0b54550ee055..efc79a6e577d 100644 --- a/fs/dlm/recover.h +++ b/fs/dlm/recover.h @@ -19,12 +19,14 @@ int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq); int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq); int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq); int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq); -int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq); +int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list); int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc); -int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq); +int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list); void dlm_recovered_lock(struct dlm_rsb *r); void dlm_clear_toss(struct dlm_ls *ls); -void dlm_recover_rsbs(struct dlm_ls *ls); +void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list); #endif /* __RECOVER_DOT_H__ */ diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index dfce8fc6a783..f6acc7351625 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ 
-62,23 +62,17 @@ static void dlm_release_masters_list(struct dlm_ls *ls) write_unlock(&ls->ls_masters_lock); } -static void dlm_create_root_list(struct dlm_ls *ls) +static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) { struct rb_node *n; struct dlm_rsb *r; int i; - down_write(&ls->ls_root_sem); - if (!list_empty(&ls->ls_root_list)) { - log_error(ls, "root list not empty"); - goto out; - } - for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); - list_add(&r->res_root_list, &ls->ls_root_list); + list_add(&r->res_root_list, root_list); dlm_hold_rsb(r); } @@ -86,20 +80,16 @@ static void dlm_create_root_list(struct dlm_ls *ls) log_error(ls, "%s toss not empty", __func__); spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } - out: - up_write(&ls->ls_root_sem); } -static void dlm_release_root_list(struct dlm_ls *ls) +static void dlm_release_root_list(struct list_head *root_list) { struct dlm_rsb *r, *safe; - down_write(&ls->ls_root_sem); - list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { + list_for_each_entry_safe(r, safe, root_list, res_root_list) { list_del_init(&r->res_root_list); dlm_put_rsb(r); } - up_write(&ls->ls_root_sem); } /* If the start for which we're re-enabling locking (seq) has been superseded @@ -131,6 +121,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) { + LIST_HEAD(root_list); unsigned long start; int error, neg = 0; @@ -147,7 +138,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) * routines. */ - dlm_create_root_list(ls); + dlm_create_root_list(ls, &root_list); /* * Add or remove nodes from the lockspace's ls_nodes list. @@ -163,7 +154,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) goto fail; } - dlm_recover_dir_nodeid(ls); + dlm_recover_dir_nodeid(ls, &root_list); /* Create a snapshot of all active rsbs were we are the master of. * During the barrier between dlm_recover_members_wait() and @@ -179,7 +170,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) error = dlm_create_masters_list(ls); if (error) { log_rinfo(ls, "dlm_create_masters_list error %d", error); - goto fail; + goto fail_root_list; } ls->ls_recover_dir_sent_res = 0; @@ -192,7 +183,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) if (error) { log_rinfo(ls, "dlm_recover_members_wait error %d", error); dlm_release_masters_list(ls); - goto fail; + goto fail_root_list; } start = jiffies; @@ -206,7 +197,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) if (error) { log_rinfo(ls, "dlm_recover_directory error %d", error); dlm_release_masters_list(ls); - goto fail; + goto fail_root_list; } dlm_set_recover_status(ls, DLM_RS_DIR); @@ -215,7 +206,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) if (error) { log_rinfo(ls, "dlm_recover_directory_wait error %d", error); dlm_release_masters_list(ls); - goto fail; + goto fail_root_list; } dlm_release_masters_list(ls); @@ -233,7 +224,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) if (dlm_recovery_stopped(ls)) { error = -EINTR; - goto fail; + goto fail_root_list; } if (neg || dlm_no_directory(ls)) { @@ -241,27 +232,27 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) * Clear lkb's for departed nodes. 
*/ - dlm_recover_purge(ls); + dlm_recover_purge(ls, &root_list); /* * Get new master nodeid's for rsb's that were mastered on * departed nodes. */ - error = dlm_recover_masters(ls, rv->seq); + error = dlm_recover_masters(ls, rv->seq, &root_list); if (error) { log_rinfo(ls, "dlm_recover_masters error %d", error); - goto fail; + goto fail_root_list; } /* * Send our locks on remastered rsb's to the new masters. */ - error = dlm_recover_locks(ls, rv->seq); + error = dlm_recover_locks(ls, rv->seq, &root_list); if (error) { log_rinfo(ls, "dlm_recover_locks error %d", error); - goto fail; + goto fail_root_list; } dlm_set_recover_status(ls, DLM_RS_LOCKS); @@ -269,7 +260,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) error = dlm_recover_locks_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_locks_wait error %d", error); - goto fail; + goto fail_root_list; } log_rinfo(ls, "dlm_recover_locks %u in", @@ -281,7 +272,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) * settings. */ - dlm_recover_rsbs(ls); + dlm_recover_rsbs(ls, &root_list); } else { /* * Other lockspace members may be going through the "neg" steps @@ -293,11 +284,11 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) error = dlm_recover_locks_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_locks_wait error %d", error); - goto fail; + goto fail_root_list; } } - dlm_release_root_list(ls); + dlm_release_root_list(&root_list); /* * Purge directory-related requests that are saved in requestqueue. @@ -346,8 +337,9 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) return 0; + fail_root_list: + dlm_release_root_list(&root_list); fail: - dlm_release_root_list(ls); mutex_unlock(&ls->ls_recoverd_active); return error; -- cgit v1.2.3 From 3ae67760567438ff857e79bd799154b1f7da3b2a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:02 -0400 Subject: dlm: add new struct to save position in dlm_copy_master_names Add a new struct to save the current position in the rsb masters_list while sending the rsb names to other nodes. The rsb names are sent in multiple chunks, and for each new chunk, the new "dlm_dir_dump" struct saves the last position in the masters_list. The new struct is also used to save more information to sanity check the recovery process. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dir.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++-- fs/dlm/dlm_internal.h | 4 +- fs/dlm/lockspace.c | 2 + fs/dlm/recoverd.c | 5 --- 4 files changed, 116 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 3da00c46cbb3..0dc8a1d9e411 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -224,6 +224,80 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, return NULL; } +struct dlm_dir_dump { + /* init values to match if whole + * dump fits to one seq. Sanity check only. + */ + uint64_t seq_init; + uint64_t nodeid_init; + /* compare local pointer with last lookup, + * just a sanity check. 
+ */ + struct list_head *last; + + unsigned int sent_res; /* for log info */ + unsigned int sent_msg; /* for log info */ + + struct list_head list; +}; + +static void drop_dir_ctx(struct dlm_ls *ls, int nodeid) +{ + struct dlm_dir_dump *dd, *safe; + + write_lock(&ls->ls_dir_dump_lock); + list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) { + if (dd->nodeid_init == nodeid) { + log_error(ls, "drop dump seq %llu", + (unsigned long long)dd->seq_init); + list_del(&dd->list); + kfree(dd); + } + } + write_unlock(&ls->ls_dir_dump_lock); +} + +static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid) +{ + struct dlm_dir_dump *iter, *dd = NULL; + + read_lock(&ls->ls_dir_dump_lock); + list_for_each_entry(iter, &ls->ls_dir_dump_list, list) { + if (iter->nodeid_init == nodeid) { + dd = iter; + break; + } + } + read_unlock(&ls->ls_dir_dump_lock); + + return dd; +} + +static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid) +{ + struct dlm_dir_dump *dd; + + dd = lookup_dir_dump(ls, nodeid); + if (dd) { + log_error(ls, "found ongoing dir dump for node %d, will drop it", + nodeid); + drop_dir_ctx(ls, nodeid); + } + + dd = kzalloc(sizeof(*dd), GFP_ATOMIC); + if (!dd) + return NULL; + + dd->seq_init = ls->ls_recover_seq; + dd->nodeid_init = nodeid; + + write_lock(&ls->ls_dir_dump_lock); + list_add(&dd->list, &ls->ls_dir_dump_list); + write_unlock(&ls->ls_dir_dump_lock); + + return dd; +} + /* Find the rsb where we left off (or start again), then send rsb names for rsb's we're master of and whose directory node matches the requesting node. inbuf is the rsb name last sent, inlen is the name's length */ @@ -234,11 +308,20 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, struct list_head *list; struct dlm_rsb *r; int offset = 0, dir_nodeid; + struct dlm_dir_dump *dd; __be16 be_namelen; read_lock(&ls->ls_masters_lock); if (inlen > 1) { + dd = lookup_dir_dump(ls, nodeid); + if (!dd) { + log_error(ls, "failed to lookup dir dump context nodeid: %d", + nodeid); + goto out; + } + + /* next chunk in dump */ r = find_rsb_root(ls, inbuf, inlen); if (!r) { log_error(ls, "copy_master_names from %d start %d %.*s", @@ -246,8 +329,25 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, goto out; } list = r->res_masters_list.next; + + /* sanity checks */ + if (dd->last != &r->res_masters_list || + dd->seq_init != ls->ls_recover_seq) { + log_error(ls, "failed dir dump sanity check seq_init: %llu seq: %llu", + (unsigned long long)dd->seq_init, + (unsigned long long)ls->ls_recover_seq); + goto out; + } } else { + dd = init_dir_dump(ls, nodeid); + if (!dd) { + log_error(ls, "failed to allocate dir dump context"); + goto out; + } + + /* start dump */ list = ls->ls_masters_list.next; + dd->last = list; } for (offset = 0; list != &ls->ls_masters_list; list = list->next) { @@ -269,7 +369,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, be_namelen = cpu_to_be16(0); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); - ls->ls_recover_dir_sent_msg++; + dd->sent_msg++; goto out; } @@ -278,7 +378,8 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, offset += sizeof(__be16); memcpy(outbuf + offset, r->res_name, r->res_length); offset += r->res_length; - ls->ls_recover_dir_sent_res++; + dd->sent_res++; + dd->last = list; } /* @@ -288,10 +389,18 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, if ((list == 
&ls->ls_masters_list) && (offset + sizeof(uint16_t) <= outlen)) { + /* end dump */ be_namelen = cpu_to_be16(0xFFFF); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); - ls->ls_recover_dir_sent_msg++; + dd->sent_msg++; + log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages", + nodeid, dd->sent_res, dd->sent_msg); + + write_lock(&ls->ls_dir_dump_lock); + list_del_init(&dd->list); + write_unlock(&ls->ls_dir_dump_lock); + kfree(dd); } out: read_unlock(&ls->ls_masters_lock); } diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index f434325d5bc8..e03a379832d5 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -660,8 +660,6 @@ struct dlm_ls { struct mutex ls_requestqueue_mutex; struct dlm_rcom *ls_recover_buf; int ls_recover_nodeid; /* for debugging */ - unsigned int ls_recover_dir_sent_res; /* for log info */ - unsigned int ls_recover_dir_sent_msg; /* for log info */ unsigned int ls_recover_locks_in; /* for log info */ uint64_t ls_rcom_seq; spinlock_t ls_rcom_spin; @@ -676,6 +674,8 @@ struct dlm_ls { struct list_head ls_masters_list; /* root resources */ rwlock_t ls_masters_lock; /* protect root_list */ + struct list_head ls_dir_dump_list; /* root resources */ + rwlock_t ls_dir_dump_lock; /* protect root_list */ const struct dlm_lockspace_ops *ls_ops; void *ls_ops_arg; diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index da756e5c0f6c..af7769f8e38c 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -582,6 +582,8 @@ static int new_lockspace(const char *name, const char *cluster, init_waitqueue_head(&ls->ls_wait_general); INIT_LIST_HEAD(&ls->ls_masters_list); rwlock_init(&ls->ls_masters_lock); + INIT_LIST_HEAD(&ls->ls_dir_dump_list); + rwlock_init(&ls->ls_dir_dump_lock); spin_lock(&lslist_lock); ls->ls_create_count = 1; diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index f6acc7351625..0b1a62167798 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -173,8 +173,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) goto fail_root_list; } - ls->ls_recover_dir_sent_res = 0; - ls->ls_recover_dir_sent_msg = 0; ls->ls_recover_locks_in = 0; dlm_set_recover_status(ls, DLM_RS_NODES); @@ -211,9 +209,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_release_masters_list(ls); - log_rinfo(ls, "dlm_recover_directory %u out %u messages", - ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg); - /* * We may have outstanding operations that are waiting for a reply from * a failed node. Mark these to be resent after recovery. Unlock and -- cgit v1.2.3 From 6b52ea7916036cb01e7e37153f5e0ad342add765 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:03 -0400 Subject: dlm: drop mutex use in waiters recovery The waiters_mutex no longer needs to be used in the waiters recovery functions dlm_recover_waiters_pre() and dlm_recover_waiters_post(). During recovery, ordinary locking operations are paused, and the recovery thread is the only context accessing the waiters list, so the lock is not needed. Access to the waiters list from debugfs functions is avoided by taking the top level recovery lock in the debugfs dump function.
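As a sketch of the debugfs guard described above (illustrative only; dump_waiters() is a hypothetical helper standing in for the existing dump loop):

	/* sketch: debugfs paths take the recovery lock with a try variant,
	 * so they never block recovery; -EAGAIN is returned to user space
	 * while recovery is running
	 */
	if (!dlm_lock_recovery_try(ls))
		return -EAGAIN;
	ret = dump_waiters(ls, buf, len);	/* hypothetical helper */
	dlm_unlock_recovery(ls);
	return ret;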
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 13 +++++++++++++ fs/dlm/lock.c | 17 +++++++++-------- fs/dlm/lock.h | 1 + 3 files changed, 23 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 19cdedd56629..c238a9308323 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -737,6 +737,12 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv; mutex_lock(&debug_buf_lock); + ret = dlm_lock_recovery_try(ls); + if (!ret) { + rv = -EAGAIN; + goto out; + } + mutex_lock(&ls->ls_waiters_mutex); memset(debug_buf, 0, sizeof(debug_buf)); @@ -749,8 +755,10 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, pos += ret; } mutex_unlock(&ls->ls_waiters_mutex); + dlm_unlock_recovery(ls); rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos); +out: mutex_unlock(&debug_buf_lock); return rv; } @@ -772,7 +780,12 @@ static ssize_t waiters_write(struct file *file, const char __user *user_buf, if (n != 3) return -EINVAL; + error = dlm_lock_recovery_try(ls); + if (!error) + return -EAGAIN; + error = dlm_debug_add_lkb_to_waiters(ls, lkb_id, mstype, to_nodeid); + dlm_unlock_recovery(ls); if (error) return error; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 43a2f4d0af53..395b904a82f4 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -201,7 +201,7 @@ void dlm_dump_rsb(struct dlm_rsb *r) /* Threads cannot use the lockspace while it's being recovered */ -static inline void dlm_lock_recovery(struct dlm_ls *ls) +void dlm_lock_recovery(struct dlm_ls *ls) { down_read(&ls->ls_in_recovery); } @@ -1556,7 +1556,11 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) } /* Handles situations where we might be processing a "fake" or "local" reply in - which we can't try to take waiters_mutex again. */ + * the recovery context which stops any locking activity. Only debugfs might + * change the lockspace waiters but they will held the recovery lock to ensure + * remove_from_waiters_ms() in local case will be the only user manipulating the + * lockspace waiters in recovery context. 
+ */ static int remove_from_waiters_ms(struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) @@ -1566,6 +1570,9 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, if (!local) mutex_lock(&ls->ls_waiters_mutex); + else + WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) || + !dlm_locking_stopped(ls)); error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); if (!local) mutex_unlock(&ls->ls_waiters_mutex); @@ -4398,7 +4405,6 @@ static void _receive_convert_reply(struct dlm_lkb *lkb, if (error) goto out; - /* local reply can happen with waiters_mutex held */ error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; @@ -4437,7 +4443,6 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, if (error) goto out; - /* local reply can happen with waiters_mutex held */ error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; @@ -4489,7 +4494,6 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, if (error) goto out; - /* local reply can happen with waiters_mutex held */ error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; @@ -4890,8 +4894,6 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) if (!ms_local) return; - mutex_lock(&ls->ls_waiters_mutex); - list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource); @@ -4984,7 +4986,6 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) } schedule(); } - mutex_unlock(&ls->ls_waiters_mutex); kfree(ms_local); } diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index c8ff7780d3cc..b2fd74a2f8eb 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -23,6 +23,7 @@ void dlm_hold_rsb(struct dlm_rsb *r); int dlm_put_lkb(struct dlm_lkb *lkb); void dlm_scan_rsbs(struct dlm_ls *ls); int dlm_lock_recovery_try(struct dlm_ls *ls); +void dlm_lock_recovery(struct dlm_ls *ls); void dlm_unlock_recovery(struct dlm_ls *ls); int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, -- cgit v1.2.3 From 097691dbadcdcaa6428adf0417cd227602eb60a4 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:04 -0400 Subject: dlm: convert ls_waiters_mutex to spinlock Convert the waiters mutex to a spinlock in preparation for processing messages in softirq context.
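A minimal sketch of why the conversion matters (illustrative, not from the patch): a mutex may sleep, which is not allowed once messages are processed in softirq context, while a spinlock never sleeps.

	/* before: mutex_lock() may sleep, invalid in softirq context */
	mutex_lock(&ls->ls_waiters_mutex);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
	mutex_unlock(&ls->ls_waiters_mutex);

	/* after: a spinlock is safe in softirq context */
	spin_lock(&ls->ls_waiters_lock);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
	spin_unlock(&ls->ls_waiters_lock);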
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 4 ++-- fs/dlm/dlm_internal.h | 2 +- fs/dlm/lock.c | 20 ++++++++++---------- fs/dlm/lockspace.c | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index c238a9308323..487dcf05d076 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -743,7 +743,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, goto out; } - mutex_lock(&ls->ls_waiters_mutex); + spin_lock(&ls->ls_waiters_lock); memset(debug_buf, 0, sizeof(debug_buf)); list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { @@ -754,7 +754,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, break; pos += ret; } - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock(&ls->ls_waiters_lock); dlm_unlock_recovery(ls); rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos); diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index e03a379832d5..98029fd5cd2b 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -595,7 +595,7 @@ struct dlm_ls { struct dlm_rsbtable *ls_rsbtbl; uint32_t ls_rsbtbl_size; - struct mutex ls_waiters_mutex; + spinlock_t ls_waiters_lock; struct list_head ls_waiters; /* lkbs needing a reply */ struct mutex ls_orphans_mutex; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 395b904a82f4..af677add4f5f 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1406,7 +1406,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error = 0; - mutex_lock(&ls->ls_waiters_mutex); + spin_lock(&ls->ls_waiters_lock); if (is_overlap_unlock(lkb) || (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) { @@ -1449,7 +1449,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) log_error(ls, "addwait error %x %d flags %x %d %d %s", lkb->lkb_id, error, dlm_iflags_val(lkb), mstype, lkb->lkb_wait_type, lkb->lkb_resource->res_name); - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock(&ls->ls_waiters_lock); return error; } @@ -1549,9 +1549,9 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error; - mutex_lock(&ls->ls_waiters_mutex); + spin_lock(&ls->ls_waiters_lock); error = _remove_from_waiters(lkb, mstype, NULL); - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock(&ls->ls_waiters_lock); return error; } @@ -1569,13 +1569,13 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, int error; if (!local) - mutex_lock(&ls->ls_waiters_mutex); + spin_lock(&ls->ls_waiters_lock); else WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) || !dlm_locking_stopped(ls)); error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); if (!local) - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock(&ls->ls_waiters_lock); return error; } @@ -4993,7 +4993,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) { struct dlm_lkb *lkb = NULL, *iter; - mutex_lock(&ls->ls_waiters_mutex); + spin_lock(&ls->ls_waiters_lock); list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { hold_lkb(iter); @@ -5001,7 +5001,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) break; } } - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock(&ls->ls_waiters_lock); return lkb; } @@ -5101,9 +5101,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) } /* Forcibly remove from waiters list */ - 
mutex_lock(&ls->ls_waiters_mutex); + spin_lock(&ls->ls_waiters_lock); list_del_init(&lkb->lkb_wait_reply); - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock(&ls->ls_waiters_lock); /* * The lkb is now clear of all prior waiters state and can be diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index af7769f8e38c..945139805605 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -515,7 +515,7 @@ static int new_lockspace(const char *name, const char *cluster, spin_lock_init(&ls->ls_lkbidr_spin); INIT_LIST_HEAD(&ls->ls_waiters); - mutex_init(&ls->ls_waiters_mutex); + spin_lock_init(&ls->ls_waiters_lock); INIT_LIST_HEAD(&ls->ls_orphans); mutex_init(&ls->ls_orphans_mutex); -- cgit v1.2.3 From cc396e2355b5ca6e1aee005f3ce99bab8f37f5ff Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:05 -0400 Subject: dlm: convert res_lock to spinlock Convert the rsb struct res_lock from a mutex to a spinlock in preparation for processing messages in softirq context. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dlm_internal.h | 2 +- fs/dlm/lock.c | 2 +- fs/dlm/lock.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 98029fd5cd2b..9f98a815f935 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -320,7 +320,7 @@ struct dlm_lkb { struct dlm_rsb { struct dlm_ls *res_ls; /* the lockspace */ struct kref res_ref; - struct mutex res_mutex; + spinlock_t res_lock; unsigned long res_flags; int res_length; /* length of rsb name */ int res_nodeid; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index af677add4f5f..7b309231eebd 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -415,7 +415,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, r->res_ls = ls; r->res_length = len; memcpy(r->res_name, name, len); - mutex_init(&r->res_mutex); + spin_lock_init(&r->res_lock); INIT_LIST_HEAD(&r->res_lookup); INIT_LIST_HEAD(&r->res_grantqueue); diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index b2fd74a2f8eb..0f6b2700c0da 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -69,12 +69,12 @@ static inline int is_master(struct dlm_rsb *r) static inline void lock_rsb(struct dlm_rsb *r) { - mutex_lock(&r->res_mutex); + spin_lock(&r->res_lock); } static inline void unlock_rsb(struct dlm_rsb *r) { - mutex_unlock(&r->res_mutex); + spin_unlock(&r->res_lock); } #endif -- cgit v1.2.3 From c288745f1d4a2ead903e81d2f4716e9d40b0ad85 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:06 -0400 Subject: dlm: avoid blocking receive at the end of recovery The end of the recovery process transitioned to normal message processing by temporarily blocking the receiving context, processing saved messages, then unblocking the receiving context. To avoid blocking the receiving context, the old wait_queue and mutex are replaced by a new rwlock and the new RECV_MSG_BLOCKED flag. Received messages are added to the list of saved messages, protected by the rwlock, until the flag is cleared, which happens when all saved messages have been processed. 
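The non-blocking gate can be sketched as follows (illustrative only; process_message() is a hypothetical stand-in for _receive_message()):

	/* sketch: readers check the flag under the read lock; if it is set,
	 * they retake the lock in write mode, recheck, and save the message
	 * on the requestqueue instead of processing it
	 */
	read_lock(&ls->ls_requestqueue_lock);
	if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
		process_message(ls, ms);	/* hypothetical helper */
		read_unlock(&ls->ls_requestqueue_lock);
	} else {
		read_unlock(&ls->ls_requestqueue_lock);
		write_lock(&ls->ls_requestqueue_lock);
		if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags))
			dlm_add_requestqueue(ls, nodeid, ms);
		else
			process_message(ls, ms);	/* flag cleared meanwhile */
		write_unlock(&ls->ls_requestqueue_lock);
	}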
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dlm_internal.h | 5 ++--- fs/dlm/lock.c | 16 ++++++++++++++-- fs/dlm/lockspace.c | 4 +--- fs/dlm/member.c | 5 +++++ fs/dlm/requestqueue.c | 41 ++++++++--------------------------------- 5 files changed, 30 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 9f98a815f935..61820b8c47a7 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -655,9 +655,7 @@ struct dlm_ls { struct rw_semaphore ls_in_recovery; /* block local requests */ struct rw_semaphore ls_recv_active; /* block dlm_recv */ struct list_head ls_requestqueue;/* queue remote requests */ - atomic_t ls_requestqueue_cnt; - wait_queue_head_t ls_requestqueue_wait; - struct mutex ls_requestqueue_mutex; + rwlock_t ls_requestqueue_lock; struct dlm_rcom *ls_recover_buf; int ls_recover_nodeid; /* for debugging */ unsigned int ls_recover_locks_in; /* for log info */ @@ -717,6 +715,7 @@ struct dlm_ls { #define LSFL_UEVENT_WAIT 7 #define LSFL_CB_DELAY 9 #define LSFL_NODIR 10 +#define LSFL_RECV_MSG_BLOCKED 11 #define DLM_PROC_FLAGS_CLOSING 1 #define DLM_PROC_FLAGS_COMPAT 2 diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 7b309231eebd..98d9c5a4be00 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -4752,20 +4752,32 @@ static void _receive_message(struct dlm_ls *ls, const struct dlm_message *ms, static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms, int nodeid) { - if (dlm_locking_stopped(ls)) { +try_again: + read_lock(&ls->ls_requestqueue_lock); + if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { /* If we were a member of this lockspace, left, and rejoined, other nodes may still be sending us messages from the lockspace generation before we left. 
*/ if (WARN_ON_ONCE(!ls->ls_generation)) { + read_unlock(&ls->ls_requestqueue_lock); log_limit(ls, "receive %d from %d ignore old gen", le32_to_cpu(ms->m_type), nodeid); return; } + read_unlock(&ls->ls_requestqueue_lock); + write_lock(&ls->ls_requestqueue_lock); + /* recheck because we hold writelock now */ + if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { + write_unlock_bh(&ls->ls_requestqueue_lock); + goto try_again; + } + dlm_add_requestqueue(ls, nodeid, ms); + write_unlock(&ls->ls_requestqueue_lock); } else { - dlm_wait_requestqueue(ls); _receive_message(ls, ms, 0); + read_unlock(&ls->ls_requestqueue_lock); } } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 945139805605..757e473bc619 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -554,9 +554,7 @@ static int new_lockspace(const char *name, const char *cluster, init_rwsem(&ls->ls_in_recovery); init_rwsem(&ls->ls_recv_active); INIT_LIST_HEAD(&ls->ls_requestqueue); - atomic_set(&ls->ls_requestqueue_cnt, 0); - init_waitqueue_head(&ls->ls_requestqueue_wait); - mutex_init(&ls->ls_requestqueue_mutex); + rwlock_init(&ls->ls_requestqueue_lock); spin_lock_init(&ls->ls_clear_proc_locks); /* Due backwards compatibility with 3.1 we need to use maximum diff --git a/fs/dlm/member.c b/fs/dlm/member.c index be7909ead71b..707cebcdc533 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -642,6 +642,11 @@ int dlm_ls_stop(struct dlm_ls *ls) set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); ls->ls_recover_seq++; + + /* activate requestqueue and stop processing */ + write_lock(&ls->ls_requestqueue_lock); + set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); + write_unlock(&ls->ls_requestqueue_lock); spin_unlock(&ls->ls_recover_lock); /* diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index c05940afd063..9b646026df46 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c @@ -48,10 +48,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, memcpy(&e->request, ms, sizeof(*ms)); memcpy(&e->request.m_extra, ms->m_extra, length); - atomic_inc(&ls->ls_requestqueue_cnt); - mutex_lock(&ls->ls_requestqueue_mutex); list_add_tail(&e->list, &ls->ls_requestqueue); - mutex_unlock(&ls->ls_requestqueue_mutex); } /* @@ -71,16 +68,14 @@ int dlm_process_requestqueue(struct dlm_ls *ls) struct dlm_message *ms; int error = 0; - mutex_lock(&ls->ls_requestqueue_mutex); - + write_lock(&ls->ls_requestqueue_lock); for (;;) { if (list_empty(&ls->ls_requestqueue)) { - mutex_unlock(&ls->ls_requestqueue_mutex); + clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); error = 0; break; } - e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); - mutex_unlock(&ls->ls_requestqueue_mutex); + e = list_first_entry(&ls->ls_requestqueue, struct rq_entry, list); ms = &e->request; @@ -93,41 +88,23 @@ int dlm_process_requestqueue(struct dlm_ls *ls) e->recover_seq); dlm_receive_message_saved(ls, &e->request, e->recover_seq); - - mutex_lock(&ls->ls_requestqueue_mutex); list_del(&e->list); - if (atomic_dec_and_test(&ls->ls_requestqueue_cnt)) - wake_up(&ls->ls_requestqueue_wait); kfree(e); if (dlm_locking_stopped(ls)) { log_debug(ls, "process_requestqueue abort running"); - mutex_unlock(&ls->ls_requestqueue_mutex); error = -EINTR; break; } + write_unlock(&ls->ls_requestqueue_lock); schedule(); + write_lock(&ls->ls_requestqueue_lock); } + write_unlock(&ls->ls_requestqueue_lock); return error; } -/* - * After recovery is done, locking is resumed and dlm_recoverd takes all the - * saved requests and 
processes them as they would have been by dlm_recv. At - * the same time, dlm_recv will start receiving new requests from remote nodes. - * We want to delay dlm_recv processing new requests until dlm_recoverd has - * finished processing the old saved requests. We don't check for locking - * stopped here because dlm_ls_stop won't stop locking until it's suspended us - * (dlm_recv). - */ - -void dlm_wait_requestqueue(struct dlm_ls *ls) -{ - wait_event(ls->ls_requestqueue_wait, - atomic_read(&ls->ls_requestqueue_cnt) == 0); -} - static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) { __le32 type = ms->m_type; @@ -158,17 +135,15 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) struct dlm_message *ms; struct rq_entry *e, *safe; - mutex_lock(&ls->ls_requestqueue_mutex); + write_lock(&ls->ls_requestqueue_lock); list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ms = &e->request; if (purge_request(ls, ms, e->nodeid)) { list_del(&e->list); - if (atomic_dec_and_test(&ls->ls_requestqueue_cnt)) - wake_up(&ls->ls_requestqueue_wait); kfree(e); } } - mutex_unlock(&ls->ls_requestqueue_mutex); + write_unlock(&ls->ls_requestqueue_lock); } -- cgit v1.2.3 From d52c9b8fefa3ed4f1893eea8c5f38748a83356fc Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:07 -0400 Subject: dlm: convert ls_recv_active from rw_semaphore to rwlock Convert ls_recv_active rw_semaphore to an rwlock to avoid sleeping, in preparation for softirq message processing. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dlm_internal.h | 2 +- fs/dlm/lock.c | 4 ++-- fs/dlm/lockspace.c | 2 +- fs/dlm/member.c | 4 ++-- fs/dlm/recoverd.c | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 61820b8c47a7..269c12e0824f 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -653,7 +653,7 @@ struct dlm_ls { uint64_t ls_recover_seq; struct dlm_recover *ls_recover_args; struct rw_semaphore ls_in_recovery; /* block local requests */ - struct rw_semaphore ls_recv_active; /* block dlm_recv */ + rwlock_t ls_recv_active; /* block dlm_recv */ struct list_head ls_requestqueue;/* queue remote requests */ rwlock_t ls_requestqueue_lock; struct dlm_rcom *ls_recover_buf; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 98d9c5a4be00..2f53fdfe262a 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -4837,7 +4837,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to be inactive (in this ls) before transitioning to recovery mode */ - down_read(&ls->ls_recv_active); + read_lock(&ls->ls_recv_active); if (hd->h_cmd == DLM_MSG) dlm_receive_message(ls, &p->message, nodeid); else if (hd->h_cmd == DLM_RCOM) @@ -4845,7 +4845,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) else log_error(ls, "invalid h_cmd %d from %d lockspace %x", hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); - up_read(&ls->ls_recv_active); + read_unlock(&ls->ls_recv_active); dlm_put_lockspace(ls); } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 757e473bc619..c021bf684fbc 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -552,7 +552,7 @@ static int new_lockspace(const char *name, const char *cluster, ls->ls_recover_seq = get_random_u64(); ls->ls_recover_args = NULL; init_rwsem(&ls->ls_in_recovery); - init_rwsem(&ls->ls_recv_active); + rwlock_init(&ls->ls_recv_active); 
INIT_LIST_HEAD(&ls->ls_requestqueue); rwlock_init(&ls->ls_requestqueue_lock); spin_lock_init(&ls->ls_clear_proc_locks); diff --git a/fs/dlm/member.c b/fs/dlm/member.c index 707cebcdc533..ac1b555af9d6 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -630,7 +630,7 @@ int dlm_ls_stop(struct dlm_ls *ls) * message to the requestqueue without races. */ - down_write(&ls->ls_recv_active); + write_lock(&ls->ls_recv_active); /* * Abort any recovery that's in progress (see RECOVER_STOP, @@ -654,7 +654,7 @@ int dlm_ls_stop(struct dlm_ls *ls) * requestqueue for later. */ - up_write(&ls->ls_recv_active); + write_unlock(&ls->ls_recv_active); /* * This in_recovery lock does two things: diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 0b1a62167798..a11ae1da2f60 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -103,7 +103,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) { int error = -EINTR; - down_write(&ls->ls_recv_active); + write_lock(&ls->ls_recv_active); spin_lock(&ls->ls_recover_lock); if (ls->ls_recover_seq == seq) { @@ -115,7 +115,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) } spin_unlock(&ls->ls_recover_lock); - up_write(&ls->ls_recv_active); + write_unlock(&ls->ls_recv_active); return error; } -- cgit v1.2.3 From 308533b4b1d55892d939286313fb73c1527444ce Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:08 -0400 Subject: dlm: remove schedule in receive path Remove an explicit schedule() call in the message processing path, in preparation for softirq message processing. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lock.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 2f53fdfe262a..e4cec14f9973 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -2543,7 +2543,6 @@ static void process_lookup_list(struct dlm_rsb *r) list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) { list_del_init(&lkb->lkb_rsb_lookup); _request_lock(r, lkb); - schedule(); } } -- cgit v1.2.3 From 578acf9a87a87531df5b59b3799ccc1256a4bbcc Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:09 -0400 Subject: dlm: use spin_lock_bh for message processing Use spin_lock_bh for all spinlocks involved in message processing, in preparation for softirq message processing. DLM lock requests from user space involve dlm processing in user context, in addition to the standard kernel context, necessitating bh variants. 
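To illustrate the constraint (a minimal, self-contained sketch with invented names, not code from this patch): when one spinlock protects data touched from both process context and softirq context, the process-context side must use the _bh variant, otherwise a softirq can interrupt the lock holder on the same CPU and spin on a lock that CPU will never release.

#include <linux/spinlock.h>
#include <linux/list.h>

struct entry {
	struct list_head list;
};

static DEFINE_SPINLOCK(shared_lock);
static LIST_HEAD(shared_list);

/* process context, e.g. a user space lock request */
static void queue_entry(struct entry *e)
{
	spin_lock_bh(&shared_lock);	/* disables BHs on this CPU */
	list_add_tail(&e->list, &shared_list);
	spin_unlock_bh(&shared_lock);
}

/* softirq context, e.g. message processing */
static struct entry *dequeue_entry(void)
{
	struct entry *e;

	spin_lock(&shared_lock);	/* BHs are already disabled here */
	e = list_first_entry_or_null(&shared_list, struct entry, list);
	if (e)
		list_del(&e->list);
	spin_unlock(&shared_lock);
	return e;
}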
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/ast.c | 12 +-- fs/dlm/debug_fs.c | 28 +++---- fs/dlm/dir.c | 24 +++--- fs/dlm/lock.c | 206 +++++++++++++++++++++++++++++--------------------- fs/dlm/lock.h | 4 +- fs/dlm/lockspace.c | 51 ++++++------- fs/dlm/lowcomms.c | 16 ++-- fs/dlm/member.c | 22 +++--- fs/dlm/midcomms.c | 40 +++++----- fs/dlm/rcom.c | 26 +++---- fs/dlm/recover.c | 52 ++++++------- fs/dlm/recoverd.c | 20 ++--- fs/dlm/requestqueue.c | 12 +-- fs/dlm/user.c | 32 ++++---- 14 files changed, 287 insertions(+), 258 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 03879c94fadb..59711486d801 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -142,12 +142,12 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, cb->astparam = lkb->lkb_astparam; INIT_WORK(&cb->work, dlm_callback_work); - spin_lock(&ls->ls_cb_lock); + spin_lock_bh(&ls->ls_cb_lock); if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) list_add(&cb->list, &ls->ls_cb_delay); else queue_work(ls->ls_callback_wq, &cb->work); - spin_unlock(&ls->ls_cb_lock); + spin_unlock_bh(&ls->ls_cb_lock); break; case DLM_ENQUEUE_CALLBACK_SUCCESS: break; @@ -179,9 +179,9 @@ void dlm_callback_stop(struct dlm_ls *ls) void dlm_callback_suspend(struct dlm_ls *ls) { if (ls->ls_callback_wq) { - spin_lock(&ls->ls_cb_lock); + spin_lock_bh(&ls->ls_cb_lock); set_bit(LSFL_CB_DELAY, &ls->ls_flags); - spin_unlock(&ls->ls_cb_lock); + spin_unlock_bh(&ls->ls_cb_lock); flush_workqueue(ls->ls_callback_wq); } @@ -199,7 +199,7 @@ void dlm_callback_resume(struct dlm_ls *ls) return; more: - spin_lock(&ls->ls_cb_lock); + spin_lock_bh(&ls->ls_cb_lock); list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) { list_del(&cb->list); queue_work(ls->ls_callback_wq, &cb->work); @@ -210,7 +210,7 @@ more: empty = list_empty(&ls->ls_cb_delay); if (empty) clear_bit(LSFL_CB_DELAY, &ls->ls_flags); - spin_unlock(&ls->ls_cb_lock); + spin_unlock_bh(&ls->ls_cb_lock); sum += count; if (!empty) { diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 487dcf05d076..cba5514688ee 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -452,7 +452,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - spin_lock(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); if (!RB_EMPTY_ROOT(tree)) { for (node = rb_first(tree); node; node = rb_next(node)) { r = rb_entry(node, struct dlm_rsb, res_hashnode); @@ -460,12 +460,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); return ri; } } } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); /* * move to the first rsb in the next non-empty bucket @@ -484,18 +484,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) } tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - spin_lock(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); if (!RB_EMPTY_ROOT(tree)) { node = rb_first(tree); r = rb_entry(node, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); *pos = n; return ri; } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); } } @@ -516,7 +516,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) * move to the next rsb in the same bucket */ - spin_lock(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); rp = ri->rsb; next = rb_next(&rp->res_hashnode); @@ -524,12 +524,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) r = rb_entry(next, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); dlm_put_rsb(rp); ++*pos; return ri; } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); dlm_put_rsb(rp); /* @@ -550,18 +550,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) } tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - spin_lock(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); if (!RB_EMPTY_ROOT(tree)) { next = rb_first(tree); r = rb_entry(next, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); *pos = n; return ri; } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); } } @@ -743,7 +743,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, goto out; } - spin_lock(&ls->ls_waiters_lock); + spin_lock_bh(&ls->ls_waiters_lock); memset(debug_buf, 0, sizeof(debug_buf)); list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { @@ -754,7 +754,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, break; pos += ret; } - spin_unlock(&ls->ls_waiters_lock); + spin_unlock_bh(&ls->ls_waiters_lock); dlm_unlock_recovery(ls); rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos); diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 0dc8a1d9e411..ff3a51c759b5 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -204,12 +204,12 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, hash = jhash(name, len, 0); bucket = hash & (ls->ls_rsbtbl_size - 1); - spin_lock(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); if (rv) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, name, len, &r); - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); if (!rv) return r; @@ -245,7 +245,7 @@ static void drop_dir_ctx(struct dlm_ls *ls, int nodeid) { struct dlm_dir_dump *dd, *safe; - write_lock(&ls->ls_dir_dump_lock); + write_lock_bh(&ls->ls_dir_dump_lock); list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) { if (dd->nodeid_init == nodeid) { log_error(ls, "drop dump seq %llu", @@ -254,21 +254,21 @@ static void drop_dir_ctx(struct dlm_ls *ls, int nodeid) kfree(dd); } } - write_unlock(&ls->ls_dir_dump_lock); + write_unlock_bh(&ls->ls_dir_dump_lock); } static struct dlm_dir_dump 
*lookup_dir_dump(struct dlm_ls *ls, int nodeid) { struct dlm_dir_dump *iter, *dd = NULL; - read_lock(&ls->ls_dir_dump_lock); + read_lock_bh(&ls->ls_dir_dump_lock); list_for_each_entry(iter, &ls->ls_dir_dump_list, list) { if (iter->nodeid_init == nodeid) { dd = iter; break; } } - read_unlock(&ls->ls_dir_dump_lock); + read_unlock_bh(&ls->ls_dir_dump_lock); return dd; } @@ -291,9 +291,9 @@ static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid) dd->seq_init = ls->ls_recover_seq; dd->nodeid_init = nodeid; - write_lock(&ls->ls_dir_dump_lock); + write_lock_bh(&ls->ls_dir_dump_lock); list_add(&dd->list, &ls->ls_dir_dump_list); - write_unlock(&ls->ls_dir_dump_lock); + write_unlock_bh(&ls->ls_dir_dump_lock); return dd; } @@ -311,7 +311,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, struct dlm_dir_dump *dd; __be16 be_namelen; - read_lock(&ls->ls_masters_lock); + read_lock_bh(&ls->ls_masters_lock); if (inlen > 1) { dd = lookup_dir_dump(ls, nodeid); @@ -397,12 +397,12 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages", nodeid, dd->sent_res, dd->sent_msg); - write_lock(&ls->ls_dir_dump_lock); + write_lock_bh(&ls->ls_dir_dump_lock); list_del_init(&dd->list); - write_unlock(&ls->ls_dir_dump_lock); + write_unlock_bh(&ls->ls_dir_dump_lock); kfree(dd); } out: - read_unlock(&ls->ls_masters_lock); + read_unlock_bh(&ls->ls_masters_lock); } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index e4cec14f9973..4ff4ef2a5f87 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -333,6 +333,36 @@ void dlm_hold_rsb(struct dlm_rsb *r) hold_rsb(r); } +/* TODO move this to lib/refcount.c */ +static __must_check bool +dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock) +__cond_acquires(lock) +{ + if (refcount_dec_not_one(r)) + return false; + + spin_lock_bh(lock); + if (!refcount_dec_and_test(r)) { + spin_unlock_bh(lock); + return false; + } + + return true; +} + +/* TODO move this to include/linux/kref.h */ +static inline int dlm_kref_put_lock_bh(struct kref *kref, + void (*release)(struct kref *kref), + spinlock_t *lock) +{ + if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) { + release(kref); + return 1; + } + + return 0; +} + /* When all references to the rsb are gone it's transferred to the tossed list for later disposal. 
*/ @@ -342,10 +372,10 @@ static void put_rsb(struct dlm_rsb *r) uint32_t bucket = r->res_bucket; int rv; - rv = kref_put_lock(&r->res_ref, toss_rsb, - &ls->ls_rsbtbl[bucket].lock); + rv = dlm_kref_put_lock_bh(&r->res_ref, toss_rsb, + &ls->ls_rsbtbl[bucket].lock); if (rv) - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); } void dlm_put_rsb(struct dlm_rsb *r) @@ -358,17 +388,17 @@ static int pre_rsb_struct(struct dlm_ls *ls) struct dlm_rsb *r1, *r2; int count = 0; - spin_lock(&ls->ls_new_rsb_spin); + spin_lock_bh(&ls->ls_new_rsb_spin); if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { - spin_unlock(&ls->ls_new_rsb_spin); + spin_unlock_bh(&ls->ls_new_rsb_spin); return 0; } - spin_unlock(&ls->ls_new_rsb_spin); + spin_unlock_bh(&ls->ls_new_rsb_spin); r1 = dlm_allocate_rsb(ls); r2 = dlm_allocate_rsb(ls); - spin_lock(&ls->ls_new_rsb_spin); + spin_lock_bh(&ls->ls_new_rsb_spin); if (r1) { list_add(&r1->res_hashchain, &ls->ls_new_rsb); ls->ls_new_rsb_count++; @@ -378,7 +408,7 @@ static int pre_rsb_struct(struct dlm_ls *ls) ls->ls_new_rsb_count++; } count = ls->ls_new_rsb_count; - spin_unlock(&ls->ls_new_rsb_spin); + spin_unlock_bh(&ls->ls_new_rsb_spin); if (!count) return -ENOMEM; @@ -395,10 +425,10 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, struct dlm_rsb *r; int count; - spin_lock(&ls->ls_new_rsb_spin); + spin_lock_bh(&ls->ls_new_rsb_spin); if (list_empty(&ls->ls_new_rsb)) { count = ls->ls_new_rsb_count; - spin_unlock(&ls->ls_new_rsb_spin); + spin_unlock_bh(&ls->ls_new_rsb_spin); log_debug(ls, "find_rsb retry %d %d %s", count, dlm_config.ci_new_rsb_count, (const char *)name); @@ -410,7 +440,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, /* Convert the empty list_head to a NULL rb_node for tree usage: */ memset(&r->res_hashnode, 0, sizeof(struct rb_node)); ls->ls_new_rsb_count--; - spin_unlock(&ls->ls_new_rsb_spin); + spin_unlock_bh(&ls->ls_new_rsb_spin); r->res_ls = ls; r->res_length = len; @@ -585,7 +615,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, goto out; } - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (error) @@ -655,7 +685,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); goto retry; } if (error) @@ -704,7 +734,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, out_add: error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); out_unlock: - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); out: *r_ret = r; return error; @@ -729,7 +759,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, if (error < 0) goto out; - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (error) @@ -787,7 +817,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); goto retry; } if (error) @@ -802,7 +832,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); out_unlock: - spin_unlock(&ls->ls_rsbtbl[b].lock); + 
spin_unlock_bh(&ls->ls_rsbtbl[b].lock); out: *r_ret = r; return error; @@ -1019,7 +1049,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, if (error < 0) return error; - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (!error) { /* because the rsb is active, we need to lock_rsb before @@ -1027,7 +1057,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, */ hold_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); lock_rsb(r); __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, @@ -1053,14 +1083,14 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, r->res_toss_time = jiffies; /* the rsb was inactive (on toss list) */ - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return 0; not_found: error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); goto retry; } if (error) @@ -1078,7 +1108,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, if (error) { /* should never happen */ dlm_free_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); goto retry; } @@ -1086,7 +1116,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, *result = DLM_LU_ADD; *r_nodeid = from_nodeid; out_unlock: - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return error; } @@ -1097,13 +1127,13 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock(&ls->ls_rsbtbl[i].lock); + spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); if (r->res_hash == hash) dlm_dump_rsb(r); } - spin_unlock(&ls->ls_rsbtbl[i].lock); + spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } } @@ -1116,7 +1146,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (!error) goto out_dump; @@ -1127,7 +1157,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) out_dump: dlm_dump_rsb(r); out: - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); } static void toss_rsb(struct kref *kref) @@ -1208,11 +1238,11 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); - spin_lock(&ls->ls_lkbidr_spin); + spin_lock_bh(&ls->ls_lkbidr_spin); rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); if (rv >= 0) lkb->lkb_id = rv; - spin_unlock(&ls->ls_lkbidr_spin); + spin_unlock_bh(&ls->ls_lkbidr_spin); if (rv < 0) { log_error(ls, "create_lkb idr error %d", rv); @@ -1233,11 +1263,11 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) { struct dlm_lkb *lkb; - spin_lock(&ls->ls_lkbidr_spin); + spin_lock_bh(&ls->ls_lkbidr_spin); lkb = idr_find(&ls->ls_lkbidr, lkid); if (lkb) kref_get(&lkb->lkb_ref); - spin_unlock(&ls->ls_lkbidr_spin); + spin_unlock_bh(&ls->ls_lkbidr_spin); *lkb_ret = lkb; return lkb ? 
0 : -ENOENT; @@ -1261,11 +1291,11 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) uint32_t lkid = lkb->lkb_id; int rv; - rv = kref_put_lock(&lkb->lkb_ref, kill_lkb, - &ls->ls_lkbidr_spin); + rv = dlm_kref_put_lock_bh(&lkb->lkb_ref, kill_lkb, + &ls->ls_lkbidr_spin); if (rv) { idr_remove(&ls->ls_lkbidr, lkid); - spin_unlock(&ls->ls_lkbidr_spin); + spin_unlock_bh(&ls->ls_lkbidr_spin); detach_lkb(lkb); @@ -1406,7 +1436,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error = 0; - spin_lock(&ls->ls_waiters_lock); + spin_lock_bh(&ls->ls_waiters_lock); if (is_overlap_unlock(lkb) || (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) { @@ -1449,7 +1479,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) log_error(ls, "addwait error %x %d flags %x %d %d %s", lkb->lkb_id, error, dlm_iflags_val(lkb), mstype, lkb->lkb_wait_type, lkb->lkb_resource->res_name); - spin_unlock(&ls->ls_waiters_lock); + spin_unlock_bh(&ls->ls_waiters_lock); return error; } @@ -1549,9 +1579,9 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error; - spin_lock(&ls->ls_waiters_lock); + spin_lock_bh(&ls->ls_waiters_lock); error = _remove_from_waiters(lkb, mstype, NULL); - spin_unlock(&ls->ls_waiters_lock); + spin_unlock_bh(&ls->ls_waiters_lock); return error; } @@ -1569,13 +1599,13 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, int error; if (!local) - spin_lock(&ls->ls_waiters_lock); + spin_lock_bh(&ls->ls_waiters_lock); else WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) || !dlm_locking_stopped(ls)); error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); if (!local) - spin_unlock(&ls->ls_waiters_lock); + spin_unlock_bh(&ls->ls_waiters_lock); return error; } @@ -1591,10 +1621,10 @@ static void shrink_bucket(struct dlm_ls *ls, int b) memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return; } @@ -1651,7 +1681,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); else clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); /* * While searching for rsb's to free, we found some that require @@ -1666,16 +1696,16 @@ static void shrink_bucket(struct dlm_ls *ls, int b) name = ls->ls_remove_names[i]; len = ls->ls_remove_lens[i]; - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (rv) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); log_debug(ls, "remove_name not toss %s", name); continue; } if (r->res_master_nodeid != our_nodeid) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); log_debug(ls, "remove_name master %d dir %d our %d %s", r->res_master_nodeid, r->res_dir_nodeid, our_nodeid, name); @@ -1684,7 +1714,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) if (r->res_dir_nodeid == our_nodeid) { /* should never happen */ - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); log_error(ls, "remove_name dir %d master %d our %d %s", r->res_dir_nodeid, 
r->res_master_nodeid, our_nodeid, name); @@ -1693,21 +1723,21 @@ static void shrink_bucket(struct dlm_ls *ls, int b) if (!time_after_eq(jiffies, r->res_toss_time + dlm_config.ci_toss_secs * HZ)) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); log_debug(ls, "remove_name toss_time %lu now %lu %s", r->res_toss_time, jiffies, name); continue; } if (!kref_put(&r->res_ref, kill_rsb)) { - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); log_error(ls, "remove_name in use %s", name); continue; } rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); send_remove(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); dlm_free_rsb(r); } @@ -4171,7 +4201,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); - spin_lock(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl[b].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (rv) { @@ -4181,7 +4211,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) /* should not happen */ log_error(ls, "receive_remove from %d not found %s", from_nodeid, name); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return; } if (r->res_master_nodeid != from_nodeid) { @@ -4189,14 +4219,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) log_error(ls, "receive_remove keep from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return; } log_debug(ls, "receive_remove from %d master %d first %x %s", from_nodeid, r->res_master_nodeid, r->res_first_lkid, name); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return; } @@ -4204,19 +4234,19 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) log_error(ls, "receive_remove toss from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); return; } if (kref_put(&r->res_ref, kill_rsb)) { rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); dlm_free_rsb(r); } else { log_error(ls, "receive_remove from %d rsb ref error", from_nodeid); dlm_print_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl[b].lock); } } @@ -4752,20 +4782,20 @@ static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms, int nodeid) { try_again: - read_lock(&ls->ls_requestqueue_lock); + read_lock_bh(&ls->ls_requestqueue_lock); if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { /* If we were a member of this lockspace, left, and rejoined, other nodes may still be sending us messages from the lockspace generation before we left. 
*/ if (WARN_ON_ONCE(!ls->ls_generation)) { - read_unlock(&ls->ls_requestqueue_lock); + read_unlock_bh(&ls->ls_requestqueue_lock); log_limit(ls, "receive %d from %d ignore old gen", le32_to_cpu(ms->m_type), nodeid); return; } - read_unlock(&ls->ls_requestqueue_lock); - write_lock(&ls->ls_requestqueue_lock); + read_unlock_bh(&ls->ls_requestqueue_lock); + write_lock_bh(&ls->ls_requestqueue_lock); /* recheck because we hold writelock now */ if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { write_unlock_bh(&ls->ls_requestqueue_lock); @@ -4773,10 +4803,10 @@ try_again: } dlm_add_requestqueue(ls, nodeid, ms); - write_unlock(&ls->ls_requestqueue_lock); + write_unlock_bh(&ls->ls_requestqueue_lock); } else { _receive_message(ls, ms, 0); - read_unlock(&ls->ls_requestqueue_lock); + read_unlock_bh(&ls->ls_requestqueue_lock); } } @@ -4836,7 +4866,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to be inactive (in this ls) before transitioning to recovery mode */ - read_lock(&ls->ls_recv_active); + read_lock_bh(&ls->ls_recv_active); if (hd->h_cmd == DLM_MSG) dlm_receive_message(ls, &p->message, nodeid); else if (hd->h_cmd == DLM_RCOM) @@ -4844,7 +4874,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) else log_error(ls, "invalid h_cmd %d from %d lockspace %x", hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); - read_unlock(&ls->ls_recv_active); + read_unlock_bh(&ls->ls_recv_active); dlm_put_lockspace(ls); } @@ -5004,7 +5034,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) { struct dlm_lkb *lkb = NULL, *iter; - spin_lock(&ls->ls_waiters_lock); + spin_lock_bh(&ls->ls_waiters_lock); list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { hold_lkb(iter); @@ -5012,7 +5042,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) break; } } - spin_unlock(&ls->ls_waiters_lock); + spin_unlock_bh(&ls->ls_waiters_lock); return lkb; } @@ -5112,9 +5142,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) } /* Forcibly remove from waiters list */ - spin_lock(&ls->ls_waiters_lock); + spin_lock_bh(&ls->ls_waiters_lock); list_del_init(&lkb->lkb_wait_reply); - spin_unlock(&ls->ls_waiters_lock); + spin_unlock_bh(&ls->ls_waiters_lock); /* * The lkb is now clear of all prior waiters state and can be @@ -5284,7 +5314,7 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) struct rb_node *n; struct dlm_rsb *r; - spin_lock(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); @@ -5295,10 +5325,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) continue; } hold_rsb(r); - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); return r; } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); return NULL; } @@ -5642,10 +5672,10 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, } /* add this new lkb to the per-process list of locks */ - spin_lock(&ua->proc->locks_spin); + spin_lock_bh(&ua->proc->locks_spin); hold_lkb(lkb); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); - spin_unlock(&ua->proc->locks_spin); + spin_unlock_bh(&ua->proc->locks_spin); do_put = false; out_put: trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false); @@ -5775,9 +5805,9 @@ int 
dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, * for the proc locks list. */ - spin_lock(&ua->proc->locks_spin); + spin_lock_bh(&ua->proc->locks_spin); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); - spin_unlock(&ua->proc->locks_spin); + spin_unlock_bh(&ua->proc->locks_spin); out: kfree(ua_tmp); return rv; @@ -5821,11 +5851,11 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error) goto out_put; - spin_lock(&ua->proc->locks_spin); + spin_lock_bh(&ua->proc->locks_spin); /* dlm_user_add_cb() may have already taken lkb off the proc list */ if (!list_empty(&lkb->lkb_ownqueue)) list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); - spin_unlock(&ua->proc->locks_spin); + spin_unlock_bh(&ua->proc->locks_spin); out_put: trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); @@ -5976,7 +6006,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, { struct dlm_lkb *lkb = NULL; - spin_lock(&ls->ls_clear_proc_locks); + spin_lock_bh(&ls->ls_clear_proc_locks); if (list_empty(&proc->locks)) goto out; @@ -5988,7 +6018,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, else set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); out: - spin_unlock(&ls->ls_clear_proc_locks); + spin_unlock_bh(&ls->ls_clear_proc_locks); return lkb; } @@ -6025,7 +6055,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) dlm_put_lkb(lkb); } - spin_lock(&ls->ls_clear_proc_locks); + spin_lock_bh(&ls->ls_clear_proc_locks); /* in-progress unlocks */ list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { @@ -6039,7 +6069,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) dlm_free_cb(cb); } - spin_unlock(&ls->ls_clear_proc_locks); + spin_unlock_bh(&ls->ls_clear_proc_locks); dlm_unlock_recovery(ls); } @@ -6050,13 +6080,13 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) while (1) { lkb = NULL; - spin_lock(&proc->locks_spin); + spin_lock_bh(&proc->locks_spin); if (!list_empty(&proc->locks)) { lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue); } - spin_unlock(&proc->locks_spin); + spin_unlock_bh(&proc->locks_spin); if (!lkb) break; @@ -6066,20 +6096,20 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) dlm_put_lkb(lkb); /* ref from proc->locks list */ } - spin_lock(&proc->locks_spin); + spin_lock_bh(&proc->locks_spin); list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_del_init(&lkb->lkb_ownqueue); set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); dlm_put_lkb(lkb); } - spin_unlock(&proc->locks_spin); + spin_unlock_bh(&proc->locks_spin); - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { list_del(&cb->list); dlm_free_cb(cb); } - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); } /* pid of 0 means purge all orphans */ diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 0f6b2700c0da..45a74869810a 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -69,12 +69,12 @@ static inline int is_master(struct dlm_rsb *r) static inline void lock_rsb(struct dlm_rsb *r) { - spin_lock(&r->res_lock); + spin_lock_bh(&r->res_lock); } static inline void unlock_rsb(struct dlm_rsb *r) { - spin_unlock(&r->res_lock); + spin_unlock_bh(&r->res_lock); } #endif diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index c021bf684fbc..c3681a50decb 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -251,15 +251,15 @@ 
static struct dlm_ls *find_ls_to_scan(void) { struct dlm_ls *ls; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (time_after_eq(jiffies, ls->ls_scan_time + dlm_config.ci_scan_secs * HZ)) { - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return ls; } } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return NULL; } @@ -306,7 +306,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id) { struct dlm_ls *ls; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_global_id == id) { @@ -316,7 +316,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id) } ls = NULL; out: - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return ls; } @@ -324,7 +324,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) { struct dlm_ls *ls; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_local_handle == lockspace) { atomic_inc(&ls->ls_count); @@ -333,7 +333,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) } ls = NULL; out: - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return ls; } @@ -341,7 +341,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor) { struct dlm_ls *ls; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_device.minor == minor) { atomic_inc(&ls->ls_count); @@ -350,7 +350,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor) } ls = NULL; out: - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return ls; } @@ -365,15 +365,15 @@ static void remove_lockspace(struct dlm_ls *ls) retry: wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0); - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); if (atomic_read(&ls->ls_count) != 0) { - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); goto retry; } WARN_ON(ls->ls_create_count != 0); list_del(&ls->ls_list); - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); } static int threads_start(void) @@ -448,7 +448,7 @@ static int new_lockspace(const char *name, const char *cluster, error = 0; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { WARN_ON(ls->ls_create_count <= 0); if (ls->ls_namelen != namelen) @@ -464,7 +464,7 @@ static int new_lockspace(const char *name, const char *cluster, error = 1; break; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (error) goto out; @@ -583,10 +583,10 @@ static int new_lockspace(const char *name, const char *cluster, INIT_LIST_HEAD(&ls->ls_dir_dump_list); rwlock_init(&ls->ls_dir_dump_lock); - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); ls->ls_create_count = 1; list_add(&ls->ls_list, &lslist); - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (flags & DLM_LSFL_FS) { error = dlm_callback_start(ls); @@ -655,9 +655,9 @@ static int new_lockspace(const char *name, const char *cluster, out_callback: dlm_callback_stop(ls); out_delist: - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_del(&ls->ls_list); - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); idr_destroy(&ls->ls_recover_idr); kfree(ls->ls_recover_buf); out_lkbidr: @@ -756,7 +756,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force) { int rv; - spin_lock(&ls->ls_lkbidr_spin); + spin_lock_bh(&ls->ls_lkbidr_spin); if (force == 0) { rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); 
} else if (force == 1) { @@ -764,7 +764,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force) } else { rv = 0; } - spin_unlock(&ls->ls_lkbidr_spin); + spin_unlock_bh(&ls->ls_lkbidr_spin); return rv; } @@ -776,7 +776,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) busy = lockspace_busy(ls, force); - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); if (ls->ls_create_count == 1) { if (busy) { rv = -EBUSY; @@ -790,7 +790,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) } else { rv = -EINVAL; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (rv) { log_debug(ls, "release_lockspace no remove %d", rv); @@ -918,20 +918,19 @@ void dlm_stop_lockspaces(void) restart: count = 0; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { count++; continue; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); log_error(ls, "no userland control daemon, stopping lockspace"); dlm_ls_stop(ls); goto restart; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (count) log_print("dlm user daemon left %d lockspaces", count); } - diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index ab2cfbd2ea77..444dc858c4a4 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -867,36 +867,36 @@ static void process_dlm_messages(struct work_struct *work) { struct processqueue_entry *pentry; - spin_lock(&processqueue_lock); + spin_lock_bh(&processqueue_lock); pentry = list_first_entry_or_null(&processqueue, struct processqueue_entry, list); if (WARN_ON_ONCE(!pentry)) { process_dlm_messages_pending = false; - spin_unlock(&processqueue_lock); + spin_unlock_bh(&processqueue_lock); return; } list_del(&pentry->list); atomic_dec(&processqueue_count); - spin_unlock(&processqueue_lock); + spin_unlock_bh(&processqueue_lock); for (;;) { dlm_process_incoming_buffer(pentry->nodeid, pentry->buf, pentry->buflen); free_processqueue_entry(pentry); - spin_lock(&processqueue_lock); + spin_lock_bh(&processqueue_lock); pentry = list_first_entry_or_null(&processqueue, struct processqueue_entry, list); if (!pentry) { process_dlm_messages_pending = false; - spin_unlock(&processqueue_lock); + spin_unlock_bh(&processqueue_lock); break; } list_del(&pentry->list); atomic_dec(&processqueue_count); - spin_unlock(&processqueue_lock); + spin_unlock_bh(&processqueue_lock); } } @@ -966,14 +966,14 @@ again: memmove(con->rx_leftover_buf, pentry->buf + ret, con->rx_leftover); - spin_lock(&processqueue_lock); + spin_lock_bh(&processqueue_lock); ret = atomic_inc_return(&processqueue_count); list_add_tail(&pentry->list, &processqueue); if (!process_dlm_messages_pending) { process_dlm_messages_pending = true; queue_work(process_workqueue, &process_work); } - spin_unlock(&processqueue_lock); + spin_unlock_bh(&processqueue_lock); if (ret > DLM_MAX_PROCESS_BUFFERS) return DLM_IO_FLUSH; diff --git a/fs/dlm/member.c b/fs/dlm/member.c index ac1b555af9d6..6401916a97ef 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -630,7 +630,7 @@ int dlm_ls_stop(struct dlm_ls *ls) * message to the requestqueue without races. */ - write_lock(&ls->ls_recv_active); + write_lock_bh(&ls->ls_recv_active); /* * Abort any recovery that's in progress (see RECOVER_STOP, @@ -638,23 +638,23 @@ int dlm_ls_stop(struct dlm_ls *ls) * dlm to quit any processing (see RUNNING, dlm_locking_stopped()). 
*/ - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); ls->ls_recover_seq++; /* activate requestqueue and stop processing */ - write_lock(&ls->ls_requestqueue_lock); + write_lock_bh(&ls->ls_requestqueue_lock); set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); - write_unlock(&ls->ls_requestqueue_lock); - spin_unlock(&ls->ls_recover_lock); + write_unlock_bh(&ls->ls_requestqueue_lock); + spin_unlock_bh(&ls->ls_recover_lock); /* * Let dlm_recv run again, now any normal messages will be saved on the * requestqueue for later. */ - write_unlock(&ls->ls_recv_active); + write_unlock_bh(&ls->ls_recv_active); /* * This in_recovery lock does two things: @@ -679,13 +679,13 @@ int dlm_ls_stop(struct dlm_ls *ls) dlm_recoverd_suspend(ls); - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); kfree(ls->ls_slots); ls->ls_slots = NULL; ls->ls_num_slots = 0; ls->ls_slots_size = 0; ls->ls_recover_status = 0; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); dlm_recoverd_resume(ls); @@ -719,12 +719,12 @@ int dlm_ls_start(struct dlm_ls *ls) if (error < 0) goto fail_rv; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); /* the lockspace needs to be stopped before it can be started */ if (!dlm_locking_stopped(ls)) { - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); log_error(ls, "start ignored: lockspace running"); error = -EINVAL; goto fail; @@ -735,7 +735,7 @@ int dlm_ls_start(struct dlm_ls *ls) rv->seq = ++ls->ls_recover_seq; rv_old = ls->ls_recover_args; ls->ls_recover_args = rv; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); if (rv_old) { log_error(ls, "unused recovery %llx %d", diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index ed6fb9b9a582..c34f38e9ee5c 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c @@ -364,9 +364,9 @@ int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) node->users = 0; midcomms_node_reset(node); - spin_lock(&nodes_lock); + spin_lock_bh(&nodes_lock); hlist_add_head_rcu(&node->hlist, &node_hash[r]); - spin_unlock(&nodes_lock); + spin_unlock_bh(&nodes_lock); node->debugfs = dlm_create_debug_comms_file(nodeid, node); return 0; @@ -477,7 +477,7 @@ static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq) static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) { - spin_lock(&node->state_lock); + spin_lock_bh(&node->state_lock); pr_debug("receive passive fin ack from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); @@ -491,13 +491,13 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) wake_up(&node->shutdown_wait); break; default: - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); log_print("%s: unexpected state: %d", __func__, node->state); WARN_ON_ONCE(1); return; } - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); } static void dlm_receive_buffer_3_2_trace(uint32_t seq, @@ -534,7 +534,7 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p, if (is_expected_seq) { switch (p->header.h_cmd) { case DLM_FIN: - spin_lock(&node->state_lock); + spin_lock_bh(&node->state_lock); pr_debug("receive fin msg from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); @@ -575,13 +575,13 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p, /* probably remove_member caught it, do nothing */ break; default: 
- spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); log_print("%s: unexpected state: %d", __func__, node->state); WARN_ON_ONCE(1); return; } - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); break; default: WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); @@ -1182,7 +1182,7 @@ void dlm_midcomms_exit(void) static void dlm_act_fin_ack_rcv(struct midcomms_node *node) { - spin_lock(&node->state_lock); + spin_lock_bh(&node->state_lock); pr_debug("receive active fin ack from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); @@ -1202,13 +1202,13 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node) wake_up(&node->shutdown_wait); break; default: - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); log_print("%s: unexpected state: %d", __func__, node->state); WARN_ON_ONCE(1); return; } - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); } void dlm_midcomms_add_member(int nodeid) @@ -1223,7 +1223,7 @@ void dlm_midcomms_add_member(int nodeid) return; } - spin_lock(&node->state_lock); + spin_lock_bh(&node->state_lock); if (!node->users) { pr_debug("receive add member from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); @@ -1251,7 +1251,7 @@ void dlm_midcomms_add_member(int nodeid) node->users++; pr_debug("node %d users inc count %d\n", nodeid, node->users); - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); srcu_read_unlock(&nodes_srcu, idx); } @@ -1269,13 +1269,13 @@ void dlm_midcomms_remove_member(int nodeid) return; } - spin_lock(&node->state_lock); + spin_lock_bh(&node->state_lock); /* case of dlm_midcomms_addr() created node but * was not added before because dlm_midcomms_close() * removed the node */ if (!node->users) { - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); srcu_read_unlock(&nodes_srcu, idx); return; } @@ -1313,7 +1313,7 @@ void dlm_midcomms_remove_member(int nodeid) break; } } - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); srcu_read_unlock(&nodes_srcu, idx); } @@ -1351,7 +1351,7 @@ static void midcomms_shutdown(struct midcomms_node *node) return; } - spin_lock(&node->state_lock); + spin_lock_bh(&node->state_lock); pr_debug("receive active shutdown for node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { @@ -1370,7 +1370,7 @@ static void midcomms_shutdown(struct midcomms_node *node) */ break; } - spin_unlock(&node->state_lock); + spin_unlock_bh(&node->state_lock); if (DLM_DEBUG_FENCE_TERMINATION) msleep(5000); @@ -1441,9 +1441,9 @@ int dlm_midcomms_close(int nodeid) ret = dlm_lowcomms_close(nodeid); dlm_delete_debug_comms_file(node->debugfs); - spin_lock(&nodes_lock); + spin_lock_bh(&nodes_lock); hlist_del_rcu(&node->hlist); - spin_unlock(&nodes_lock); + spin_unlock_bh(&nodes_lock); srcu_read_unlock(&nodes_srcu, idx); /* wait that all readers left until flush send queue */ diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index 2e3f529f3ff2..be1a71a6303a 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c @@ -143,18 +143,18 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq) { - spin_lock(&ls->ls_rcom_spin); + spin_lock_bh(&ls->ls_rcom_spin); *new_seq = cpu_to_le64(++ls->ls_rcom_seq); set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); - spin_unlock(&ls->ls_rcom_spin); + spin_unlock_bh(&ls->ls_rcom_spin); } static void disallow_sync_reply(struct dlm_ls *ls) { - 
spin_lock(&ls->ls_rcom_spin); + spin_lock_bh(&ls->ls_rcom_spin); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); clear_bit(LSFL_RCOM_READY, &ls->ls_flags); - spin_unlock(&ls->ls_rcom_spin); + spin_unlock_bh(&ls->ls_rcom_spin); } /* @@ -245,10 +245,10 @@ static void receive_rcom_status(struct dlm_ls *ls, goto do_create; } - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); status = ls->ls_recover_status; num_slots = ls->ls_num_slots; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); len += num_slots * sizeof(struct rcom_slot); do_create: @@ -266,9 +266,9 @@ static void receive_rcom_status(struct dlm_ls *ls, if (!num_slots) goto do_send; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); if (ls->ls_num_slots != num_slots) { - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); log_debug(ls, "receive_rcom_status num_slots %d to %d", num_slots, ls->ls_num_slots); rc->rc_result = 0; @@ -277,7 +277,7 @@ static void receive_rcom_status(struct dlm_ls *ls, } dlm_slots_copy_out(ls, rc); - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); do_send: send_rcom_stateless(msg, rc); @@ -285,7 +285,7 @@ static void receive_rcom_status(struct dlm_ls *ls, static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) { - spin_lock(&ls->ls_rcom_spin); + spin_lock_bh(&ls->ls_rcom_spin); if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) { log_debug(ls, "reject reply %d from %d seq %llx expect %llx", @@ -301,7 +301,7 @@ static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); wake_up(&ls->ls_wait_general); out: - spin_unlock(&ls->ls_rcom_spin); + spin_unlock_bh(&ls->ls_rcom_spin); } int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, @@ -613,11 +613,11 @@ void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid) break; } - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); status = ls->ls_recover_status; stop = dlm_recovery_stopped(ls); seq = ls->ls_recover_seq; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS))) goto ignore; diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 172c6b73f37a..13bc845fa305 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -74,9 +74,9 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) uint32_t dlm_recover_status(struct dlm_ls *ls) { uint32_t status; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); status = ls->ls_recover_status; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); return status; } @@ -87,9 +87,9 @@ static void _set_recover_status(struct dlm_ls *ls, uint32_t status) void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) { - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); _set_recover_status(ls, status); - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); } static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, @@ -188,13 +188,13 @@ int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq) rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen); if (!rv) { - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); _set_recover_status(ls, DLM_RS_NODES_ALL); ls->ls_num_slots = num_slots; ls->ls_slots_size = slots_size; 
ls->ls_slots = slots; ls->ls_generation = gen; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); } else { dlm_set_recover_status(ls, DLM_RS_NODES_ALL); } @@ -241,9 +241,9 @@ static int recover_list_empty(struct dlm_ls *ls) { int empty; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); empty = list_empty(&ls->ls_recover_list); - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); return empty; } @@ -252,23 +252,23 @@ static void recover_list_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); if (list_empty(&r->res_recover_list)) { list_add_tail(&r->res_recover_list, &ls->ls_recover_list); ls->ls_recover_list_count++; dlm_hold_rsb(r); } - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); } static void recover_list_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); list_del_init(&r->res_recover_list); ls->ls_recover_list_count--; - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); dlm_put_rsb(r); } @@ -277,7 +277,7 @@ static void recover_list_clear(struct dlm_ls *ls) { struct dlm_rsb *r, *s; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { list_del_init(&r->res_recover_list); r->res_recover_locks_count = 0; @@ -290,17 +290,17 @@ static void recover_list_clear(struct dlm_ls *ls) ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); } static int recover_idr_empty(struct dlm_ls *ls) { int empty = 1; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_idr_lock); if (ls->ls_recover_list_count) empty = 0; - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_idr_lock); return empty; } @@ -310,7 +310,7 @@ static int recover_idr_add(struct dlm_rsb *r) struct dlm_ls *ls = r->res_ls; int rv; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_idr_lock); if (r->res_id) { rv = -1; goto out_unlock; @@ -324,7 +324,7 @@ static int recover_idr_add(struct dlm_rsb *r) dlm_hold_rsb(r); rv = 0; out_unlock: - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_idr_lock); return rv; } @@ -332,11 +332,11 @@ static void recover_idr_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_idr_lock); idr_remove(&ls->ls_recover_idr, r->res_id); r->res_id = 0; ls->ls_recover_list_count--; - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_idr_lock); dlm_put_rsb(r); } @@ -345,9 +345,9 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) { struct dlm_rsb *r; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_idr_lock); r = idr_find(&ls->ls_recover_idr, (int)id); - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_idr_lock); return r; } @@ -356,7 +356,7 @@ static void recover_idr_clear(struct dlm_ls *ls) struct dlm_rsb *r; int id; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_idr_lock); idr_for_each_entry(&ls->ls_recover_idr, r, id) { idr_remove(&ls->ls_recover_idr, id); @@ -372,7 +372,7 @@ static void recover_idr_clear(struct dlm_ls *ls) 
ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_idr_lock); } @@ -887,7 +887,7 @@ void dlm_clear_toss(struct dlm_ls *ls) int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock(&ls->ls_rsbtbl[i].lock); + spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); @@ -895,7 +895,7 @@ void dlm_clear_toss(struct dlm_ls *ls) dlm_free_rsb(r); count++; } - spin_unlock(&ls->ls_rsbtbl[i].lock); + spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } if (count) diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index a11ae1da2f60..c82cc48988c6 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -26,7 +26,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls) struct dlm_rsb *r; int i, error = 0; - write_lock(&ls->ls_masters_lock); + write_lock_bh(&ls->ls_masters_lock); if (!list_empty(&ls->ls_masters_list)) { log_error(ls, "root list not empty"); error = -EINVAL; @@ -46,7 +46,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls) spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } out: - write_unlock(&ls->ls_masters_lock); + write_unlock_bh(&ls->ls_masters_lock); return error; } @@ -54,12 +54,12 @@ static void dlm_release_masters_list(struct dlm_ls *ls) { struct dlm_rsb *r, *safe; - write_lock(&ls->ls_masters_lock); + write_lock_bh(&ls->ls_masters_lock); list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) { list_del_init(&r->res_masters_list); dlm_put_rsb(r); } - write_unlock(&ls->ls_masters_lock); + write_unlock_bh(&ls->ls_masters_lock); } static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) @@ -103,9 +103,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) { int error = -EINTR; - write_lock(&ls->ls_recv_active); + write_lock_bh(&ls->ls_recv_active); - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); if (ls->ls_recover_seq == seq) { set_bit(LSFL_RUNNING, &ls->ls_flags); /* unblocks processes waiting to enter the dlm */ @@ -113,9 +113,9 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); error = 0; } - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); - write_unlock(&ls->ls_recv_active); + write_unlock_bh(&ls->ls_recv_active); return error; } @@ -349,12 +349,12 @@ static void do_ls_recovery(struct dlm_ls *ls) struct dlm_recover *rv = NULL; int error; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); rv = ls->ls_recover_args; ls->ls_recover_args = NULL; if (rv && ls->ls_recover_seq == rv->seq) clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags); - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); if (rv) { error = ls_recover(ls, rv); diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index 9b646026df46..719a5243a069 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c @@ -68,7 +68,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls) struct dlm_message *ms; int error = 0; - write_lock(&ls->ls_requestqueue_lock); + write_lock_bh(&ls->ls_requestqueue_lock); for (;;) { if (list_empty(&ls->ls_requestqueue)) { clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); @@ -96,11 +96,11 @@ int dlm_process_requestqueue(struct dlm_ls *ls) error = -EINTR; break; } - write_unlock(&ls->ls_requestqueue_lock); + write_unlock_bh(&ls->ls_requestqueue_lock); schedule(); - write_lock(&ls->ls_requestqueue_lock); + 
write_lock_bh(&ls->ls_requestqueue_lock); } - write_unlock(&ls->ls_requestqueue_lock); + write_unlock_bh(&ls->ls_requestqueue_lock); return error; } @@ -135,7 +135,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) struct dlm_message *ms; struct rq_entry *e, *safe; - write_lock(&ls->ls_requestqueue_lock); + write_lock_bh(&ls->ls_requestqueue_lock); list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ms = &e->request; @@ -144,6 +144,6 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) kfree(e); } } - write_unlock(&ls->ls_requestqueue_lock); + write_unlock_bh(&ls->ls_requestqueue_lock); } diff --git a/fs/dlm/user.c b/fs/dlm/user.c index b4971ba4bdd6..3173b974e8c8 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -189,7 +189,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, return; ls = lkb->lkb_resource->res_ls; - spin_lock(&ls->ls_clear_proc_locks); + spin_lock_bh(&ls->ls_clear_proc_locks); /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed @@ -211,7 +211,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status)) set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags); - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb); switch (rv) { @@ -232,23 +232,23 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, case DLM_ENQUEUE_CALLBACK_FAILURE: fallthrough; default: - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); WARN_ON_ONCE(1); goto out; } - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { /* N.B. 
spin_lock locks_spin, not asts_spin */ - spin_lock(&proc->locks_spin); + spin_lock_bh(&proc->locks_spin); if (!list_empty(&lkb->lkb_ownqueue)) { list_del_init(&lkb->lkb_ownqueue); dlm_put_lkb(lkb); } - spin_unlock(&proc->locks_spin); + spin_unlock_bh(&proc->locks_spin); } out: - spin_unlock(&ls->ls_clear_proc_locks); + spin_unlock_bh(&ls->ls_clear_proc_locks); } static int device_user_lock(struct dlm_user_proc *proc, @@ -817,10 +817,10 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)) return -EINVAL; - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); if (list_empty(&proc->asts)) { if (file->f_flags & O_NONBLOCK) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return -EAGAIN; } @@ -829,16 +829,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, repeat: set_current_state(TASK_INTERRUPTIBLE); if (list_empty(&proc->asts) && !signal_pending(current)) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); schedule(); - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); goto repeat; } set_current_state(TASK_RUNNING); remove_wait_queue(&proc->wait, &wait); if (signal_pending(current)) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return -ERESTARTSYS; } } @@ -849,7 +849,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, cb = list_first_entry(&proc->asts, struct dlm_callback, list); list_del(&cb->list); - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); if (cb->flags & DLM_CB_BAST) { trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name, @@ -874,12 +874,12 @@ static __poll_t device_poll(struct file *file, poll_table *wait) poll_wait(file, &proc->wait, wait); - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); if (!list_empty(&proc->asts)) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return EPOLLIN | EPOLLRDNORM; } - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return 0; } -- cgit v1.2.3 From 92d59adfaf710f34ae7788fa54f0731a7640833b Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 2 Apr 2024 15:18:10 -0400 Subject: dlm: do message processing in softirq context Move dlm message processing from an ordered workqueue context to an ordered softirq context. Handling dlm messages in softirq will allow requests to be cleared more quickly and efficiently, and should avoid longer queues of incomplete requests. Later patches are expected to run completion/blocking callbacks directly from this message processing context, further reducing context switches required to complete a request. In the longer term, concurrent message processing could be implemented. 
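For illustration, the back-pressure scheme this patch relies on reduces to the following minimal, self-contained sketch; the example_* names are hypothetical stand-ins for the dlm internals, not code from the patch. Producers count queued entries, the consumer wakes waiters when the count drops to zero, and a flusher sleeps on the waitqueue instead of calling flush_workqueue(), which could deadlock when invoked from a WQ_MEM_RECLAIM workqueue against a non-reclaim workqueue.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_queue);
static atomic_t example_count = ATOMIC_INIT(0);

struct example_entry {
	struct list_head list;
};

/* producer: queue an entry and account for it */
static void example_produce(struct example_entry *e)
{
	spin_lock_bh(&example_lock);
	list_add_tail(&e->list, &example_queue);
	atomic_inc(&example_count);
	spin_unlock_bh(&example_lock);
}

/* consumer: dequeue one entry; wake any flusher once drained */
static struct example_entry *example_consume(void)
{
	struct example_entry *e;

	spin_lock_bh(&example_lock);
	e = list_first_entry_or_null(&example_queue,
				     struct example_entry, list);
	if (e) {
		list_del(&e->list);
		if (atomic_dec_and_test(&example_count))
			wake_up(&example_wq);
	}
	spin_unlock_bh(&example_lock);
	return e;
}

/* flusher: sleep until the consumer has drained everything */
static void example_flush(void)
{
	wait_event(example_wq, !atomic_read(&example_count));
}

The design choice is that the waiter only backs off the sender; it does not take over processing, so ordering of message handling is preserved.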
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 444dc858c4a4..6b8078085e56 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -204,6 +204,7 @@ static void process_dlm_messages(struct work_struct *work); static DECLARE_WORK(process_work, process_dlm_messages); static DEFINE_SPINLOCK(processqueue_lock); static bool process_dlm_messages_pending; +static DECLARE_WAIT_QUEUE_HEAD(processqueue_wq); static atomic_t processqueue_count; static LIST_HEAD(processqueue); @@ -877,7 +878,8 @@ static void process_dlm_messages(struct work_struct *work) } list_del(&pentry->list); - atomic_dec(&processqueue_count); + if (atomic_dec_and_test(&processqueue_count)) + wake_up(&processqueue_wq); spin_unlock_bh(&processqueue_lock); for (;;) { @@ -895,7 +897,8 @@ static void process_dlm_messages(struct work_struct *work) } list_del(&pentry->list); - atomic_dec(&processqueue_count); + if (atomic_dec_and_test(&processqueue_count)) + wake_up(&processqueue_wq); spin_unlock_bh(&processqueue_lock); } } @@ -1511,7 +1514,20 @@ static void process_recv_sockets(struct work_struct *work) /* CF_RECV_PENDING cleared */ break; case DLM_IO_FLUSH: - flush_workqueue(process_workqueue); + /* we can't flush the process_workqueue here because a + * WQ_MEM_RECLAIM workqueue can cause a deadlock for a non + * WQ_MEM_RECLAIM workqueue such as process_workqueue. Instead + * we have a waitqueue to wait until all messages are + * processed. + * + * This handling is only necessary to back off the sender and + * not queue all messages from the socket layer into the DLM + * processqueue. When DLM is capable of parsing multiple messages + * on, e.g., a per-socket basis this handling might be + * removed. Especially in a message burst we are too slow to + * process messages and the queue will fill up memory. + */ + wait_event(processqueue_wq, !atomic_read(&processqueue_count)); fallthrough; case DLM_IO_RESCHED: cond_resched(); @@ -1701,11 +1717,7 @@ static int work_start(void) return -ENOMEM; } - /* ordered dlm message process queue, * should be converted to a tasklet */ - process_workqueue = alloc_ordered_workqueue("dlm_process", - WQ_HIGHPRI | WQ_MEM_RECLAIM); + process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH, 0); if (!process_workqueue) { log_print("can't start dlm_process"); destroy_workqueue(io_workqueue); -- cgit v1.2.3 From 700b04808fad2eac24abf050f234f059199fa3fe Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:35 -0400 Subject: dlm: increment ls_count for dlm_scand Increment the ls_count value while dlm_scand is processing a lockspace so that release_lockspace()/remove_lockspace() will wait for dlm_scand to finish.
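The mechanism here is the standard pin-before-unlock reference pattern; a minimal sketch follows, assuming a hypothetical example_obj type with an atomic reference count (none of these names are the dlm ones). The scanner takes a reference while still holding the list lock, so a remover that waits for the count to drop cannot free the object out from under the scanner.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	struct list_head list;
	atomic_t count;
	bool needs_scan;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);

/* scanner: pin the object before dropping the list lock */
static struct example_obj *example_find_to_scan(void)
{
	struct example_obj *o;

	spin_lock_bh(&example_list_lock);
	list_for_each_entry(o, &example_list, list) {
		if (o->needs_scan) {
			atomic_inc(&o->count);
			spin_unlock_bh(&example_list_lock);
			return o;
		}
	}
	spin_unlock_bh(&example_list_lock);
	return NULL;
}

/* drop the pin; the last put frees the object */
static void example_put(struct example_obj *o)
{
	if (atomic_dec_and_test(&o->count))
		kfree(o);
}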
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lockspace.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index c3681a50decb..731c48371a27 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -255,6 +255,7 @@ static struct dlm_ls *find_ls_to_scan(void) list_for_each_entry(ls, &lslist, ls_list) { if (time_after_eq(jiffies, ls->ls_scan_time + dlm_config.ci_scan_secs * HZ)) { + atomic_inc(&ls->ls_count); spin_unlock_bh(&lslist_lock); return ls; } @@ -277,6 +278,8 @@ static int dlm_scand(void *data) } else { ls->ls_scan_time += HZ; } + + dlm_put_lockspace(ls); continue; } schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ); -- cgit v1.2.3 From dcdaad05ca15150ae076299ba827867f243c0623 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:36 -0400 Subject: dlm: change to single hashtable lock Prepare to replace our own hash table with rhashtable by replacing the per-bucket locks in our own hash table with a single lock. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 24 ++++++++-------- fs/dlm/dir.c | 4 +-- fs/dlm/dlm_internal.h | 2 +- fs/dlm/lock.c | 77 +++++++++++++++++++++++++-------------------------- fs/dlm/lockspace.c | 2 +- fs/dlm/recover.c | 4 +-- fs/dlm/recoverd.c | 8 +++--- 7 files changed, 60 insertions(+), 61 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index cba5514688ee..b8234eba5e34 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -452,7 +452,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); if (!RB_EMPTY_ROOT(tree)) { for (node = rb_first(tree); node; node = rb_next(node)) { r = rb_entry(node, struct dlm_rsb, res_hashnode); @@ -460,12 +460,12 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return ri; } } } - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); /* * move to the first rsb in the next non-empty bucket @@ -484,18 +484,18 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) } tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); if (!RB_EMPTY_ROOT(tree)) { node = rb_first(tree); r = rb_entry(node, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); *pos = n; return ri; } - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); } } @@ -516,7 +516,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) * move to the next rsb in the same bucket */ - spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); rp = ri->rsb; next = rb_next(&rp->res_hashnode); @@ -524,12 +524,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) r = rb_entry(next, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_put_rsb(rp); ++*pos; return ri; } - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_put_rsb(rp); /* @@ -550,18 +550,18 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) } tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); if (!RB_EMPTY_ROOT(tree)) { next = rb_first(tree); r = rb_entry(next, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); *pos = n; return ri; } - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); } } diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index ff3a51c759b5..5315f4f46cc7 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -204,12 +204,12 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, hash = jhash(name, len, 0); bucket = hash & (ls->ls_rsbtbl_size - 1); - spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); if (rv) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, name, len, &r); - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); if (!rv) return r; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 269c12e0824f..2c961db53b27 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -105,7 +105,6 @@ do { \ struct dlm_rsbtable { struct rb_root keep; struct rb_root toss; - spinlock_t lock; unsigned long flags; }; @@ -593,6 +592,7 @@ struct dlm_ls { spinlock_t ls_lkbidr_spin; struct dlm_rsbtable *ls_rsbtbl; + spinlock_t ls_rsbtbl_lock; uint32_t ls_rsbtbl_size; spinlock_t ls_waiters_lock; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 4ff4ef2a5f87..af57d9d12434 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -369,13 +369,12 @@ static inline int dlm_kref_put_lock_bh(struct kref *kref, static void put_rsb(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - uint32_t bucket = r->res_bucket; int rv; rv = dlm_kref_put_lock_bh(&r->res_ref, toss_rsb, - &ls->ls_rsbtbl[bucket].lock); + &ls->ls_rsbtbl_lock); if (rv) - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); } void dlm_put_rsb(struct dlm_rsb *r) @@ -615,7 +614,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, goto out; } - 
spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (error) @@ -685,7 +684,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); goto retry; } if (error) @@ -734,7 +733,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, out_add: error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); out_unlock: - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); out: *r_ret = r; return error; @@ -759,7 +758,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, if (error < 0) goto out; - spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (error) @@ -817,7 +816,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); goto retry; } if (error) @@ -832,7 +831,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); out_unlock: - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); out: *r_ret = r; return error; @@ -1049,7 +1048,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, if (error < 0) return error; - spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (!error) { /* because the rsb is active, we need to lock_rsb before @@ -1057,7 +1056,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, */ hold_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); lock_rsb(r); __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, @@ -1083,14 +1082,14 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, r->res_toss_time = jiffies; /* the rsb was inactive (on toss list) */ - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return 0; not_found: error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); goto retry; } if (error) @@ -1108,7 +1107,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, if (error) { /* should never happen */ dlm_free_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); goto retry; } @@ -1116,7 +1115,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, *result = DLM_LU_ADD; *r_nodeid = from_nodeid; out_unlock: - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return error; } @@ -1126,15 +1125,15 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) struct dlm_rsb *r; int i; + spin_lock_bh(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); if (r->res_hash == hash) dlm_dump_rsb(r); } - spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } + spin_unlock_bh(&ls->ls_rsbtbl_lock); } void dlm_dump_rsb_name(struct 
dlm_ls *ls, const char *name, int len) @@ -1146,7 +1145,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); - spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (!error) goto out_dump; @@ -1157,7 +1156,7 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) out_dump: dlm_dump_rsb(r); out: - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); } static void toss_rsb(struct kref *kref) @@ -1621,10 +1620,10 @@ static void shrink_bucket(struct dlm_ls *ls, int b) memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); - spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return; } @@ -1681,7 +1680,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); else clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); /* * While searching for rsb's to free, we found some that require @@ -1696,16 +1695,16 @@ static void shrink_bucket(struct dlm_ls *ls, int b) name = ls->ls_remove_names[i]; len = ls->ls_remove_lens[i]; - spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (rv) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); log_debug(ls, "remove_name not toss %s", name); continue; } if (r->res_master_nodeid != our_nodeid) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); log_debug(ls, "remove_name master %d dir %d our %d %s", r->res_master_nodeid, r->res_dir_nodeid, our_nodeid, name); @@ -1714,7 +1713,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) if (r->res_dir_nodeid == our_nodeid) { /* should never happen */ - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); log_error(ls, "remove_name dir %d master %d our %d %s", r->res_dir_nodeid, r->res_master_nodeid, our_nodeid, name); @@ -1723,21 +1722,21 @@ static void shrink_bucket(struct dlm_ls *ls, int b) if (!time_after_eq(jiffies, r->res_toss_time + dlm_config.ci_toss_secs * HZ)) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); log_debug(ls, "remove_name toss_time %lu now %lu %s", r->res_toss_time, jiffies, name); continue; } if (!kref_put(&r->res_ref, kill_rsb)) { - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); log_error(ls, "remove_name in use %s", name); continue; } rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); send_remove(r); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_free_rsb(r); } @@ -4201,7 +4200,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); - spin_lock_bh(&ls->ls_rsbtbl[b].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (rv) { @@ -4211,7 +4210,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) /* should not happen */ log_error(ls, "receive_remove from %d not found %s", from_nodeid, name); - 
spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return; } if (r->res_master_nodeid != from_nodeid) { @@ -4219,14 +4218,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) log_error(ls, "receive_remove keep from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return; } log_debug(ls, "receive_remove from %d master %d first %x %s", from_nodeid, r->res_master_nodeid, r->res_first_lkid, name); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return; } @@ -4234,19 +4233,19 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) log_error(ls, "receive_remove toss from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return; } if (kref_put(&r->res_ref, kill_rsb)) { rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_free_rsb(r); } else { log_error(ls, "receive_remove from %d rsb ref error", from_nodeid); dlm_print_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl[b].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); } } @@ -5314,7 +5313,7 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) struct rb_node *n; struct dlm_rsb *r; - spin_lock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_lock_bh(&ls->ls_rsbtbl_lock); for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); @@ -5325,10 +5324,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) continue; } hold_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return r; } - spin_unlock_bh(&ls->ls_rsbtbl[bucket].lock); + spin_unlock_bh(&ls->ls_rsbtbl_lock); return NULL; } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 731c48371a27..d33dbcd5f4a1 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -495,6 +495,7 @@ static int new_lockspace(const char *name, const char *cluster, */ ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL)); + spin_lock_init(&ls->ls_rsbtbl_lock); size = READ_ONCE(dlm_config.ci_rsbtbl_size); ls->ls_rsbtbl_size = size; @@ -504,7 +505,6 @@ static int new_lockspace(const char *name, const char *cluster, for (i = 0; i < size; i++) { ls->ls_rsbtbl[i].keep.rb_node = NULL; ls->ls_rsbtbl[i].toss.rb_node = NULL; - spin_lock_init(&ls->ls_rsbtbl[i].lock); } for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 13bc845fa305..9a4c8e4b2442 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -886,8 +886,8 @@ void dlm_clear_toss(struct dlm_ls *ls) unsigned int count = 0; int i; + spin_lock(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); @@ -895,8 +895,8 @@ void dlm_clear_toss(struct dlm_ls *ls) dlm_free_rsb(r); count++; } - spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } + spin_unlock_bh(&ls->ls_rsbtbl_lock); if (count) log_rinfo(ls, "dlm_clear_toss %u done", count); diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index c82cc48988c6..fa6608363302 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -33,8 +33,8 @@ static int dlm_create_masters_list(struct dlm_ls *ls) goto 
out; } + spin_lock_bh(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); if (r->res_nodeid) @@ -43,8 +43,8 @@ static int dlm_create_masters_list(struct dlm_ls *ls) list_add(&r->res_masters_list, &ls->ls_masters_list); dlm_hold_rsb(r); } - spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } + spin_unlock_bh(&ls->ls_rsbtbl_lock); out: write_unlock_bh(&ls->ls_masters_lock); return error; @@ -68,8 +68,8 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) struct dlm_rsb *r; int i; + spin_lock_bh(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock_bh(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); list_add(&r->res_root_list, root_list); @@ -78,8 +78,8 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) log_error(ls, "%s toss not empty", __func__); - spin_unlock_bh(&ls->ls_rsbtbl[i].lock); } + spin_unlock_bh(&ls->ls_rsbtbl_lock); } static void dlm_release_root_list(struct list_head *root_list) -- cgit v1.2.3 From 2d90354027ad2011c0c5a2a404fe81afc745c2a7 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:37 -0400 Subject: dlm: merge toss and keep hash table lists into one list There are several places where lock processing can perform two hash table lookups, first in the "keep" list, and if not found, in the "toss" list. This patch introduces a new rsb state flag "RSB_TOSS" to represent the difference between the state of being on keep vs toss list, so that the two lists can be combined. This avoids cases of two lookups. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 29 ++++++++++++-- fs/dlm/dir.c | 6 +-- fs/dlm/dlm_internal.h | 4 +- fs/dlm/lock.c | 103 +++++++++++++++++++++++++++----------------------- fs/dlm/lockspace.c | 13 ++----- fs/dlm/recover.c | 7 +++- fs/dlm/recoverd.c | 12 +++--- 7 files changed, 98 insertions(+), 76 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index b8234eba5e34..37f4dfca5e44 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -450,12 +450,20 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) if (seq->op == &format4_seq_ops) ri->format = 4; - tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; + tree = &ls->ls_rsbtbl[bucket].r; spin_lock_bh(&ls->ls_rsbtbl_lock); if (!RB_EMPTY_ROOT(tree)) { for (node = rb_first(tree); node; node = rb_next(node)) { r = rb_entry(node, struct dlm_rsb, res_hashnode); + if (toss) { + if (!rsb_flag(r, RSB_TOSS)) + continue; + } else { + if (rsb_flag(r, RSB_TOSS)) + continue; + } + if (!entry--) { dlm_hold_rsb(r); ri->rsb = r; @@ -482,12 +490,20 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) kfree(ri); return NULL; } - tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; + tree = &ls->ls_rsbtbl[bucket].r; spin_lock_bh(&ls->ls_rsbtbl_lock); if (!RB_EMPTY_ROOT(tree)) { node = rb_first(tree); r = rb_entry(node, struct dlm_rsb, res_hashnode); + if (toss) { + if (!rsb_flag(r, RSB_TOSS)) + continue; + } else { + if (rsb_flag(r, RSB_TOSS)) + continue; + } + dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; @@ -548,12 +564,19 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) ++*pos; return NULL; } - tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; + tree = &ls->ls_rsbtbl[bucket].r; spin_lock_bh(&ls->ls_rsbtbl_lock); if (!RB_EMPTY_ROOT(tree)) { next = rb_first(tree); r = rb_entry(next, struct dlm_rsb, res_hashnode); + if (toss) { + if (!rsb_flag(r, RSB_TOSS)) + continue; + } else { + if (rsb_flag(r, RSB_TOSS)) + continue; + } dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 5315f4f46cc7..f8039f3ee2d1 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -205,12 +205,8 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, bucket = hash & (ls->ls_rsbtbl_size - 1); spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); - if (rv) - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, - name, len, &r); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].r, name, len, &r); spin_unlock_bh(&ls->ls_rsbtbl_lock); - if (!rv) return r; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 2c961db53b27..af88fc2f978c 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -103,8 +103,7 @@ do { \ #define DLM_RTF_SHRINK_BIT 0 struct dlm_rsbtable { - struct rb_root keep; - struct rb_root toss; + struct rb_root r; unsigned long flags; }; @@ -376,6 +375,7 @@ enum rsb_flags { RSB_RECOVER_CONVERT, RSB_RECOVER_GRANT, RSB_RECOVER_LVB_INVAL, + RSB_TOSS, }; static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag) diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index af57d9d12434..08ec1a04476a 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -616,23 +616,22 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); if (error) - goto do_toss; + goto do_new; /* * rsb is active, so we can't check master_nodeid without lock_rsb. 
*/ + if (rsb_flag(r, RSB_TOSS)) + goto do_toss; + kref_get(&r->res_ref); goto out_unlock; do_toss: - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) - goto do_new; - /* * rsb found inactive (master_nodeid may be out of date unless * we are the dir_nodeid or were the master) No other thread @@ -669,8 +668,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, r->res_first_lkid = 0; } - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); + rsb_clear_flag(r, RSB_TOSS); goto out_unlock; @@ -731,7 +729,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, } out_add: - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); + error = rsb_insert(r, &ls->ls_rsbtbl[b].r); out_unlock: spin_unlock_bh(&ls->ls_rsbtbl_lock); out: @@ -760,8 +758,11 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); if (error) + goto do_new; + + if (rsb_flag(r, RSB_TOSS)) goto do_toss; /* @@ -773,10 +774,6 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, do_toss: - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) - goto do_new; - /* * rsb found inactive. No other thread is using this rsb because * it's on the toss list, so we can look at or update @@ -804,8 +801,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, r->res_nodeid = 0; } - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); + rsb_clear_flag(r, RSB_TOSS); goto out_unlock; @@ -829,7 +825,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, r->res_nodeid = (dir_nodeid == our_nodeid) ? 
0 : dir_nodeid; kref_init(&r->res_ref); - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); + error = rsb_insert(r, &ls->ls_rsbtbl[b].r); out_unlock: spin_unlock_bh(&ls->ls_rsbtbl_lock); out: @@ -1049,8 +1045,11 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, return error; spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); if (!error) { + if (rsb_flag(r, RSB_TOSS)) + goto do_toss; + /* because the rsb is active, we need to lock_rsb before * checking/changing re_master_nodeid */ @@ -1067,12 +1066,11 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, put_rsb(r); return 0; - } - - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) + } else { goto not_found; + } + do_toss: /* because the rsb is inactive (on toss list), it's not refcounted * and lock_rsb is not used, but is protected by the rsbtbl lock */ @@ -1102,8 +1100,9 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, r->res_nodeid = from_nodeid; kref_init(&r->res_ref); r->res_toss_time = jiffies; + rsb_set_flag(r, RSB_TOSS); - error = rsb_insert(r, &ls->ls_rsbtbl[b].toss); + error = rsb_insert(r, &ls->ls_rsbtbl[b].r); if (error) { /* should never happen */ dlm_free_rsb(r); @@ -1127,8 +1126,11 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) spin_lock_bh(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { + for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); + if (rsb_flag(r, RSB_TOSS)) + continue; + if (r->res_hash == hash) dlm_dump_rsb(r); } @@ -1146,14 +1148,10 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) b = hash & (ls->ls_rsbtbl_size - 1); spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); if (!error) - goto out_dump; - - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) goto out; - out_dump: + dlm_dump_rsb(r); out: spin_unlock_bh(&ls->ls_rsbtbl_lock); @@ -1166,8 +1164,8 @@ static void toss_rsb(struct kref *kref) DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); kref_init(&r->res_ref); - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep); - rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss); + WARN_ON(rsb_flag(r, RSB_TOSS)); + rsb_set_flag(r, RSB_TOSS); r->res_toss_time = jiffies; set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags); if (r->res_lvbptr) { @@ -1627,9 +1625,11 @@ static void shrink_bucket(struct dlm_ls *ls, int b) return; } - for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { + for (n = rb_first(&ls->ls_rsbtbl[b].r); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); + if (!rsb_flag(r, RSB_TOSS)) + continue; /* If we're the directory record for this rsb, and we're not the master of it, then we need to wait @@ -1672,7 +1672,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) continue; } - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); + rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); dlm_free_rsb(r); } @@ -1696,8 +1696,14 @@ static void shrink_bucket(struct dlm_ls *ls, int b) len = ls->ls_remove_lens[i]; spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = 
dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); if (rv) { + spin_unlock_bh(&ls->ls_rsbtbl_lock); + log_error(ls, "remove_name not found %s", name); + continue; + } + + if (!rsb_flag(r, RSB_TOSS)) { spin_unlock_bh(&ls->ls_rsbtbl_lock); log_debug(ls, "remove_name not toss %s", name); continue; @@ -1734,7 +1740,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) continue; } - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); + rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); send_remove(r); spin_unlock_bh(&ls->ls_rsbtbl_lock); @@ -4202,17 +4208,16 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); if (rv) { - /* verify the rsb is on keep list per comment above */ - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); - if (rv) { - /* should not happen */ - log_error(ls, "receive_remove from %d not found %s", - from_nodeid, name); - spin_unlock_bh(&ls->ls_rsbtbl_lock); - return; - } + /* should not happen */ + log_error(ls, "%s from %d not found %s", __func__, + from_nodeid, name); + spin_unlock_bh(&ls->ls_rsbtbl_lock); + return; + } + + if (!rsb_flag(r, RSB_TOSS)) { if (r->res_master_nodeid != from_nodeid) { /* should not happen */ log_error(ls, "receive_remove keep from %d master %d", @@ -4238,7 +4243,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) } if (kref_put(&r->res_ref, kill_rsb)) { - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); + rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_free_rsb(r); } else { @@ -5314,8 +5319,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) struct dlm_rsb *r; spin_lock_bh(&ls->ls_rsbtbl_lock); - for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { + for (n = rb_first(&ls->ls_rsbtbl[bucket].r); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); + if (rsb_flag(r, RSB_TOSS)) + continue; if (!rsb_flag(r, RSB_RECOVER_GRANT)) continue; diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index d33dbcd5f4a1..b5184ad550fa 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -503,8 +503,7 @@ static int new_lockspace(const char *name, const char *cluster, if (!ls->ls_rsbtbl) goto out_lsfree; for (i = 0; i < size; i++) { - ls->ls_rsbtbl[i].keep.rb_node = NULL; - ls->ls_rsbtbl[i].toss.rb_node = NULL; + ls->ls_rsbtbl[i].r.rb_node = NULL; } for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { @@ -837,15 +836,9 @@ static int release_lockspace(struct dlm_ls *ls, int force) */ for (i = 0; i < ls->ls_rsbtbl_size; i++) { - while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) { + while ((n = rb_first(&ls->ls_rsbtbl[i].r))) { rsb = rb_entry(n, struct dlm_rsb, res_hashnode); - rb_erase(n, &ls->ls_rsbtbl[i].keep); - dlm_free_rsb(rsb); - } - - while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) { - rsb = rb_entry(n, struct dlm_rsb, res_hashnode); - rb_erase(n, &ls->ls_rsbtbl[i].toss); + rb_erase(n, &ls->ls_rsbtbl[i].r); dlm_free_rsb(rsb); } } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 9a4c8e4b2442..e53d88e4ec93 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -888,10 +888,13 @@ void dlm_clear_toss(struct dlm_ls *ls) spin_lock(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { + for (n 
= rb_first(&ls->ls_rsbtbl[i].r); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); - rb_erase(n, &ls->ls_rsbtbl[i].toss); + if (!rsb_flag(r, RSB_TOSS)) + continue; + + rb_erase(n, &ls->ls_rsbtbl[i].r); dlm_free_rsb(r); count++; } diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index fa6608363302..ad696528ebe7 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -35,9 +35,9 @@ static int dlm_create_masters_list(struct dlm_ls *ls) spin_lock_bh(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { + for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (r->res_nodeid) + if (rsb_flag(r, RSB_TOSS) || r->res_nodeid) continue; list_add(&r->res_masters_list, &ls->ls_masters_list); @@ -70,14 +70,14 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) spin_lock_bh(&ls->ls_rsbtbl_lock); for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { + for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); + if (WARN_ON_ONCE(rsb_flag(r, RSB_TOSS))) + continue; + list_add(&r->res_root_list, root_list); dlm_hold_rsb(r); } - - if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) log_error(ls, "%s toss not empty", __func__); } spin_unlock_bh(&ls->ls_rsbtbl_lock); } -- cgit v1.2.3 From 93a693d19d2a4aeaa5aede5354cc0f749a780374 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:38 -0400 Subject: dlm: add rsb lists for iteration To prepare for using rhashtable, add two rsb lists for iterating through rsb's in two uncommon cases where this is necessary: - when dumping rsb state from debugfs, now using seq_list (see the sketch after this list). - when looking at all rsb's during recovery.
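A minimal sketch of seq_list iteration under a spinlock, as the debugfs dumps now do; the example_* names are placeholders rather than the dlm code, and the simplified scheme assumes the list is only modified under the same lock. ->start takes the lock and positions the cursor with seq_list_start(), ->next advances it with seq_list_next(), and ->stop releases the lock.

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct example_obj {
	struct list_head list;
	int id;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);

/* hold the lock for the whole dump and position the cursor */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&example_list_lock);
	return seq_list_start(&example_list, *pos);
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &example_list, pos);
}

static void example_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&example_list_lock);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	struct example_obj *o = list_entry(v, struct example_obj, list);

	seq_printf(seq, "%d\n", o->id);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};

Holding the lock from ->start to ->stop keeps the cursor stable without per-entry reference counting, which is what lets the patch drop the rsbtbl_iter bookkeeping.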
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 233 ++++++++------------------------------------------ fs/dlm/dlm_internal.h | 4 + fs/dlm/lock.c | 47 +++++----- fs/dlm/lockspace.c | 3 + fs/dlm/recover.c | 24 ++---- fs/dlm/recoverd.c | 34 +++----- 6 files changed, 84 insertions(+), 261 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 37f4dfca5e44..70567919f1b7 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -366,12 +366,10 @@ static void print_format4(struct dlm_rsb *r, struct seq_file *s) unlock_rsb(r); } -struct rsbtbl_iter { - struct dlm_rsb *rsb; - unsigned bucket; - int format; - int header; -}; +static const struct seq_operations format1_seq_ops; +static const struct seq_operations format2_seq_ops; +static const struct seq_operations format3_seq_ops; +static const struct seq_operations format4_seq_ops; /* * If the buffer is full, seq_printf can be called again, but it @@ -382,220 +380,61 @@ struct rsbtbl_iter { static int table_seq_show(struct seq_file *seq, void *iter_ptr) { - struct rsbtbl_iter *ri = iter_ptr; - - switch (ri->format) { - case 1: - print_format1(ri->rsb, seq); - break; - case 2: - if (ri->header) { - seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n"); - ri->header = 0; - } - print_format2(ri->rsb, seq); - break; - case 3: - if (ri->header) { - seq_puts(seq, "rsb ptr nodeid first_lkid flags !root_list_empty !recover_list_empty recover_locks_count len\n"); - ri->header = 0; - } - print_format3(ri->rsb, seq); - break; - case 4: - if (ri->header) { - seq_puts(seq, "rsb ptr nodeid master_nodeid dir_nodeid our_nodeid toss_time flags len str|hex name\n"); - ri->header = 0; - } - print_format4(ri->rsb, seq); - break; - } + struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_rsbs_list); + + if (seq->op == &format1_seq_ops) + print_format1(rsb, seq); + else if (seq->op == &format2_seq_ops) + print_format2(rsb, seq); + else if (seq->op == &format3_seq_ops) + print_format3(rsb, seq); + else if (seq->op == &format4_seq_ops) + print_format4(rsb, seq); return 0; } -static const struct seq_operations format1_seq_ops; -static const struct seq_operations format2_seq_ops; -static const struct seq_operations format3_seq_ops; -static const struct seq_operations format4_seq_ops; - static void *table_seq_start(struct seq_file *seq, loff_t *pos) { - struct rb_root *tree; - struct rb_node *node; struct dlm_ls *ls = seq->private; - struct rsbtbl_iter *ri; - struct dlm_rsb *r; - loff_t n = *pos; - unsigned bucket, entry; - int toss = (seq->op == &format4_seq_ops); - - bucket = n >> 32; - entry = n & ((1LL << 32) - 1); - - if (bucket >= ls->ls_rsbtbl_size) - return NULL; - - ri = kzalloc(sizeof(*ri), GFP_NOFS); - if (!ri) - return NULL; - if (n == 0) - ri->header = 1; - if (seq->op == &format1_seq_ops) - ri->format = 1; - if (seq->op == &format2_seq_ops) - ri->format = 2; - if (seq->op == &format3_seq_ops) - ri->format = 3; - if (seq->op == &format4_seq_ops) - ri->format = 4; - - tree = &ls->ls_rsbtbl[bucket].r; + struct list_head *list; - spin_lock_bh(&ls->ls_rsbtbl_lock); - if (!RB_EMPTY_ROOT(tree)) { - for (node = rb_first(tree); node; node = rb_next(node)) { - r = rb_entry(node, struct dlm_rsb, res_hashnode); - if (toss) { - if (!rsb_flag(r, RSB_TOSS)) - continue; - } else { - if (rsb_flag(r, RSB_TOSS)) - continue; - } - - if (!entry--) { - dlm_hold_rsb(r); - ri->rsb = r; - ri->bucket = bucket; - spin_unlock_bh(&ls->ls_rsbtbl_lock); - return 
ri; - } - } + if (!*pos) { + if (seq->op == &format2_seq_ops) + seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n"); + else if (seq->op == &format3_seq_ops) + seq_puts(seq, "rsb ptr nodeid first_lkid flags !root_list_empty !recover_list_empty recover_locks_count len\n"); + else if (seq->op == &format4_seq_ops) + seq_puts(seq, "rsb ptr nodeid master_nodeid dir_nodeid our_nodeid toss_time flags len str|hex name\n"); } - spin_unlock_bh(&ls->ls_rsbtbl_lock); - - /* - * move to the first rsb in the next non-empty bucket - */ - - /* zero the entry */ - n &= ~((1LL << 32) - 1); - while (1) { - bucket++; - n += 1LL << 32; + if (seq->op == &format4_seq_ops) + list = &ls->ls_toss; + else + list = &ls->ls_keep; - if (bucket >= ls->ls_rsbtbl_size) { - kfree(ri); - return NULL; - } - tree = &ls->ls_rsbtbl[bucket].r; - - spin_lock_bh(&ls->ls_rsbtbl_lock); - if (!RB_EMPTY_ROOT(tree)) { - node = rb_first(tree); - r = rb_entry(node, struct dlm_rsb, res_hashnode); - if (toss) { - if (!rsb_flag(r, RSB_TOSS)) - continue; - } else { - if (rsb_flag(r, RSB_TOSS)) - continue; - } - - dlm_hold_rsb(r); - ri->rsb = r; - ri->bucket = bucket; - spin_unlock_bh(&ls->ls_rsbtbl_lock); - *pos = n; - return ri; - } - spin_unlock_bh(&ls->ls_rsbtbl_lock); - } + spin_lock_bh(&ls->ls_rsbtbl_lock); + return seq_list_start(list, *pos); } static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) { struct dlm_ls *ls = seq->private; - struct rsbtbl_iter *ri = iter_ptr; - struct rb_root *tree; - struct rb_node *next; - struct dlm_rsb *r, *rp; - loff_t n = *pos; - unsigned bucket; - int toss = (seq->op == &format4_seq_ops); - - bucket = n >> 32; - - /* - * move to the next rsb in the same bucket - */ - - spin_lock_bh(&ls->ls_rsbtbl_lock); - rp = ri->rsb; - next = rb_next(&rp->res_hashnode); - - if (next) { - r = rb_entry(next, struct dlm_rsb, res_hashnode); - dlm_hold_rsb(r); - ri->rsb = r; - spin_unlock_bh(&ls->ls_rsbtbl_lock); - dlm_put_rsb(rp); - ++*pos; - return ri; - } - spin_unlock_bh(&ls->ls_rsbtbl_lock); - dlm_put_rsb(rp); - - /* - * move to the first rsb in the next non-empty bucket - */ + struct list_head *list; - /* zero the entry */ - n &= ~((1LL << 32) - 1); - - while (1) { - bucket++; - n += 1LL << 32; + if (seq->op == &format4_seq_ops) + list = &ls->ls_toss; + else + list = &ls->ls_keep; - if (bucket >= ls->ls_rsbtbl_size) { - kfree(ri); - ++*pos; - return NULL; - } - tree = &ls->ls_rsbtbl[bucket].r; - - spin_lock_bh(&ls->ls_rsbtbl_lock); - if (!RB_EMPTY_ROOT(tree)) { - next = rb_first(tree); - r = rb_entry(next, struct dlm_rsb, res_hashnode); - if (toss) { - if (!rsb_flag(r, RSB_TOSS)) - continue; - } else { - if (rsb_flag(r, RSB_TOSS)) - continue; - } - dlm_hold_rsb(r); - ri->rsb = r; - ri->bucket = bucket; - spin_unlock_bh(&ls->ls_rsbtbl_lock); - *pos = n; - return ri; - } - spin_unlock_bh(&ls->ls_rsbtbl_lock); - } + return seq_list_next(iter_ptr, list, pos); } static void table_seq_stop(struct seq_file *seq, void *iter_ptr) { - struct rsbtbl_iter *ri = iter_ptr; + struct dlm_ls *ls = seq->private; - if (ri) { - dlm_put_rsb(ri->rsb); - kfree(ri); - } + spin_unlock_bh(&ls->ls_rsbtbl_lock); } static const struct seq_operations format1_seq_ops = { diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index af88fc2f978c..6d06840029c3 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -339,6 +339,7 @@ struct dlm_rsb { struct list_head res_convertqueue; struct list_head res_waitqueue; + struct list_head res_rsbs_list; struct 
list_head res_root_list; /* used for recovery */ struct list_head res_masters_list; /* used for recovery */ struct list_head res_recover_list; /* used for recovery */ @@ -595,6 +596,9 @@ struct dlm_ls { spinlock_t ls_rsbtbl_lock; uint32_t ls_rsbtbl_size; + struct list_head ls_toss; + struct list_head ls_keep; + spinlock_t ls_waiters_lock; struct list_head ls_waiters; /* lkbs needing a reply */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 08ec1a04476a..a70b8edb5d3f 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -668,6 +668,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, r->res_first_lkid = 0; } + list_move(&r->res_rsbs_list, &ls->ls_keep); rsb_clear_flag(r, RSB_TOSS); goto out_unlock; @@ -730,6 +731,8 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, out_add: error = rsb_insert(r, &ls->ls_rsbtbl[b].r); + if (!error) + list_add(&r->res_rsbs_list, &ls->ls_keep); out_unlock: spin_unlock_bh(&ls->ls_rsbtbl_lock); out: @@ -801,6 +804,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, r->res_nodeid = 0; } + list_move(&r->res_rsbs_list, &ls->ls_keep); rsb_clear_flag(r, RSB_TOSS); goto out_unlock; @@ -826,6 +830,8 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, kref_init(&r->res_ref); error = rsb_insert(r, &ls->ls_rsbtbl[b].r); + if (!error) + list_add(&r->res_rsbs_list, &ls->ls_keep); out_unlock: spin_unlock_bh(&ls->ls_rsbtbl_lock); out: @@ -1110,6 +1116,8 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, goto retry; } + list_add(&r->res_rsbs_list, &ls->ls_toss); + if (result) *result = DLM_LU_ADD; *r_nodeid = from_nodeid; @@ -1120,20 +1128,12 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) { - struct rb_node *n; struct dlm_rsb *r; - int i; spin_lock_bh(&ls->ls_rsbtbl_lock); - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (rsb_flag(r, RSB_TOSS)) - continue; - - if (r->res_hash == hash) - dlm_dump_rsb(r); - } + list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { + if (r->res_hash == hash) + dlm_dump_rsb(r); } spin_unlock_bh(&ls->ls_rsbtbl_lock); } @@ -1166,6 +1166,7 @@ static void toss_rsb(struct kref *kref) kref_init(&r->res_ref); WARN_ON(rsb_flag(r, RSB_TOSS)); rsb_set_flag(r, RSB_TOSS); + list_move(&r->res_rsbs_list, &ls->ls_toss); r->res_toss_time = jiffies; set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags); if (r->res_lvbptr) { @@ -1672,6 +1673,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) continue; } + list_del(&r->res_rsbs_list); rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); dlm_free_rsb(r); } @@ -1740,6 +1742,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) continue; } + list_del(&r->res_rsbs_list); rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); send_remove(r); spin_unlock_bh(&ls->ls_rsbtbl_lock); @@ -4243,6 +4246,7 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) } if (kref_put(&r->res_ref, kill_rsb)) { + list_del(&r->res_rsbs_list); rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_free_rsb(r); @@ -5313,17 +5317,12 @@ void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list) lkb_count, nodes_count); } -static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) +static struct dlm_rsb *find_grant_rsb(struct 
dlm_ls *ls) { - struct rb_node *n; struct dlm_rsb *r; spin_lock_bh(&ls->ls_rsbtbl_lock); - for (n = rb_first(&ls->ls_rsbtbl[bucket].r); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (rsb_flag(r, RSB_TOSS)) - continue; - + list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { if (!rsb_flag(r, RSB_RECOVER_GRANT)) continue; if (!is_master(r)) { @@ -5358,19 +5357,15 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) void dlm_recover_grant(struct dlm_ls *ls) { struct dlm_rsb *r; - int bucket = 0; unsigned int count = 0; unsigned int rsb_count = 0; unsigned int lkb_count = 0; while (1) { - r = find_grant_rsb(ls, bucket); - if (!r) { - if (bucket == ls->ls_rsbtbl_size - 1) - break; - bucket++; - continue; - } + r = find_grant_rsb(ls); + if (!r) + break; + rsb_count++; count = 0; lock_rsb(r); diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index b5184ad550fa..2b5771a7bf31 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -495,6 +495,8 @@ static int new_lockspace(const char *name, const char *cluster, */ ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL)); + INIT_LIST_HEAD(&ls->ls_toss); + INIT_LIST_HEAD(&ls->ls_keep); spin_lock_init(&ls->ls_rsbtbl_lock); size = READ_ONCE(dlm_config.ci_rsbtbl_size); ls->ls_rsbtbl_size = size; @@ -838,6 +840,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) for (i = 0; i < ls->ls_rsbtbl_size; i++) { while ((n = rb_first(&ls->ls_rsbtbl[i].r))) { rsb = rb_entry(n, struct dlm_rsb, res_hashnode); + list_del(&rsb->res_rsbs_list); rb_erase(n, &ls->ls_rsbtbl[i].r); dlm_free_rsb(rsb); } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index e53d88e4ec93..512c1ae81a96 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -881,23 +881,15 @@ void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list) void dlm_clear_toss(struct dlm_ls *ls) { - struct rb_node *n, *next; - struct dlm_rsb *r; + struct dlm_rsb *r, *safe; unsigned int count = 0; - int i; - - spin_lock(&ls->ls_rsbtbl_lock); - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = next) { - next = rb_next(n); - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (!rsb_flag(r, RSB_TOSS)) - continue; - - rb_erase(n, &ls->ls_rsbtbl[i].r); - dlm_free_rsb(r); - count++; - } + + spin_lock_bh(&ls->ls_rsbtbl_lock); + list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) { + list_del(&r->res_rsbs_list); + rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].r); + dlm_free_rsb(r); + count++; } spin_unlock_bh(&ls->ls_rsbtbl_lock); diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index ad696528ebe7..5e8e10030b74 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -22,9 +22,8 @@ static int dlm_create_masters_list(struct dlm_ls *ls) { - struct rb_node *n; struct dlm_rsb *r; - int i, error = 0; + int error = 0; write_lock_bh(&ls->ls_masters_lock); if (!list_empty(&ls->ls_masters_list)) { @@ -34,15 +33,12 @@ static int dlm_create_masters_list(struct dlm_ls *ls) } spin_lock_bh(&ls->ls_rsbtbl_lock); - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (rsb_flag(r, RSB_TOSS) || r->res_nodeid) - continue; - - list_add(&r->res_masters_list, &ls->ls_masters_list); - dlm_hold_rsb(r); - } + list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { + if (r->res_nodeid) + continue; + + list_add(&r->res_masters_list, &ls->ls_masters_list); + dlm_hold_rsb(r); } 
spin_unlock_bh(&ls->ls_rsbtbl_lock); out: @@ -64,21 +60,15 @@ static void dlm_release_masters_list(struct dlm_ls *ls) static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) { - struct rb_node *n; struct dlm_rsb *r; - int i; spin_lock_bh(&ls->ls_rsbtbl_lock); - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - for (n = rb_first(&ls->ls_rsbtbl[i].r); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (WARN_ON_ONCE(rsb_flag(r, RSB_TOSS))) - continue; - - list_add(&r->res_root_list, root_list); - dlm_hold_rsb(r); - } + list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { + list_add(&r->res_root_list, root_list); + dlm_hold_rsb(r); } + + WARN_ON_ONCE(!list_empty(&ls->ls_toss)); spin_unlock_bh(&ls->ls_rsbtbl_lock); } -- cgit v1.2.3 From 6c648035cbe75d78836f6d7d2fdd9d996048a66b Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:39 -0400 Subject: dlm: switch to use rhashtable for rsbs Replace our own hash table with the more advanced rhashtable for keeping rsb structs. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/config.c | 8 +++ fs/dlm/config.h | 2 + fs/dlm/dir.c | 6 +- fs/dlm/dlm_internal.h | 18 ++---- fs/dlm/lock.c | 172 ++++++++++++++++---------------------------------- fs/dlm/lock.h | 2 +- fs/dlm/lockspace.c | 35 ++++------ fs/dlm/recover.c | 3 +- 8 files changed, 86 insertions(+), 160 deletions(-) (limited to 'fs') diff --git a/fs/dlm/config.c b/fs/dlm/config.c index e55e0a2cd2e8..517fa975dc5a 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -63,6 +63,14 @@ static void release_node(struct config_item *); static struct configfs_attribute *comm_attrs[]; static struct configfs_attribute *node_attrs[]; +const struct rhashtable_params dlm_rhash_rsb_params = { + .nelem_hint = 3, /* start small */ + .key_len = DLM_RESNAME_MAXLEN, + .key_offset = offsetof(struct dlm_rsb, res_name), + .head_offset = offsetof(struct dlm_rsb, res_node), + .automatic_shrinking = true, +}; + struct dlm_cluster { struct config_group group; unsigned int cl_tcp_port; diff --git a/fs/dlm/config.h b/fs/dlm/config.h index 4c91fcca0fd4..ed237d910208 100644 --- a/fs/dlm/config.h +++ b/fs/dlm/config.h @@ -21,6 +21,8 @@ struct dlm_config_node { uint32_t comm_seq; }; +extern const struct rhashtable_params dlm_rhash_rsb_params; + #define DLM_MAX_ADDR_COUNT 3 #define DLM_PROTO_TCP 0 diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index f8039f3ee2d1..9687f908476b 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -198,14 +198,10 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, int len) { struct dlm_rsb *r; - uint32_t hash, bucket; int rv; - hash = jhash(name, len, 0); - bucket = hash & (ls->ls_rsbtbl_size - 1); - spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].r, name, len, &r); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); spin_unlock_bh(&ls->ls_rsbtbl_lock); if (!rv) return r; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 6d06840029c3..cf43b97cf3e5 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -99,15 +100,6 @@ do { \ } \ } - -#define DLM_RTF_SHRINK_BIT 0 - -struct dlm_rsbtable { - struct rb_root r; - unsigned long flags; -}; - - /* * Lockspace member (per node in a ls) */ @@ -327,13 +319,12 @@ struct dlm_rsb { int res_id; /* for ls_recover_idr */ uint32_t res_lvbseq; uint32_t res_hash; - uint32_t res_bucket; /* rsbtbl */ unsigned long 
res_toss_time; uint32_t res_first_lkid; struct list_head res_lookup; /* lkbs waiting on first */ union { struct list_head res_hashchain; - struct rb_node res_hashnode; /* rsbtbl */ + struct rhash_head res_node; /* rsbtbl */ }; struct list_head res_grantqueue; struct list_head res_convertqueue; @@ -592,9 +583,10 @@ struct dlm_ls { struct idr ls_lkbidr; spinlock_t ls_lkbidr_spin; - struct dlm_rsbtable *ls_rsbtbl; + struct rhashtable ls_rsbtbl; +#define DLM_RTF_SHRINK_BIT 0 + unsigned long ls_rsbtbl_flags; spinlock_t ls_rsbtbl_lock; - uint32_t ls_rsbtbl_size; struct list_head ls_toss; struct list_head ls_keep; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index a70b8edb5d3f..defb90b56b72 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -436,8 +436,6 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); list_del(&r->res_hashchain); - /* Convert the empty list_head to a NULL rb_node for tree usage: */ - memset(&r->res_hashnode, 0, sizeof(struct rb_node)); ls->ls_new_rsb_count--; spin_unlock_bh(&ls->ls_new_rsb_spin); @@ -458,67 +456,31 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, return 0; } -static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen) +int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len, + struct dlm_rsb **r_ret) { - char maxname[DLM_RESNAME_MAXLEN]; + char key[DLM_RESNAME_MAXLEN] = {}; - memset(maxname, 0, DLM_RESNAME_MAXLEN); - memcpy(maxname, name, nlen); - return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN); -} + memcpy(key, name, len); + *r_ret = rhashtable_lookup_fast(rhash, &key, dlm_rhash_rsb_params); + if (*r_ret) + return 0; -int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len, - struct dlm_rsb **r_ret) -{ - struct rb_node *node = tree->rb_node; - struct dlm_rsb *r; - int rc; - - while (node) { - r = rb_entry(node, struct dlm_rsb, res_hashnode); - rc = rsb_cmp(r, name, len); - if (rc < 0) - node = node->rb_left; - else if (rc > 0) - node = node->rb_right; - else - goto found; - } - *r_ret = NULL; return -EBADR; - - found: - *r_ret = r; - return 0; } -static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree) +static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash) { - struct rb_node **newn = &tree->rb_node; - struct rb_node *parent = NULL; - int rc; - - while (*newn) { - struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb, - res_hashnode); + int rv; - parent = *newn; - rc = rsb_cmp(cur, rsb->res_name, rsb->res_length); - if (rc < 0) - newn = &parent->rb_left; - else if (rc > 0) - newn = &parent->rb_right; - else { - log_print("rsb_insert match"); - dlm_dump_rsb(rsb); - dlm_dump_rsb(cur); - return -EEXIST; - } + rv = rhashtable_insert_fast(rhash, &rsb->res_node, + dlm_rhash_rsb_params); + if (rv == -EEXIST) { + log_print("%s match", __func__); + dlm_dump_rsb(rsb); } - rb_link_node(&rsb->res_hashnode, parent, newn); - rb_insert_color(&rsb->res_hashnode, tree); - return 0; + return rv; } /* @@ -566,8 +528,7 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree) */ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, - uint32_t hash, uint32_t b, - int dir_nodeid, int from_nodeid, + uint32_t hash, int dir_nodeid, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { struct dlm_rsb *r = NULL; @@ -616,7 +577,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, spin_lock_bh(&ls->ls_rsbtbl_lock); - error = 
dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (error) goto do_new; @@ -690,7 +651,6 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, goto out_unlock; r->res_hash = hash; - r->res_bucket = b; r->res_dir_nodeid = dir_nodeid; kref_init(&r->res_ref); @@ -730,7 +690,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, } out_add: - error = rsb_insert(r, &ls->ls_rsbtbl[b].r); + error = rsb_insert(r, &ls->ls_rsbtbl); if (!error) list_add(&r->res_rsbs_list, &ls->ls_keep); out_unlock: @@ -745,8 +705,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, dlm_recover_masters). */ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, - uint32_t hash, uint32_t b, - int dir_nodeid, int from_nodeid, + uint32_t hash, int dir_nodeid, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { struct dlm_rsb *r = NULL; @@ -761,7 +720,7 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (error) goto do_new; @@ -823,13 +782,12 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, goto out_unlock; r->res_hash = hash; - r->res_bucket = b; r->res_dir_nodeid = dir_nodeid; r->res_master_nodeid = dir_nodeid; r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid; kref_init(&r->res_ref); - error = rsb_insert(r, &ls->ls_rsbtbl[b].r); + error = rsb_insert(r, &ls->ls_rsbtbl); if (!error) list_add(&r->res_rsbs_list, &ls->ls_keep); out_unlock: @@ -843,23 +801,21 @@ static int find_rsb(struct dlm_ls *ls, const void *name, int len, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { - uint32_t hash, b; int dir_nodeid; + uint32_t hash; if (len > DLM_RESNAME_MAXLEN) return -EINVAL; hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - dir_nodeid = dlm_hash2nodeid(ls, hash); if (dlm_no_directory(ls)) - return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid, + return find_rsb_nodir(ls, name, len, hash, dir_nodeid, from_nodeid, flags, r_ret); else - return find_rsb_dir(ls, name, len, hash, b, dir_nodeid, - from_nodeid, flags, r_ret); + return find_rsb_dir(ls, name, len, hash, dir_nodeid, + from_nodeid, flags, r_ret); } /* we have received a request and found that res_master_nodeid != our_nodeid, @@ -1020,7 +976,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, int len, unsigned int flags, int *r_nodeid, int *result) { struct dlm_rsb *r = NULL; - uint32_t hash, b; + uint32_t hash; int our_nodeid = dlm_our_nodeid(); int dir_nodeid, error; @@ -1034,8 +990,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, } hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - dir_nodeid = dlm_hash2nodeid(ls, hash); if (dir_nodeid != our_nodeid) { log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d", @@ -1051,7 +1005,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, return error; spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (!error) { if (rsb_flag(r, RSB_TOSS)) goto do_toss; @@ -1100,7 +1054,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, goto out_unlock; r->res_hash = hash; - 
r->res_bucket = b; r->res_dir_nodeid = our_nodeid; r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; @@ -1108,7 +1061,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, r->res_toss_time = jiffies; rsb_set_flag(r, RSB_TOSS); - error = rsb_insert(r, &ls->ls_rsbtbl[b].r); + error = rsb_insert(r, &ls->ls_rsbtbl); if (error) { /* should never happen */ dlm_free_rsb(r); @@ -1141,14 +1094,10 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) { struct dlm_rsb *r = NULL; - uint32_t hash, b; int error; - hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - spin_lock_bh(&ls->ls_rsbtbl_lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (!error) goto out; @@ -1168,7 +1117,7 @@ static void toss_rsb(struct kref *kref) rsb_set_flag(r, RSB_TOSS); list_move(&r->res_rsbs_list, &ls->ls_toss); r->res_toss_time = jiffies; - set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags); + set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags); if (r->res_lvbptr) { dlm_free_lvb(r->res_lvbptr); r->res_lvbptr = NULL; @@ -1607,10 +1556,9 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, return error; } -static void shrink_bucket(struct dlm_ls *ls, int b) +static void shrink_bucket(struct dlm_ls *ls) { - struct rb_node *n, *next; - struct dlm_rsb *r; + struct dlm_rsb *r, *safe; char *name; int our_nodeid = dlm_our_nodeid(); int remote_count = 0; @@ -1621,17 +1569,12 @@ static void shrink_bucket(struct dlm_ls *ls, int b) spin_lock_bh(&ls->ls_rsbtbl_lock); - if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) { + if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags)) { spin_unlock_bh(&ls->ls_rsbtbl_lock); return; } - for (n = rb_first(&ls->ls_rsbtbl[b].r); n; n = next) { - next = rb_next(n); - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (!rsb_flag(r, RSB_TOSS)) - continue; - + list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) { /* If we're the directory record for this rsb, and we're not the master of it, then we need to wait for the master node to send us a dir remove for @@ -1674,14 +1617,15 @@ static void shrink_bucket(struct dlm_ls *ls, int b) } list_del(&r->res_rsbs_list); - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); dlm_free_rsb(r); } if (need_shrink) - set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); + set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags); else - clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); + clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags); spin_unlock_bh(&ls->ls_rsbtbl_lock); /* @@ -1698,7 +1642,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) len = ls->ls_remove_lens[i]; spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (rv) { spin_unlock_bh(&ls->ls_rsbtbl_lock); log_error(ls, "remove_name not found %s", name); @@ -1743,7 +1687,8 @@ static void shrink_bucket(struct dlm_ls *ls, int b) } list_del(&r->res_rsbs_list); - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); send_remove(r); spin_unlock_bh(&ls->ls_rsbtbl_lock); @@ -1753,14 +1698,7 @@ static void shrink_bucket(struct dlm_ls *ls, int b) void dlm_scan_rsbs(struct dlm_ls *ls) { - 
int i; - - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - shrink_bucket(ls, i); - if (dlm_locking_stopped(ls)) - break; - cond_resched(); - } + shrink_bucket(ls); } /* lkb is master or local copy */ @@ -4174,7 +4112,6 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) { char name[DLM_RESNAME_MAXLEN+1]; struct dlm_rsb *r; - uint32_t hash, b; int rv, len, dir_nodeid, from_nodeid; from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); @@ -4194,24 +4131,22 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) return; } - /* Look for name on rsbtbl.toss, if it's there, kill it. - If it's on rsbtbl.keep, it's being used, and we should ignore this - message. This is an expected race between the dir node sending a - request to the master node at the same time as the master node sends - a remove to the dir node. The resolution to that race is for the - dir node to ignore the remove message, and the master node to - recreate the master rsb when it gets a request from the dir node for - an rsb it doesn't have. */ + /* Look for name in rsb toss state, if it's there, kill it. + * If it's in non toss state, it's being used, and we should ignore this + * message. This is an expected race between the dir node sending a + * request to the master node at the same time as the master node sends + * a remove to the dir node. The resolution to that race is for the + * dir node to ignore the remove message, and the master node to + * recreate the master rsb when it gets a request from the dir node for + * an rsb it doesn't have. + */ memset(name, 0, sizeof(name)); memcpy(name, ms->m_extra, len); - hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].r, name, len, &r); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (rv) { /* should not happen */ log_error(ls, "%s from %d not found %s", __func__, @@ -4247,7 +4182,8 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) if (kref_put(&r->res_ref, kill_rsb)) { list_del(&r->res_rsbs_list); - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].r); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); spin_unlock_bh(&ls->ls_rsbtbl_lock); dlm_free_rsb(r); } else { diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 45a74869810a..33616d4b0cdb 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -29,7 +29,7 @@ void dlm_unlock_recovery(struct dlm_ls *ls); int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, int len, unsigned int flags, int *r_nodeid, int *result); -int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len, +int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len, struct dlm_rsb **r_ret); void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list); diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 2b5771a7bf31..890e1a4cf787 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -410,9 +410,9 @@ static int new_lockspace(const char *name, const char *cluster, int *ops_result, dlm_lockspace_t **lockspace) { struct dlm_ls *ls; - int i, size, error; int do_unreg = 0; int namelen = strlen(name); + int i, error; if (namelen > DLM_LOCKSPACE_LEN || namelen == 0) return -EINVAL; @@ -498,15 +498,10 @@ static int new_lockspace(const char *name, const char *cluster, INIT_LIST_HEAD(&ls->ls_toss); INIT_LIST_HEAD(&ls->ls_keep); spin_lock_init(&ls->ls_rsbtbl_lock); - size = 
READ_ONCE(dlm_config.ci_rsbtbl_size); - ls->ls_rsbtbl_size = size; - ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable))); - if (!ls->ls_rsbtbl) + error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params); + if (error) goto out_lsfree; - for (i = 0; i < size; i++) { - ls->ls_rsbtbl[i].r.rb_node = NULL; - } for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1, @@ -669,7 +664,7 @@ static int new_lockspace(const char *name, const char *cluster, out_rsbtbl: for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) kfree(ls->ls_remove_names[i]); - vfree(ls->ls_rsbtbl); + rhashtable_destroy(&ls->ls_rsbtbl); out_lsfree: if (do_unreg) kobject_put(&ls->ls_kobj); @@ -772,10 +767,16 @@ static int lockspace_busy(struct dlm_ls *ls, int force) return rv; } +static void rhash_free_rsb(void *ptr, void *arg) +{ + struct dlm_rsb *rsb = ptr; + + dlm_free_rsb(rsb); +} + static int release_lockspace(struct dlm_ls *ls, int force) { struct dlm_rsb *rsb; - struct rb_node *n; int i, busy, rv; busy = lockspace_busy(ls, force); @@ -834,19 +835,9 @@ static int release_lockspace(struct dlm_ls *ls, int force) idr_destroy(&ls->ls_lkbidr); /* - * Free all rsb's on rsbtbl[] lists + * Free all rsb's on rsbtbl */ - - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - while ((n = rb_first(&ls->ls_rsbtbl[i].r))) { - rsb = rb_entry(n, struct dlm_rsb, res_hashnode); - list_del(&rsb->res_rsbs_list); - rb_erase(n, &ls->ls_rsbtbl[i].r); - dlm_free_rsb(rsb); - } - } - - vfree(ls->ls_rsbtbl); + rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL); for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) kfree(ls->ls_remove_names[i]); diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 512c1ae81a96..c21ef115123b 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -887,7 +887,8 @@ void dlm_clear_toss(struct dlm_ls *ls) spin_lock_bh(&ls->ls_rsbtbl_lock); list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) { list_del(&r->res_rsbs_list); - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].r); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); dlm_free_rsb(r); count++; } -- cgit v1.2.3 From 6644925abf056030cd9efc73fc05ea5a5df4f59f Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:40 -0400 Subject: dlm: do not use ref counts for rsb in the toss state In the past we had problems when an rsb had a reference counter greater than one while in the toss state. An rsb in the toss state is not actively used for locking, and should not have any other references apart from the single ref keeping it on the rsb hash. Shift to freeing rsb's directly rather than using kref_put to free them, since the ref counting is not meant to be used in this state. Add warnings if ref counting is seen while an rsb is in the toss state. 
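To make the new lifecycle concrete, here is the rule the warnings enforce, condensed from the hunks that follow (hold_rsb(), the find_rsb_dir()/find_rsb_nodir() revival path and free_toss_rsb() are from the patch; the sequencing shown is a sketch, not one complete function):

	/* keep state: the rsb is live and reference counted */
	static inline void hold_rsb(struct dlm_rsb *r)
	{
		/* rsbs in toss state never get referenced */
		WARN_ON(rsb_flag(r, RSB_TOSS));
		kref_get(&r->res_ref);
	}

	/* reviving a tossed rsb: the counter was not maintained while
	 * the rsb sat on the toss list, so reinit it for keep state
	 */
	list_move(&r->res_rsbs_list, &ls->ls_keep);
	rsb_clear_flag(r, RSB_TOSS);
	kref_init(&r->res_ref);

	/* disposing of a tossed rsb: the hash table holds the only
	 * logical reference, so skip kref_put() and free it directly
	 */
	WARN_ON_ONCE(!rsb_flag(r, RSB_TOSS));
	dlm_free_rsb(r);	/* via free_toss_rsb() */

In short, res_ref is meaningful only while the rsb is in keep state, which is exactly what the new WARN_ON()s assert.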
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/lock.c | 61 ++++++++++++++++++++++++++++---------------------------- fs/dlm/lock.h | 1 + fs/dlm/recover.c | 2 +- 3 files changed, 32 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index defb90b56b72..fee1a4164fc1 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -325,6 +325,8 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) static inline void hold_rsb(struct dlm_rsb *r) { + /* rsbs in toss state never get referenced */ + WARN_ON(rsb_flag(r, RSB_TOSS)); kref_get(&r->res_ref); } @@ -631,6 +633,11 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, list_move(&r->res_rsbs_list, &ls->ls_keep); rsb_clear_flag(r, RSB_TOSS); + /* rsb got out of toss state, it becomes alive again + * and we reinit the reference counter that is only + * valid for keep state rsbs + */ + kref_init(&r->res_ref); goto out_unlock; @@ -765,6 +772,11 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, list_move(&r->res_rsbs_list, &ls->ls_keep); rsb_clear_flag(r, RSB_TOSS); + /* rsb got out of toss state, it becomes alive again + * and we reinit the reference counter that is only + * valid for keep state rsbs + */ + kref_init(&r->res_ref); goto out_unlock; @@ -1112,8 +1124,6 @@ static void toss_rsb(struct kref *kref) struct dlm_ls *ls = r->res_ls; DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); - kref_init(&r->res_ref); - WARN_ON(rsb_flag(r, RSB_TOSS)); rsb_set_flag(r, RSB_TOSS); list_move(&r->res_rsbs_list, &ls->ls_toss); r->res_toss_time = jiffies; @@ -1129,16 +1139,20 @@ static void toss_rsb(struct kref *kref) static void unhold_rsb(struct dlm_rsb *r) { int rv; + + /* rsbs in toss state never get referenced */ + WARN_ON(rsb_flag(r, RSB_TOSS)); rv = kref_put(&r->res_ref, toss_rsb); DLM_ASSERT(!rv, dlm_dump_rsb(r);); } -static void kill_rsb(struct kref *kref) +void free_toss_rsb(struct dlm_rsb *r) { - struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref); + WARN_ON_ONCE(!rsb_flag(r, RSB_TOSS)); - /* All work is done after the return from kref_put() so we - can release the write_lock before the remove and free. */ + /* check if all work is done after the rsb is on toss list + * and it can be freed. + */ DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); @@ -1147,6 +1161,8 @@ static void kill_rsb(struct kref *kref) DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r);); + + dlm_free_rsb(r); } /* Attaching/detaching lkb's from rsb's is for rsb reference counting. 
@@ -1611,15 +1627,10 @@ static void shrink_bucket(struct dlm_ls *ls) continue; } - if (!kref_put(&r->res_ref, kill_rsb)) { - log_error(ls, "tossed rsb in use %s", r->res_name); - continue; - } - list_del(&r->res_rsbs_list); rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, dlm_rhash_rsb_params); - dlm_free_rsb(r); + free_toss_rsb(r); } if (need_shrink) @@ -1680,19 +1691,13 @@ static void shrink_bucket(struct dlm_ls *ls) continue; } - if (!kref_put(&r->res_ref, kill_rsb)) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); - log_error(ls, "remove_name in use %s", name); - continue; - } - list_del(&r->res_rsbs_list); rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, dlm_rhash_rsb_params); send_remove(r); spin_unlock_bh(&ls->ls_rsbtbl_lock); - dlm_free_rsb(r); + free_toss_rsb(r); } } @@ -4180,18 +4185,12 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) return; } - if (kref_put(&r->res_ref, kill_rsb)) { - list_del(&r->res_rsbs_list); - rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, - dlm_rhash_rsb_params); - spin_unlock_bh(&ls->ls_rsbtbl_lock); - dlm_free_rsb(r); - } else { - log_error(ls, "receive_remove from %d rsb ref error", - from_nodeid); - dlm_print_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); - } + list_del(&r->res_rsbs_list); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); + spin_unlock_bh(&ls->ls_rsbtbl_lock); + + free_toss_rsb(r); } static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms) diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 33616d4b0cdb..b56a34802762 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -18,6 +18,7 @@ void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms, uint32_t saved_seq); void dlm_receive_buffer(const union dlm_packet *p, int nodeid); int dlm_modes_compat(int mode1, int mode2); +void free_toss_rsb(struct dlm_rsb *r); void dlm_put_rsb(struct dlm_rsb *r); void dlm_hold_rsb(struct dlm_rsb *r); int dlm_put_lkb(struct dlm_lkb *lkb); diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index c21ef115123b..d43189532b14 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -889,7 +889,7 @@ void dlm_clear_toss(struct dlm_ls *ls) list_del(&r->res_rsbs_list); rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, dlm_rhash_rsb_params); - dlm_free_rsb(r); + free_toss_rsb(r); count++; } spin_unlock_bh(&ls->ls_rsbtbl_lock); -- cgit v1.2.3 From b1f2381c1a8d52b973944090ed8b42c750152533 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:41 -0400 Subject: dlm: drop dlm_scand kthread and use timers Currently the scand kthread acts like a garbage collection for expired rsbs on toss list, to clean them up after a certain timeout. It triggers every couple of seconds and iterates over the toss list while holding ls_rsbtbl_lock for the whole hash bucket iteration. To reduce the amount of time holding ls_rsbtbl_lock, we now handle the disposal of expired rsbs using a per-lockspace timer that expires for the earliest tossed rsb on the lockspace toss queue. This toss queue is ordered according to the rsb res_toss_time with the earliest tossed rsb as the first entry. The toss timer will only trylock() necessary locks, since it is low priority garbage collection, and will rearm the timer if trylock() fails. If the timer function does not find any expired rsb's, it rearms the timer with the next earliest expired rsb. 
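The queue needs no explicit sorting step: every rsb gets the same relative timeout, so appending at the tail keeps the absolute res_toss_time values ascending, and only the head entry ever determines when the timer fires next. A sketch of the arming path, condensed from rsb_mod_timer() below (requeue handling and the directory-record case omitted):

	spin_lock_bh(&ls->ls_toss_q_lock);
	/* constant relative timeout, so a tail append keeps the queue
	 * ordered by ascending absolute expiry time
	 */
	r->res_toss_time = jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ);
	list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q);

	/* the head entry is always the earliest expiry; the timer is
	 * never armed while recovery has stopped locking
	 */
	first = list_first_entry(&ls->ls_toss_q, struct dlm_rsb,
				 res_toss_q_list);
	if (!dlm_locking_stopped(ls))
		mod_timer(&ls->ls_timer, first->res_toss_time);
	spin_unlock_bh(&ls->ls_toss_q_lock);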
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/dlm_internal.h | 16 ++- fs/dlm/lock.c | 382 +++++++++++++++++++++++++++++++------------------- fs/dlm/lock.h | 2 + fs/dlm/lockspace.c | 105 ++------------ fs/dlm/member.c | 2 + fs/dlm/recover.c | 5 + fs/dlm/recoverd.c | 10 ++ 7 files changed, 283 insertions(+), 239 deletions(-) (limited to 'fs') diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index cf43b97cf3e5..98a0ac511bc8 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -334,6 +334,7 @@ struct dlm_rsb { struct list_head res_root_list; /* used for recovery */ struct list_head res_masters_list; /* used for recovery */ struct list_head res_recover_list; /* used for recovery */ + struct list_head res_toss_q_list; int res_recover_locks_count; char *res_lvbptr; @@ -584,13 +585,20 @@ struct dlm_ls { spinlock_t ls_lkbidr_spin; struct rhashtable ls_rsbtbl; -#define DLM_RTF_SHRINK_BIT 0 - unsigned long ls_rsbtbl_flags; spinlock_t ls_rsbtbl_lock; struct list_head ls_toss; struct list_head ls_keep; + struct timer_list ls_timer; + /* this queue is ordered according to the + * absolute res_toss_time jiffies time + * to mod_timer() with the first element + * if necessary. + */ + struct list_head ls_toss_q; + spinlock_t ls_toss_q_lock; + spinlock_t ls_waiters_lock; struct list_head ls_waiters; /* lkbs needing a reply */ @@ -601,9 +609,6 @@ struct dlm_ls { int ls_new_rsb_count; struct list_head ls_new_rsb; /* new rsb structs */ - char *ls_remove_names[DLM_REMOVE_NAMES_MAX]; - int ls_remove_lens[DLM_REMOVE_NAMES_MAX]; - struct list_head ls_nodes; /* current nodes in ls */ struct list_head ls_nodes_gone; /* dead node list, recovery */ int ls_num_nodes; /* number of nodes in ls */ @@ -640,7 +645,6 @@ struct dlm_ls { spinlock_t ls_cb_lock; struct list_head ls_cb_delay; /* save for queue_work later */ - struct timer_list ls_timer; struct task_struct *ls_recoverd_task; struct mutex ls_recoverd_active; spinlock_t ls_recover_lock; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index fee1a4164fc1..7c97181a04fe 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -320,6 +320,11 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) * Basic operations on rsb's and lkb's */ +static inline unsigned long rsb_toss_jiffies(void) +{ + return jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ); +} + /* This is only called to add a reference when the code already holds a valid reference to the rsb, so there's no need for locking. */ @@ -416,6 +421,229 @@ static int pre_rsb_struct(struct dlm_ls *ls) return 0; } +/* connected with timer_delete_sync() in dlm_ls_stop() to stop + * new timers when recovery is triggered and don't run them + * again until a dlm_timer_resume() tries it again. + */ +static void __rsb_mod_timer(struct dlm_ls *ls, unsigned long jiffies) +{ + if (!dlm_locking_stopped(ls)) + mod_timer(&ls->ls_timer, jiffies); +} + +/* This function tries to resume the timer callback if an rsb + * is on the toss list and no timer is pending. It might be that + * the first entry is currently being run as the timer callback, + * but we don't care if the timer gets queued up again and does + * nothing. Should be a rare case.
+ */ +void dlm_timer_resume(struct dlm_ls *ls) +{ + struct dlm_rsb *r; + + spin_lock_bh(&ls->ls_toss_q_lock); + r = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb, + res_toss_q_list); + if (r && !timer_pending(&ls->ls_timer)) + __rsb_mod_timer(ls, r->res_toss_time); + spin_unlock_bh(&ls->ls_toss_q_lock); +} + +/* ls_rsbtbl_lock must be held, and the rsb must be in toss state */ +static void rsb_delete_toss_timer(struct dlm_ls *ls, struct dlm_rsb *r) +{ + struct dlm_rsb *first; + + spin_lock_bh(&ls->ls_toss_q_lock); + r->res_toss_time = 0; + + /* if the rsb is not queued do nothing */ + if (list_empty(&r->res_toss_q_list)) + goto out; + + /* get the first element before delete */ + first = list_first_entry(&ls->ls_toss_q, struct dlm_rsb, + res_toss_q_list); + list_del_init(&r->res_toss_q_list); + /* check if the first element was the rsb we deleted */ + if (first == r) { + /* try to get the new first element, if the list + * is empty now try to delete the timer, if we are + * too late we don't care. + * + * if the list isn't empty and a new first element got + * in place, set the new timer expire time. + */ + first = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb, + res_toss_q_list); + if (!first) + timer_delete(&ls->ls_timer); + else + __rsb_mod_timer(ls, first->res_toss_time); + } + +out: + spin_unlock_bh(&ls->ls_toss_q_lock); +} + +/* The caller must hold ls_rsbtbl_lock. This needs to be called every + * time the rsb enters toss state or the toss state changes + * the dir/master nodeid. + */ +static void rsb_mod_timer(struct dlm_ls *ls, struct dlm_rsb *r) +{ + int our_nodeid = dlm_our_nodeid(); + struct dlm_rsb *first; + + /* If we're the directory record for this rsb, and + * we're not the master of it, then we need to wait + * for the master node to send us a dir remove + * before removing the dir record. + */ + if (!dlm_no_directory(ls) && + (r->res_master_nodeid != our_nodeid) && + (dlm_dir_nodeid(r) == our_nodeid)) { + rsb_delete_toss_timer(ls, r); + return; + } + + spin_lock_bh(&ls->ls_toss_q_lock); + /* set the new rsb absolute expire time in the rsb */ + r->res_toss_time = rsb_toss_jiffies(); + if (list_empty(&ls->ls_toss_q)) { + /* if the queue is empty add the element and it's + * our new expire time + */ + list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q); + __rsb_mod_timer(ls, r->res_toss_time); + } else { + /* check if the rsb was already queued, if so delete + * it from the toss queue + */ + if (!list_empty(&r->res_toss_q_list)) + list_del(&r->res_toss_q_list); + + /* get the (possibly new) first element, then append this + * rsb, which now carries the latest expire time, to the + * end of the queue. If the queue became empty, this + * rsb's expire time is our next expiration; otherwise + * the first element's expire time is. + */ + first = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb, + res_toss_q_list); + list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q); + if (!first) + __rsb_mod_timer(ls, r->res_toss_time); + else + __rsb_mod_timer(ls, first->res_toss_time); + } + spin_unlock_bh(&ls->ls_toss_q_lock); +} + +/* if we hit contention we retry the trylock after 250 ms. + * if any other mod_timer() runs in between we don't care + * that the timer then expires earlier again; this is only for the + * unlikely case nothing happened in this time.
+ */ +#define DLM_TOSS_TIMER_RETRY (jiffies + msecs_to_jiffies(250)) + +void dlm_rsb_toss_timer(struct timer_list *timer) +{ + struct dlm_ls *ls = from_timer(ls, timer, ls_timer); + int our_nodeid = dlm_our_nodeid(); + struct dlm_rsb *r; + int rv; + + while (1) { + /* interrupting point to leave iteration when + * recovery waits for timer_delete_sync(), recovery + * will take care to delete everything in toss queue. + */ + if (dlm_locking_stopped(ls)) + break; + + rv = spin_trylock(&ls->ls_toss_q_lock); + if (!rv) { + /* rearm again try timer */ + __rsb_mod_timer(ls, DLM_TOSS_TIMER_RETRY); + break; + } + + r = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb, + res_toss_q_list); + if (!r) { + /* nothing to do anymore next rsb queue will + * set next mod_timer() expire. + */ + spin_unlock(&ls->ls_toss_q_lock); + break; + } + + /* test if the first rsb isn't expired yet, if + * so we stop freeing rsb from toss queue as + * the order in queue is ascending to the + * absolute res_toss_time jiffies + */ + if (time_before(jiffies, r->res_toss_time)) { + /* rearm with the next rsb to expire in the future */ + __rsb_mod_timer(ls, r->res_toss_time); + spin_unlock(&ls->ls_toss_q_lock); + break; + } + + /* in find_rsb_dir/nodir there is a reverse order of this + * lock, however this is only a trylock if we hit some + * possible contention we try it again. + * + * This lock synchronized while holding ls_toss_q_lock + * synchronize everything that rsb_delete_toss_timer() + * or rsb_mod_timer() can't run after this timer callback + * deletes the rsb from the ls_toss_q. Whereas the other + * holders have always a priority to run as this is only + * a caching handling and the other holders might to put + * this rsb out of the toss state. + */ + rv = spin_trylock(&ls->ls_rsbtbl_lock); + if (!rv) { + spin_unlock(&ls->ls_toss_q_lock); + /* rearm again try timer */ + __rsb_mod_timer(ls, DLM_TOSS_TIMER_RETRY); + break; + } + + list_del(&r->res_rsbs_list); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); + + /* not necessary to held the ls_rsbtbl_lock when + * calling send_remove() + */ + spin_unlock(&ls->ls_rsbtbl_lock); + + /* remove the rsb out of the toss queue its gone + * drom DLM now + */ + list_del_init(&r->res_toss_q_list); + spin_unlock(&ls->ls_toss_q_lock); + + /* no rsb in this state should ever run a timer */ + WARN_ON(!dlm_no_directory(ls) && + (r->res_master_nodeid != our_nodeid) && + (dlm_dir_nodeid(r) == our_nodeid)); + + /* We're the master of this rsb but we're not + * the directory record, so we need to tell the + * dir node to remove the dir record + */ + if (!dlm_no_directory(ls) && + (r->res_master_nodeid == our_nodeid) && + (dlm_dir_nodeid(r) != our_nodeid)) + send_remove(r); + + free_toss_rsb(r); + } +} + /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can unlock any spinlocks, go back and call pre_rsb_struct again. Otherwise, take an rsb off the list and return it. 
*/ @@ -451,6 +679,7 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, INIT_LIST_HEAD(&r->res_convertqueue); INIT_LIST_HEAD(&r->res_waitqueue); INIT_LIST_HEAD(&r->res_root_list); + INIT_LIST_HEAD(&r->res_toss_q_list); INIT_LIST_HEAD(&r->res_recover_list); INIT_LIST_HEAD(&r->res_masters_list); @@ -638,6 +867,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, * valid for keep state rsbs */ kref_init(&r->res_ref); + rsb_delete_toss_timer(ls, r); + spin_unlock_bh(&ls->ls_rsbtbl_lock); + goto out_unlock; @@ -777,6 +1009,9 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, * valid for keep state rsbs */ kref_init(&r->res_ref); + rsb_delete_toss_timer(ls, r); + spin_unlock_bh(&ls->ls_rsbtbl_lock); + goto out_unlock; @@ -1050,7 +1285,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags, r_nodeid, result); - r->res_toss_time = jiffies; + rsb_mod_timer(ls, r); /* the rsb was inactive (on toss list) */ spin_unlock_bh(&ls->ls_rsbtbl_lock); @@ -1070,7 +1305,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; kref_init(&r->res_ref); - r->res_toss_time = jiffies; rsb_set_flag(r, RSB_TOSS); error = rsb_insert(r, &ls->ls_rsbtbl); @@ -1082,6 +1316,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, } list_add(&r->res_rsbs_list, &ls->ls_toss); + rsb_mod_timer(ls, r); if (result) *result = DLM_LU_ADD; @@ -1126,8 +1361,8 @@ static void toss_rsb(struct kref *kref) DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); rsb_set_flag(r, RSB_TOSS); list_move(&r->res_rsbs_list, &ls->ls_toss); - r->res_toss_time = jiffies; - set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags); + rsb_mod_timer(ls, r); + if (r->res_lvbptr) { dlm_free_lvb(r->res_lvbptr); r->res_lvbptr = NULL; @@ -1150,15 +1385,12 @@ void free_toss_rsb(struct dlm_rsb *r) { WARN_ON_ONCE(!rsb_flag(r, RSB_TOSS)); - /* check if all work is done after the rsb is on toss list - * and it can be freed. - */ - DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); + DLM_ASSERT(list_empty(&r->res_toss_q_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r);); @@ -1572,140 +1804,6 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, return error; } -static void shrink_bucket(struct dlm_ls *ls) -{ - struct dlm_rsb *r, *safe; - char *name; - int our_nodeid = dlm_our_nodeid(); - int remote_count = 0; - int need_shrink = 0; - int i, len, rv; - - memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); - - spin_lock_bh(&ls->ls_rsbtbl_lock); - - if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags)) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); - return; - } - - list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) { - /* If we're the directory record for this rsb, and - we're not the master of it, then we need to wait - for the master node to send us a dir remove for - before removing the dir record. 
*/ - - if (!dlm_no_directory(ls) && - (r->res_master_nodeid != our_nodeid) && - (dlm_dir_nodeid(r) == our_nodeid)) { - continue; - } - - need_shrink = 1; - - if (!time_after_eq(jiffies, r->res_toss_time + - dlm_config.ci_toss_secs * HZ)) { - continue; - } - - if (!dlm_no_directory(ls) && - (r->res_master_nodeid == our_nodeid) && - (dlm_dir_nodeid(r) != our_nodeid)) { - - /* We're the master of this rsb but we're not - the directory record, so we need to tell the - dir node to remove the dir record. */ - - ls->ls_remove_lens[remote_count] = r->res_length; - memcpy(ls->ls_remove_names[remote_count], r->res_name, - DLM_RESNAME_MAXLEN); - remote_count++; - - if (remote_count >= DLM_REMOVE_NAMES_MAX) - break; - continue; - } - - list_del(&r->res_rsbs_list); - rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, - dlm_rhash_rsb_params); - free_toss_rsb(r); - } - - if (need_shrink) - set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags); - else - clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags); - spin_unlock_bh(&ls->ls_rsbtbl_lock); - - /* - * While searching for rsb's to free, we found some that require - * remote removal. We leave them in place and find them again here - * so there is a very small gap between removing them from the toss - * list and sending the removal. Keeping this gap small is - * important to keep us (the master node) from being out of sync - * with the remote dir node for very long. - */ - - for (i = 0; i < remote_count; i++) { - name = ls->ls_remove_names[i]; - len = ls->ls_remove_lens[i]; - - spin_lock_bh(&ls->ls_rsbtbl_lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); - if (rv) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); - log_error(ls, "remove_name not found %s", name); - continue; - } - - if (!rsb_flag(r, RSB_TOSS)) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); - log_debug(ls, "remove_name not toss %s", name); - continue; - } - - if (r->res_master_nodeid != our_nodeid) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); - log_debug(ls, "remove_name master %d dir %d our %d %s", - r->res_master_nodeid, r->res_dir_nodeid, - our_nodeid, name); - continue; - } - - if (r->res_dir_nodeid == our_nodeid) { - /* should never happen */ - spin_unlock_bh(&ls->ls_rsbtbl_lock); - log_error(ls, "remove_name dir %d master %d our %d %s", - r->res_dir_nodeid, r->res_master_nodeid, - our_nodeid, name); - continue; - } - - if (!time_after_eq(jiffies, r->res_toss_time + - dlm_config.ci_toss_secs * HZ)) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); - log_debug(ls, "remove_name toss_time %lu now %lu %s", - r->res_toss_time, jiffies, name); - continue; - } - - list_del(&r->res_rsbs_list); - rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, - dlm_rhash_rsb_params); - send_remove(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); - - free_toss_rsb(r); - } -} - -void dlm_scan_rsbs(struct dlm_ls *ls) -{ - shrink_bucket(ls); -} - /* lkb is master or local copy */ static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index b56a34802762..8de9dee4c058 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -11,6 +11,7 @@ #ifndef __LOCK_DOT_H__ #define __LOCK_DOT_H__ +void dlm_rsb_toss_timer(struct timer_list *timer); void dlm_dump_rsb(struct dlm_rsb *r); void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len); void dlm_print_lkb(struct dlm_lkb *lkb); @@ -26,6 +27,7 @@ void dlm_scan_rsbs(struct dlm_ls *ls); int dlm_lock_recovery_try(struct dlm_ls *ls); void dlm_lock_recovery(struct dlm_ls *ls); void dlm_unlock_recovery(struct dlm_ls *ls); +void 
dlm_timer_resume(struct dlm_ls *ls); int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, int len, unsigned int flags, int *r_nodeid, int *result); diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 890e1a4cf787..931eb3f22ec6 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -29,8 +29,6 @@ static int ls_count; static struct mutex ls_lock; static struct list_head lslist; static spinlock_t lslist_lock; -static struct task_struct * scand_task; - static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) { @@ -247,64 +245,6 @@ void dlm_lockspace_exit(void) kset_unregister(dlm_kset); } -static struct dlm_ls *find_ls_to_scan(void) -{ - struct dlm_ls *ls; - - spin_lock_bh(&lslist_lock); - list_for_each_entry(ls, &lslist, ls_list) { - if (time_after_eq(jiffies, ls->ls_scan_time + - dlm_config.ci_scan_secs * HZ)) { - atomic_inc(&ls->ls_count); - spin_unlock_bh(&lslist_lock); - return ls; - } - } - spin_unlock_bh(&lslist_lock); - return NULL; -} - -static int dlm_scand(void *data) -{ - struct dlm_ls *ls; - - while (!kthread_should_stop()) { - ls = find_ls_to_scan(); - if (ls) { - if (dlm_lock_recovery_try(ls)) { - ls->ls_scan_time = jiffies; - dlm_scan_rsbs(ls); - dlm_unlock_recovery(ls); - } else { - ls->ls_scan_time += HZ; - } - - dlm_put_lockspace(ls); - continue; - } - schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ); - } - return 0; -} - -static int dlm_scand_start(void) -{ - struct task_struct *p; - int error = 0; - - p = kthread_run(dlm_scand, NULL, "dlm_scand"); - if (IS_ERR(p)) - error = PTR_ERR(p); - else - scand_task = p; - return error; -} - -static void dlm_scand_stop(void) -{ - kthread_stop(scand_task); -} - struct dlm_ls *dlm_find_lockspace_global(uint32_t id) { struct dlm_ls *ls; @@ -385,22 +325,9 @@ static int threads_start(void) /* Thread for sending/receiving messages for all lockspace's */ error = dlm_midcomms_start(); - if (error) { + if (error) log_print("cannot start dlm midcomms %d", error); - goto fail; - } - error = dlm_scand_start(); - if (error) { - log_print("cannot start dlm_scand thread %d", error); - goto midcomms_fail; - } - - return 0; - - midcomms_fail: - dlm_midcomms_stop(); - fail: return error; } @@ -412,7 +339,7 @@ static int new_lockspace(const char *name, const char *cluster, struct dlm_ls *ls; int do_unreg = 0; int namelen = strlen(name); - int i, error; + int error; if (namelen > DLM_LOCKSPACE_LEN || namelen == 0) return -EINVAL; @@ -503,13 +430,6 @@ static int new_lockspace(const char *name, const char *cluster, if (error) goto out_lsfree; - for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { - ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1, - GFP_KERNEL); - if (!ls->ls_remove_names[i]) - goto out_rsbtbl; - } - idr_init(&ls->ls_lkbidr); spin_lock_init(&ls->ls_lkbidr_spin); @@ -582,6 +502,11 @@ static int new_lockspace(const char *name, const char *cluster, INIT_LIST_HEAD(&ls->ls_dir_dump_list); rwlock_init(&ls->ls_dir_dump_lock); + INIT_LIST_HEAD(&ls->ls_toss_q); + spin_lock_init(&ls->ls_toss_q_lock); + timer_setup(&ls->ls_timer, dlm_rsb_toss_timer, + TIMER_DEFERRABLE); + spin_lock_bh(&lslist_lock); ls->ls_create_count = 1; list_add(&ls->ls_list, &lslist); @@ -661,9 +586,6 @@ static int new_lockspace(const char *name, const char *cluster, kfree(ls->ls_recover_buf); out_lkbidr: idr_destroy(&ls->ls_lkbidr); - out_rsbtbl: - for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) - kfree(ls->ls_remove_names[i]); rhashtable_destroy(&ls->ls_rsbtbl); out_lsfree: if (do_unreg) @@ -696,7 +618,6 @@ static 
int __dlm_new_lockspace(const char *name, const char *cluster, if (error > 0) error = 0; if (!ls_count) { - dlm_scand_stop(); dlm_midcomms_shutdown(); dlm_midcomms_stop(); } @@ -777,7 +698,7 @@ static void rhash_free_rsb(void *ptr, void *arg) static int release_lockspace(struct dlm_ls *ls, int force) { struct dlm_rsb *rsb; - int i, busy, rv; + int busy, rv; busy = lockspace_busy(ls, force); @@ -812,8 +733,13 @@ static int release_lockspace(struct dlm_ls *ls, int force) dlm_recoverd_stop(ls); + /* clear the LSFL_RUNNING flag to speed up + * timer_shutdown_sync(); we don't need the timer anymore + */ + clear_bit(LSFL_RUNNING, &ls->ls_flags); + timer_shutdown_sync(&ls->ls_timer); + if (ls_count == 1) { - dlm_scand_stop(); dlm_clear_members(ls); dlm_midcomms_shutdown(); } @@ -839,9 +765,6 @@ static int release_lockspace(struct dlm_ls *ls, int force) */ rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL); - for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) - kfree(ls->ls_remove_names[i]); - while (!list_empty(&ls->ls_new_rsb)) { rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); diff --git a/fs/dlm/member.c b/fs/dlm/member.c index 6401916a97ef..c46e306f2e5c 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -641,6 +641,8 @@ int dlm_ls_stop(struct dlm_ls *ls) spin_lock_bh(&ls->ls_recover_lock); set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); + if (new) + timer_delete_sync(&ls->ls_timer); ls->ls_recover_seq++; /* activate requestqueue and stop processing */ diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index d43189532b14..960a14b95605 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -889,6 +889,11 @@ void dlm_clear_toss(struct dlm_ls *ls) list_del(&r->res_rsbs_list); rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, dlm_rhash_rsb_params); + + /* remove it from the toss queue if it's part of it */ + if (!list_empty(&r->res_toss_q_list)) + list_del_init(&r->res_toss_q_list); + free_toss_rsb(r); count++; } diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 5e8e10030b74..c831e0275912 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -98,6 +98,16 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) spin_lock_bh(&ls->ls_recover_lock); if (ls->ls_recover_seq == seq) { set_bit(LSFL_RUNNING, &ls->ls_flags); + /* Schedule the next timer if recovery put something on toss. + * + * Rsbs that were queued on toss during recovery have not + * started a timer yet, because LSFL_RUNNING was cleared; + * nothing else has started either, because ls_in_recovery + * is still held. So dlm_timer_resume() here should not end + * up queueing a timer that turns into a no-op. + */ + dlm_timer_resume(ls); /* unblocks processes waiting to enter the dlm */ up_write(&ls->ls_in_recovery); clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); -- cgit v1.2.3 From e91313591b29ce724fe2f1bdf29f2482878fc275 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 15 Apr 2024 14:39:42 -0400 Subject: dlm: use rwlock for rsb hash table The conversion to rhashtable introduced a hash table lock per lockspace, in place of per bucket locks. To make this more scalable, switch to using a rwlock for hash table access. The common case fast path uses it as a read lock.
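An rwlock cannot be upgraded in place, so every path that may have to modify table state follows the same shape: look up under the read lock for the likely case, and when the slow path is needed, drop the lock, retake it as a write lock and repeat the lookup, because the rsb state may have changed in the unlocked window. Condensed from the find_rsb_dir() hunk below (error handling trimmed):

	/* likely path: rsb is active, a shared lock is enough */
	read_lock_bh(&ls->ls_rsbtbl_lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
	if (!error && !rsb_flag(r, RSB_TOSS)) {
		kref_get(&r->res_ref);
		read_unlock_bh(&ls->ls_rsbtbl_lock);
		goto out;
	}
	read_unlock_bh(&ls->ls_rsbtbl_lock);

	/* unlikely path: reviving a tossed rsb mutates table state,
	 * so relookup under the write lock and re-check RSB_TOSS -
	 * another thread may have revived it in the meantime
	 */
	write_lock_bh(&ls->ls_rsbtbl_lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);

The same window also affects insertion: rsb_insert() can now lose a race with a concurrent insert and return -EEXIST, which the callers resolve by freeing their freshly allocated rsb and restarting the whole lookup.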
Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- fs/dlm/debug_fs.c | 4 +- fs/dlm/dir.c | 4 +- fs/dlm/dlm_internal.h | 2 +- fs/dlm/lock.c | 269 ++++++++++++++++++++++++++++++++++++-------------- fs/dlm/lockspace.c | 2 +- fs/dlm/recover.c | 4 +- fs/dlm/recoverd.c | 8 +- 7 files changed, 206 insertions(+), 87 deletions(-) (limited to 'fs') diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 70567919f1b7..6ab3ed4074c6 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -413,7 +413,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos) else list = &ls->ls_keep; - spin_lock_bh(&ls->ls_rsbtbl_lock); + read_lock_bh(&ls->ls_rsbtbl_lock); return seq_list_start(list, *pos); } @@ -434,7 +434,7 @@ static void table_seq_stop(struct seq_file *seq, void *iter_ptr) { struct dlm_ls *ls = seq->private; - spin_unlock_bh(&ls->ls_rsbtbl_lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); } static const struct seq_operations format1_seq_ops = { diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 9687f908476b..b1ab0adbd9d0 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -200,9 +200,9 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, struct dlm_rsb *r; int rv; - spin_lock_bh(&ls->ls_rsbtbl_lock); + read_lock_bh(&ls->ls_rsbtbl_lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); if (!rv) return r; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 98a0ac511bc8..b675bffb61ae 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -585,7 +585,7 @@ struct dlm_ls { spinlock_t ls_lkbidr_spin; struct rhashtable ls_rsbtbl; - spinlock_t ls_rsbtbl_lock; + rwlock_t ls_rsbtbl_lock; struct list_head ls_toss; struct list_head ls_keep; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 7c97181a04fe..790d0fd76bbe 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -342,15 +342,15 @@ void dlm_hold_rsb(struct dlm_rsb *r) /* TODO move this to lib/refcount.c */ static __must_check bool -dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock) +dlm_refcount_dec_and_write_lock_bh(refcount_t *r, rwlock_t *lock) __cond_acquires(lock) { if (refcount_dec_not_one(r)) return false; - spin_lock_bh(lock); + write_lock_bh(lock); if (!refcount_dec_and_test(r)) { - spin_unlock_bh(lock); + write_unlock_bh(lock); return false; } @@ -358,11 +358,11 @@ __cond_acquires(lock) } /* TODO move this to include/linux/kref.h */ -static inline int dlm_kref_put_lock_bh(struct kref *kref, - void (*release)(struct kref *kref), - spinlock_t *lock) +static inline int dlm_kref_put_write_lock_bh(struct kref *kref, + void (*release)(struct kref *kref), + rwlock_t *lock) { - if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) { + if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) { release(kref); return 1; } @@ -378,10 +378,10 @@ static void put_rsb(struct dlm_rsb *r) struct dlm_ls *ls = r->res_ls; int rv; - rv = dlm_kref_put_lock_bh(&r->res_ref, toss_rsb, - &ls->ls_rsbtbl_lock); + rv = dlm_kref_put_write_lock_bh(&r->res_ref, toss_rsb, + &ls->ls_rsbtbl_lock); if (rv) - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); } void dlm_put_rsb(struct dlm_rsb *r) @@ -603,7 +603,7 @@ void dlm_rsb_toss_timer(struct timer_list *timer) * a caching handling and the other holders might to put * this rsb out of the toss state. 
*/ - rv = spin_trylock(&ls->ls_rsbtbl_lock); + rv = write_trylock(&ls->ls_rsbtbl_lock); if (!rv) { spin_unlock(&ls->ls_toss_q_lock); /* rearm again try timer */ @@ -618,7 +618,7 @@ void dlm_rsb_toss_timer(struct timer_list *timer) /* not necessary to held the ls_rsbtbl_lock when * calling send_remove() */ - spin_unlock(&ls->ls_rsbtbl_lock); + write_unlock(&ls->ls_rsbtbl_lock); /* remove the rsb out of the toss queue its gone * drom DLM now @@ -702,16 +702,8 @@ int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len, static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash) { - int rv; - - rv = rhashtable_insert_fast(rhash, &rsb->res_node, - dlm_rhash_rsb_params); - if (rv == -EEXIST) { - log_print("%s match", __func__); - dlm_dump_rsb(rsb); - } - - return rv; + return rhashtable_insert_fast(rhash, &rsb->res_node, + dlm_rhash_rsb_params); } /* @@ -806,24 +798,47 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, goto out; } - spin_lock_bh(&ls->ls_rsbtbl_lock); + retry_lookup: + /* check if the rsb is in keep state under read lock - likely path */ + read_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); - if (error) + if (error) { + read_unlock_bh(&ls->ls_rsbtbl_lock); goto do_new; + } /* * rsb is active, so we can't check master_nodeid without lock_rsb. */ - if (rsb_flag(r, RSB_TOSS)) + if (rsb_flag(r, RSB_TOSS)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); goto do_toss; + } kref_get(&r->res_ref); - goto out_unlock; + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto out; do_toss: + write_lock_bh(&ls->ls_rsbtbl_lock); + + /* retry lookup under write lock to see if its still in toss state + * if not it's in keep state and we relookup - unlikely path. + */ + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); + if (!error) { + if (!rsb_flag(r, RSB_TOSS)) { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto retry_lookup; + } + } else { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto do_new; + } + /* * rsb found inactive (master_nodeid may be out of date unless * we are the dir_nodeid or were the master) No other thread @@ -837,8 +852,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s", from_nodeid, r->res_master_nodeid, dir_nodeid, r->res_name); + write_unlock_bh(&ls->ls_rsbtbl_lock); error = -ENOTBLK; - goto out_unlock; + goto out; } if ((r->res_master_nodeid != our_nodeid) && from_dir) { @@ -868,9 +884,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, */ kref_init(&r->res_ref); rsb_delete_toss_timer(ls, r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); - goto out_unlock; + goto out; do_new: @@ -879,15 +895,13 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, */ if (error == -EBADR && !create) - goto out_unlock; + goto out; error = get_rsb_struct(ls, name, len, &r); - if (error == -EAGAIN) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); + if (error == -EAGAIN) goto retry; - } if (error) - goto out_unlock; + goto out; r->res_hash = hash; r->res_dir_nodeid = dir_nodeid; @@ -909,7 +923,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, dlm_free_rsb(r); r = NULL; error = -ENOTBLK; - goto out_unlock; + goto out; } if (from_other) { @@ -929,11 +943,20 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, } out_add: + + write_lock_bh(&ls->ls_rsbtbl_lock); error = rsb_insert(r, &ls->ls_rsbtbl); - if (!error) + if 
(error == -EEXIST) { + /* somebody else was faster and it seems the + * rsb exists now, we do a whole relookup + */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + dlm_free_rsb(r); + goto retry_lookup; + } else if (!error) { list_add(&r->res_rsbs_list, &ls->ls_keep); - out_unlock: - spin_unlock_bh(&ls->ls_rsbtbl_lock); + } + write_unlock_bh(&ls->ls_rsbtbl_lock); out: *r_ret = r; return error; @@ -957,24 +980,49 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, if (error < 0) goto out; - spin_lock_bh(&ls->ls_rsbtbl_lock); + retry_lookup: + /* check if the rsb is in keep state under read lock - likely path */ + read_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); - if (error) + if (error) { + read_unlock_bh(&ls->ls_rsbtbl_lock); goto do_new; + } - if (rsb_flag(r, RSB_TOSS)) + if (rsb_flag(r, RSB_TOSS)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); goto do_toss; + } /* * rsb is active, so we can't check master_nodeid without lock_rsb. */ kref_get(&r->res_ref); - goto out_unlock; + read_unlock_bh(&ls->ls_rsbtbl_lock); + + goto out; do_toss: + write_lock_bh(&ls->ls_rsbtbl_lock); + + /* retry lookup under write lock to see if its still in toss state + * if not it's in keep state and we relookup - unlikely path. + */ + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); + if (!error) { + if (!rsb_flag(r, RSB_TOSS)) { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto retry_lookup; + } + } else { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto do_new; + } + + /* * rsb found inactive. No other thread is using this rsb because * it's on the toss list, so we can look at or update @@ -987,8 +1035,9 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d", from_nodeid, r->res_master_nodeid, dir_nodeid); dlm_print_rsb(r); + write_unlock_bh(&ls->ls_rsbtbl_lock); error = -ENOTBLK; - goto out_unlock; + goto out; } if (!recover && (r->res_master_nodeid != our_nodeid) && @@ -1010,9 +1059,9 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, */ kref_init(&r->res_ref); rsb_delete_toss_timer(ls, r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); - goto out_unlock; + goto out; do_new: @@ -1022,11 +1071,10 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); goto retry; } if (error) - goto out_unlock; + goto out; r->res_hash = hash; r->res_dir_nodeid = dir_nodeid; @@ -1034,11 +1082,20 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, r->res_nodeid = (dir_nodeid == our_nodeid) ? 
0 : dir_nodeid; kref_init(&r->res_ref); + write_lock_bh(&ls->ls_rsbtbl_lock); error = rsb_insert(r, &ls->ls_rsbtbl); - if (!error) + if (error == -EEXIST) { + /* somebody else was faster and it seems the + * rsb exists now, we do a whole relookup + */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + dlm_free_rsb(r); + goto retry_lookup; + } else if (!error) { list_add(&r->res_rsbs_list, &ls->ls_keep); - out_unlock: - spin_unlock_bh(&ls->ls_rsbtbl_lock); + } + write_unlock_bh(&ls->ls_rsbtbl_lock); + out: *r_ret = r; return error; @@ -1251,18 +1308,23 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, if (error < 0) return error; - spin_lock_bh(&ls->ls_rsbtbl_lock); + retry_lookup: + + /* check if the rsb is in keep state under read lock - likely path */ + read_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (!error) { - if (rsb_flag(r, RSB_TOSS)) + if (rsb_flag(r, RSB_TOSS)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); goto do_toss; + } /* because the rsb is active, we need to lock_rsb before * checking/changing re_master_nodeid */ hold_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); lock_rsb(r); __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, @@ -1274,10 +1336,31 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, return 0; } else { + read_unlock_bh(&ls->ls_rsbtbl_lock); goto not_found; } do_toss: + /* unlikely path - relookup under write */ + write_lock_bh(&ls->ls_rsbtbl_lock); + + /* rsb_mod_timer() requires to held ls_rsbtbl_lock in write lock + * check if the rsb is still in toss state, if not relookup + */ + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); + if (!error) { + if (!rsb_flag(r, RSB_TOSS)) { + write_unlock_bh(&ls->ls_rsbtbl_lock); + /* something as changed, very unlikely but + * try again + */ + goto retry_lookup; + } + } else { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto not_found; + } + /* because the rsb is inactive (on toss list), it's not refcounted * and lock_rsb is not used, but is protected by the rsbtbl lock */ @@ -1287,18 +1370,16 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, rsb_mod_timer(ls, r); /* the rsb was inactive (on toss list) */ - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return 0; not_found: error = get_rsb_struct(ls, name, len, &r); - if (error == -EAGAIN) { - spin_unlock_bh(&ls->ls_rsbtbl_lock); + if (error == -EAGAIN) goto retry; - } if (error) - goto out_unlock; + goto out; r->res_hash = hash; r->res_dir_nodeid = our_nodeid; @@ -1307,22 +1388,30 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, kref_init(&r->res_ref); rsb_set_flag(r, RSB_TOSS); + write_lock_bh(&ls->ls_rsbtbl_lock); error = rsb_insert(r, &ls->ls_rsbtbl); - if (error) { + if (error == -EEXIST) { + /* somebody else was faster and it seems the + * rsb exists now, we do a whole relookup + */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + dlm_free_rsb(r); + goto retry_lookup; + } else if (error) { + write_unlock_bh(&ls->ls_rsbtbl_lock); /* should never happen */ dlm_free_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); goto retry; } list_add(&r->res_rsbs_list, &ls->ls_toss); rsb_mod_timer(ls, r); + write_unlock_bh(&ls->ls_rsbtbl_lock); if (result) *result = DLM_LU_ADD; *r_nodeid = from_nodeid; - out_unlock: - spin_unlock_bh(&ls->ls_rsbtbl_lock); + out: return error; } @@ -1330,12 +1419,12 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t 
hash) { struct dlm_rsb *r; - spin_lock_bh(&ls->ls_rsbtbl_lock); + read_lock_bh(&ls->ls_rsbtbl_lock); list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { if (r->res_hash == hash) dlm_dump_rsb(r); } - spin_unlock_bh(&ls->ls_rsbtbl_lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); } void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) @@ -1343,14 +1432,14 @@ void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) struct dlm_rsb *r = NULL; int error; - spin_lock_bh(&ls->ls_rsbtbl_lock); + read_lock_bh(&ls->ls_rsbtbl_lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (!error) goto out; dlm_dump_rsb(r); out: - spin_unlock_bh(&ls->ls_rsbtbl_lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); } static void toss_rsb(struct kref *kref) @@ -1478,6 +1567,36 @@ static void kill_lkb(struct kref *kref) DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); } +/* TODO move this to lib/refcount.c */ +static __must_check bool +dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock) +__cond_acquires(lock) +{ + if (refcount_dec_not_one(r)) + return false; + + spin_lock_bh(lock); + if (!refcount_dec_and_test(r)) { + spin_unlock_bh(lock); + return false; + } + + return true; +} + +/* TODO move this to include/linux/kref.h */ +static inline int dlm_kref_put_lock_bh(struct kref *kref, + void (*release)(struct kref *kref), + spinlock_t *lock) +{ + if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) { + release(kref); + return 1; + } + + return 0; +} + /* __put_lkb() is used when an lkb may not have an rsb attached to it so we need to provide the lockspace explicitly */ @@ -4247,14 +4366,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) memset(name, 0, sizeof(name)); memcpy(name, ms->m_extra, len); - spin_lock_bh(&ls->ls_rsbtbl_lock); + write_lock_bh(&ls->ls_rsbtbl_lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (rv) { /* should not happen */ log_error(ls, "%s from %d not found %s", __func__, from_nodeid, name); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } @@ -4264,14 +4383,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) log_error(ls, "receive_remove keep from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } log_debug(ls, "receive_remove from %d master %d first %x %s", from_nodeid, r->res_master_nodeid, r->res_first_lkid, name); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } @@ -4279,14 +4398,14 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) log_error(ls, "receive_remove toss from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } list_del(&r->res_rsbs_list); rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, dlm_rhash_rsb_params); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); free_toss_rsb(r); } @@ -5354,7 +5473,7 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls) { struct dlm_rsb *r; - spin_lock_bh(&ls->ls_rsbtbl_lock); + read_lock_bh(&ls->ls_rsbtbl_lock); list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) { if (!rsb_flag(r, RSB_RECOVER_GRANT)) continue; @@ -5363,10 +5482,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls) continue; } hold_rsb(r); - spin_unlock_bh(&ls->ls_rsbtbl_lock); + 
+		read_unlock_bh(&ls->ls_rsbtbl_lock);
 		return r;
 	}
-	spin_unlock_bh(&ls->ls_rsbtbl_lock);
+	read_unlock_bh(&ls->ls_rsbtbl_lock);
 	return NULL;
 }
 
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 931eb3f22ec6..04f4c74831ce 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -424,7 +424,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	INIT_LIST_HEAD(&ls->ls_toss);
 	INIT_LIST_HEAD(&ls->ls_keep);
-	spin_lock_init(&ls->ls_rsbtbl_lock);
+	rwlock_init(&ls->ls_rsbtbl_lock);
 
 	error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
 	if (error)
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 960a14b95605..f493d5f30c58 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -884,7 +884,7 @@ void dlm_clear_toss(struct dlm_ls *ls)
 	struct dlm_rsb *r, *safe;
 	unsigned int count = 0;
 
-	spin_lock_bh(&ls->ls_rsbtbl_lock);
+	write_lock_bh(&ls->ls_rsbtbl_lock);
 	list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
 		list_del(&r->res_rsbs_list);
 		rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
@@ -897,7 +897,7 @@ void dlm_clear_toss(struct dlm_ls *ls)
 		free_toss_rsb(r);
 		count++;
 	}
-	spin_unlock_bh(&ls->ls_rsbtbl_lock);
+	write_unlock_bh(&ls->ls_rsbtbl_lock);
 
 	if (count)
 		log_rinfo(ls, "dlm_clear_toss %u done", count);
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index c831e0275912..17a40d1e6036 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -32,7 +32,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
 		goto out;
 	}
 
-	spin_lock_bh(&ls->ls_rsbtbl_lock);
+	read_lock_bh(&ls->ls_rsbtbl_lock);
 	list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
 		if (r->res_nodeid)
 			continue;
@@ -40,7 +40,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
 		list_add(&r->res_masters_list, &ls->ls_masters_list);
 		dlm_hold_rsb(r);
 	}
-	spin_unlock_bh(&ls->ls_rsbtbl_lock);
+	read_unlock_bh(&ls->ls_rsbtbl_lock);
  out:
 	write_unlock_bh(&ls->ls_masters_lock);
 	return error;
@@ -62,14 +62,14 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
 {
 	struct dlm_rsb *r;
 
-	spin_lock_bh(&ls->ls_rsbtbl_lock);
+	read_lock_bh(&ls->ls_rsbtbl_lock);
 	list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
 		list_add(&r->res_root_list, root_list);
 		dlm_hold_rsb(r);
 	}
 
 	WARN_ON_ONCE(!list_empty(&ls->ls_toss));
-	spin_unlock_bh(&ls->ls_rsbtbl_lock);
+	read_unlock_bh(&ls->ls_rsbtbl_lock);
 }
 
 static void dlm_release_root_list(struct list_head *root_list)
--
cgit v1.2.3
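The conversion above implements an optimistic lookup scheme: the likely path takes only the read lock, while creation happens under the write lock, with a full relookup whenever a concurrent inserter wins the race (the -EEXIST and retry_lookup handling). A minimal, self-contained sketch of that pattern follows; the thing_* names and the list-based table are illustrative stand-ins, not dlm code, which uses an rhashtable.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct thing {
	struct list_head list;
	char key[32];
};

static LIST_HEAD(thing_list);
static DEFINE_RWLOCK(thing_lock);

/* caller must hold thing_lock in read or write mode */
static struct thing *thing_search(const char *key)
{
	struct thing *t;

	list_for_each_entry(t, &thing_list, list)
		if (!strcmp(t->key, key))
			return t;
	return NULL;
}

static struct thing *thing_lookup_or_create(const char *key)
{
	struct thing *t, *new;

retry_lookup:
	/* likely path: the entry already exists, readers run concurrently */
	read_lock_bh(&thing_lock);
	t = thing_search(key);
	read_unlock_bh(&thing_lock);
	if (t)
		return t;

	new = kzalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return NULL;
	strscpy(new->key, key, sizeof(new->key));

	/* unlikely path: insert under the write lock; if somebody
	 * else was faster, free ours and do a whole relookup
	 */
	write_lock_bh(&thing_lock);
	if (thing_search(key)) {
		write_unlock_bh(&thing_lock);
		kfree(new);
		goto retry_lookup;
	}
	list_add(&new->list, &thing_list);
	write_unlock_bh(&thing_lock);
	return new;
}

Restarting from retry_lookup rather than reusing the entry found under the write lock matters in the dlm code because the winner's rsb can be in either keep or toss state, and each state takes a different path.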
From 15fd7e55177a8d6243096bb6b9536cd15f56d547 Mon Sep 17 00:00:00 2001
From: Alexander Aring
Date: Mon, 15 Apr 2024 14:39:43 -0400
Subject: dlm: use rwlock for lkbidr

Convert the lock for lkbidr to an rwlock. Most idr lookups will use
the read lock.

Signed-off-by: Alexander Aring
Signed-off-by: David Teigland
---
 fs/dlm/dlm_internal.h |  2 +-
 fs/dlm/lock.c         | 44 +++++++-------------------------------------
 fs/dlm/lockspace.c    |  6 +++---
 3 files changed, 11 insertions(+), 41 deletions(-)

(limited to 'fs')

diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index b675bffb61ae..19e57cbd5b13 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -582,7 +582,7 @@ struct dlm_ls {
 	struct kobject		ls_kobj;
 
 	struct idr		ls_lkbidr;
-	spinlock_t		ls_lkbidr_spin;
+	rwlock_t		ls_lkbidr_lock;
 
 	struct rhashtable	ls_rsbtbl;
 	rwlock_t		ls_rsbtbl_lock;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 790d0fd76bbe..bbbc9593a64e 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1522,11 +1522,11 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
 	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
 	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
 
-	spin_lock_bh(&ls->ls_lkbidr_spin);
+	write_lock_bh(&ls->ls_lkbidr_lock);
 	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
 	if (rv >= 0)
 		lkb->lkb_id = rv;
-	spin_unlock_bh(&ls->ls_lkbidr_spin);
+	write_unlock_bh(&ls->ls_lkbidr_lock);
 
 	if (rv < 0) {
 		log_error(ls, "create_lkb idr error %d", rv);
@@ -1547,11 +1547,11 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
 {
 	struct dlm_lkb *lkb;
 
-	spin_lock_bh(&ls->ls_lkbidr_spin);
+	read_lock_bh(&ls->ls_lkbidr_lock);
 	lkb = idr_find(&ls->ls_lkbidr, lkid);
 	if (lkb)
 		kref_get(&lkb->lkb_ref);
-	spin_unlock_bh(&ls->ls_lkbidr_spin);
+	read_unlock_bh(&ls->ls_lkbidr_lock);
 
 	*lkb_ret = lkb;
 	return lkb ? 0 : -ENOENT;
@@ -1567,36 +1567,6 @@ static void kill_lkb(struct kref *kref)
 	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
 }
 
-/* TODO move this to lib/refcount.c */
-static __must_check bool
-dlm_refcount_dec_and_lock_bh(refcount_t *r, spinlock_t *lock)
-__cond_acquires(lock)
-{
-	if (refcount_dec_not_one(r))
-		return false;
-
-	spin_lock_bh(lock);
-	if (!refcount_dec_and_test(r)) {
-		spin_unlock_bh(lock);
-		return false;
-	}
-
-	return true;
-}
-
-/* TODO move this to include/linux/kref.h */
-static inline int dlm_kref_put_lock_bh(struct kref *kref,
-				       void (*release)(struct kref *kref),
-				       spinlock_t *lock)
-{
-	if (dlm_refcount_dec_and_lock_bh(&kref->refcount, lock)) {
-		release(kref);
-		return 1;
-	}
-
-	return 0;
-}
-
 /* __put_lkb() is used when an lkb may not have an rsb attached to
    it so we need to provide the lockspace explicitly */
 
@@ -1605,11 +1575,11 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
 	uint32_t lkid = lkb->lkb_id;
 	int rv;
 
-	rv = dlm_kref_put_lock_bh(&lkb->lkb_ref, kill_lkb,
-				  &ls->ls_lkbidr_spin);
+	rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb,
+					&ls->ls_lkbidr_lock);
 	if (rv) {
 		idr_remove(&ls->ls_lkbidr, lkid);
-		spin_unlock_bh(&ls->ls_lkbidr_spin);
+		write_unlock_bh(&ls->ls_lkbidr_lock);
 
 		detach_lkb(lkb);
 
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 04f4c74831ce..5ce26882159e 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -431,7 +431,7 @@ static int new_lockspace(const char *name, const char *cluster,
 		goto out_lsfree;
 
 	idr_init(&ls->ls_lkbidr);
-	spin_lock_init(&ls->ls_lkbidr_spin);
+	rwlock_init(&ls->ls_lkbidr_lock);
 
 	INIT_LIST_HEAD(&ls->ls_waiters);
 	spin_lock_init(&ls->ls_waiters_lock);
@@ -676,7 +676,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
 {
 	int rv;
 
-	spin_lock_bh(&ls->ls_lkbidr_spin);
+	read_lock_bh(&ls->ls_lkbidr_lock);
 	if (force == 0) {
 		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
 	} else if (force == 1) {
@@ -684,7 +684,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
 	} else {
 		rv = 0;
 	}
-	spin_unlock_bh(&ls->ls_lkbidr_spin);
+	read_unlock_bh(&ls->ls_lkbidr_lock);
 
 	return rv;
 }
--
cgit v1.2.3
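__put_lkb() now calls dlm_kref_put_write_lock_bh(), whose definition is not part of this hunk; presumably it mirrors the removed spinlock helper pair with the write lock substituted. A sketch under that assumption:

#include <linux/kref.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

/* assumed shape of the rwlock variants of the removed helpers */
static __must_check bool
dlm_refcount_dec_and_write_lock_bh(refcount_t *r, rwlock_t *lock)
{
	/* fast path: not the last reference, no lock traffic needed */
	if (refcount_dec_not_one(r))
		return false;

	write_lock_bh(lock);
	if (!refcount_dec_and_test(r)) {
		write_unlock_bh(lock);
		return false;
	}

	/* last reference dropped: return with the write lock held */
	return true;
}

static inline int dlm_kref_put_write_lock_bh(struct kref *kref,
					     void (*release)(struct kref *kref),
					     rwlock_t *lock)
{
	if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) {
		release(kref);
		return 1;
	}

	return 0;
}

Returning with the write lock held only when the final reference drops lets __put_lkb() remove the lkb from the idr before releasing the lock, so no reader can find a half-torn-down lkb.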
From 7b012732d005fba22912dda038dd253e3b9a1bfc Mon Sep 17 00:00:00 2001
From: Alexander Aring
Date: Wed, 17 Apr 2024 15:13:22 -0400
Subject: dlm: fix sleep in atomic context

This patch changes the orphans mutex to a spinlock. Since commit
c288745f1d4a ("dlm: avoid blocking receive at the end of recovery")
the DLM message receive path is locked by a rwlock_t, and do_purge()
can be called while that lock is held, which forbids sleeping. We
need spin_lock_bh() because do_purge() is also reachable from user
context via dlm_user_purge(), and since commit 92d59adfaf71 ("dlm: do
message processing in softirq context") the DLM message receive path
runs in softirq context.

Fixes: c288745f1d4a ("dlm: avoid blocking receive at the end of recovery")
Reported-by: Dan Carpenter
Closes: https://lore.kernel.org/gfs2/9ad928eb-2ece-4ad9-a79c-d2bce228e4bc@moroto.mountain/
Signed-off-by: Alexander Aring
Signed-off-by: David Teigland
---
 fs/dlm/dlm_internal.h |  2 +-
 fs/dlm/lock.c         | 12 ++++++------
 fs/dlm/lockspace.c    |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'fs')

diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 19e57cbd5b13..9085ba3b2f20 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -602,7 +602,7 @@ struct dlm_ls {
 	spinlock_t		ls_waiters_lock;
 	struct list_head	ls_waiters;	/* lkbs needing a reply */
 
-	struct mutex		ls_orphans_mutex;
+	spinlock_t		ls_orphans_lock;
 	struct list_head	ls_orphans;
 
 	spinlock_t		ls_new_rsb_spin;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index bbbc9593a64e..f103b8c30592 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -5880,7 +5880,7 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
 	int found_other_mode = 0;
 	int rv = 0;
 
-	mutex_lock(&ls->ls_orphans_mutex);
+	spin_lock_bh(&ls->ls_orphans_lock);
 	list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
 		if (iter->lkb_resource->res_length != namelen)
 			continue;
@@ -5897,7 +5897,7 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
 		*lkid = iter->lkb_id;
 		break;
 	}
-	mutex_unlock(&ls->ls_orphans_mutex);
+	spin_unlock_bh(&ls->ls_orphans_lock);
 
 	if (!lkb && found_other_mode) {
 		rv = -EAGAIN;
@@ -6089,9 +6089,9 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 	int error;
 
 	hold_lkb(lkb); /* reference for the ls_orphans list */
-	mutex_lock(&ls->ls_orphans_mutex);
+	spin_lock_bh(&ls->ls_orphans_lock);
 	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
-	mutex_unlock(&ls->ls_orphans_mutex);
+	spin_unlock_bh(&ls->ls_orphans_lock);
 
 	set_unlock_args(0, lkb->lkb_ua, &args);
 
@@ -6241,7 +6241,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
 {
 	struct dlm_lkb *lkb, *safe;
 
-	mutex_lock(&ls->ls_orphans_mutex);
+	spin_lock_bh(&ls->ls_orphans_lock);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
 		if (pid && lkb->lkb_ownpid != pid)
 			continue;
@@ -6249,7 +6249,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
 		list_del_init(&lkb->lkb_ownqueue);
 		dlm_put_lkb(lkb);
 	}
-	mutex_unlock(&ls->ls_orphans_mutex);
+	spin_unlock_bh(&ls->ls_orphans_lock);
 }
 
 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 5ce26882159e..ed23787271b1 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -436,7 +436,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	INIT_LIST_HEAD(&ls->ls_waiters);
 	spin_lock_init(&ls->ls_waiters_lock);
 	INIT_LIST_HEAD(&ls->ls_orphans);
-	mutex_init(&ls->ls_orphans_mutex);
+	spin_lock_init(&ls->ls_orphans_lock);
 
 	INIT_LIST_HEAD(&ls->ls_new_rsb);
 	spin_lock_init(&ls->ls_new_rsb_spin);
--
cgit v1.2.3
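The rule behind this fix: a mutex may sleep, and sleeping is forbidden both while holding a spinning lock and in softirq context; the _bh spinlock variants additionally disable local softirqs, so a single lock can serialize both contexts without deadlocking. A self-contained sketch of the resulting pattern, with illustrative orphan_* names rather than the actual dlm structures:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct orphan {
	struct list_head list;
	int pid;
};

static LIST_HEAD(orphan_list);
static DEFINE_SPINLOCK(orphan_lock);

/* process context, e.g. a device write() handler */
static void orphan_add(struct orphan *o)
{
	spin_lock_bh(&orphan_lock);	/* also blocks local softirqs */
	list_add_tail(&o->list, &orphan_list);
	spin_unlock_bh(&orphan_lock);
}

/* also reachable from softirq context, where sleeping on a
 * mutex would be a bug
 */
static void orphan_purge(int pid)
{
	struct orphan *o, *safe;

	spin_lock_bh(&orphan_lock);
	list_for_each_entry_safe(o, safe, &orphan_list, list) {
		if (pid && o->pid != pid)
			continue;
		list_del_init(&o->list);
	}
	spin_unlock_bh(&orphan_lock);
}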
From 7b72ab2c6a468305449db8f204bf1e406fd3e147 Mon Sep 17 00:00:00 2001
From: Alexander Aring
Date: Tue, 23 Apr 2024 08:52:31 -0400
Subject: dlm: return -ENOMEM if ls_recover_buf fails

This patch returns -ENOMEM if the ls_recover_buf allocation fails;
setting the error code was forgotten when commit 6c648035cbe7 ("dlm:
switch to use rhashtable for rsbs") reworked this error path.

Reported-by: kernel test robot
Reported-by: Dan Carpenter
Closes: https://lore.kernel.org/r/202404200536.jGi6052v-lkp@intel.com/
Fixes: 6c648035cbe7 ("dlm: switch to use rhashtable for rsbs")
Signed-off-by: Alexander Aring
Signed-off-by: David Teigland
---
 fs/dlm/lockspace.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index ed23787271b1..475ab4370dda 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -482,8 +482,10 @@ static int new_lockspace(const char *name, const char *cluster,
 	 * might send less.
 	 */
 	ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
-	if (!ls->ls_recover_buf)
+	if (!ls->ls_recover_buf) {
+		error = -ENOMEM;
 		goto out_lkbidr;
+	}
 
 	ls->ls_slot = 0;
 	ls->ls_num_slots = 0;
--
cgit v1.2.3
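This bug class is typical of shared goto-unwind error paths: the cleanup label does not set an error code itself, so every failure site must assign one before jumping. A minimal sketch of the convention, using a hypothetical setup_buffers() that is not dlm code:

#include <linux/slab.h>

static int setup_buffers(void **a, void **b, size_t len)
{
	int error;

	*a = kmalloc(len, GFP_NOFS);
	if (!*a)
		return -ENOMEM;

	*b = kmalloc(len, GFP_NOFS);
	if (!*b) {
		/* the easily forgotten assignment: the label below
		 * only unwinds, it does not pick an error code
		 */
		error = -ENOMEM;
		goto out_free_a;
	}

	return 0;

 out_free_a:
	kfree(*a);
	*a = NULL;
	return error;
}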