author     David Teigland <teigland@redhat.com>     2006-08-24 23:47:20 +0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-08-25 18:02:53 +0400
commit     5f88f1ea16a2fb5f125505053d1bfb7901a88c64 (patch)
tree       e3d9ce0e01a3903556885c784f8db759c26a795e /fs
parent     5dc39fe621ead2fa2a0439a686be4df185861eae (diff)
download   linux-5f88f1ea16a2fb5f125505053d1bfb7901a88c64.tar.xz
[DLM] add new lockspace to list earlier
When a new lockspace was being created, the recoverd thread was being started for it before the lockspace was added to the global list of lockspaces. The new thread would then look the lockspace up in the global list and sometimes fail to find it, because it raced with the original thread that was still adding it. We need to add the lockspace to the global list before starting the thread instead of after, and if the new thread can't find the lockspace for some reason, it should return an error.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
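To make the ordering concrete, here is a minimal user-space sketch of the same pattern (plain C with pthreads, not DLM code; the names ls, lslist, find_ls and recoverd below are made up for illustration): the creating thread publishes the object on the shared list before it starts the worker, and the worker returns an error if the lookup still fails.

/* Minimal illustration only -- not kernel/DLM code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ls {
	char name[32];
	struct ls *next;
};

static struct ls *lslist;                      /* global "lockspace" list */
static pthread_mutex_t lslist_lock = PTHREAD_MUTEX_INITIALIZER;

static struct ls *find_ls(const char *name)
{
	struct ls *ls;

	pthread_mutex_lock(&lslist_lock);
	for (ls = lslist; ls; ls = ls->next)
		if (strcmp(ls->name, name) == 0)
			break;
	pthread_mutex_unlock(&lslist_lock);
	return ls;
}

/* Worker thread: looks its lockspace up by itself, like dlm_recoverd(). */
static void *recoverd(void *arg)
{
	struct ls *ls = find_ls(arg);

	if (!ls) {                             /* mirrors the added error return */
		fprintf(stderr, "recoverd: no lockspace %s\n", (const char *)arg);
		return (void *)-1L;
	}
	printf("recoverd: recovering %s\n", ls->name);
	return NULL;
}

int main(void)
{
	struct ls *ls = calloc(1, sizeof(*ls));
	pthread_t t;

	strcpy(ls->name, "alpha");

	/* Publish to the global list BEFORE starting the thread that needs it. */
	pthread_mutex_lock(&lslist_lock);
	ls->next = lslist;
	lslist = ls;
	pthread_mutex_unlock(&lslist_lock);

	pthread_create(&t, NULL, recoverd, ls->name);
	pthread_join(t, NULL);
	return 0;
}

With the publish step moved ahead of thread creation, the worker's lookup can no longer race with the list insertion; the NULL check only guards against a genuinely missing entry, which is exactly the structure the patch below gives new_lockspace() and dlm_recoverd().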
Diffstat (limited to 'fs')
-rw-r--r--  fs/dlm/lockspace.c  13
-rw-r--r--  fs/dlm/recoverd.c    4
2 files changed, 11 insertions, 6 deletions
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 7adaad53fc38..ff83f80e43eb 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -488,16 +488,17 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
 
 	down_write(&ls->ls_in_recovery);
 
+	spin_lock(&lslist_lock);
+	list_add(&ls->ls_list, &lslist);
+	spin_unlock(&lslist_lock);
+
+	/* needs to find ls in lslist */
 	error = dlm_recoverd_start(ls);
 	if (error) {
 		log_error(ls, "can't start dlm_recoverd %d", error);
 		goto out_rcomfree;
 	}
 
-	spin_lock(&lslist_lock);
-	list_add(&ls->ls_list, &lslist);
-	spin_unlock(&lslist_lock);
-
 	dlm_create_debug_file(ls);
 
 	error = kobject_setup(ls);
@@ -519,11 +520,11 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
 	kobject_unregister(&ls->ls_kobj);
  out_del:
 	dlm_delete_debug_file(ls);
+	dlm_recoverd_stop(ls);
+ out_rcomfree:
 	spin_lock(&lslist_lock);
 	list_del(&ls->ls_list);
 	spin_unlock(&lslist_lock);
-	dlm_recoverd_stop(ls);
- out_rcomfree:
 	kfree(ls->ls_recover_buf);
  out_dirfree:
 	kfree(ls->ls_dirtbl);
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index eac8e9fa67f1..362e3eff4dc9 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -234,6 +234,10 @@ static int dlm_recoverd(void *arg)
 	struct dlm_ls *ls;
 
 	ls = dlm_find_lockspace_local(arg);
+	if (!ls) {
+		log_print("dlm_recoverd: no lockspace %p", arg);
+		return -1;
+	}
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);