author     David Teigland <teigland@redhat.com>      2006-08-24 23:47:20 +0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-08-25 18:02:53 +0400
commit     5f88f1ea16a2fb5f125505053d1bfb7901a88c64 (patch)
tree       e3d9ce0e01a3903556885c784f8db759c26a795e  /fs/dlm/lockspace.c
parent     5dc39fe621ead2fa2a0439a686be4df185861eae (diff)
download   linux-5f88f1ea16a2fb5f125505053d1bfb7901a88c64.tar.xz
[DLM] add new lockspace to list earlier
When a new lockspace was being created, the recoverd thread was being started for it before the lockspace was added to the global list of lockspaces. The new thread was looking up the lockspace in the global list and sometimes not finding it due to the race with the original thread adding it to the list. We need to add the lockspace to the global list before starting the thread instead of after, and if the new thread can't find the lockspace for some reason, it should return an error.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
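To illustrate the ordering the message describes, here is a minimal userspace pthread sketch, not the kernel code itself: struct lockspace, lslist, find_lockspace(), worker() and new_lockspace() below are illustrative stand-ins for the DLM structures, the global lockspace list, the recoverd lookup and the real new_lockspace(). The point is only the ordering: the object is published on the global list before the thread that looks it up is started, and is unlinked again if starting the thread fails (compare the reordered error path in the diff below).

/*
 * Minimal userspace sketch of the ordering fix; the names here are
 * illustrative stand-ins, not the kernel's DLM API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct lockspace {
	char name[64];
	struct lockspace *next;
};

static struct lockspace *lslist;	/* global list of lockspaces */
static pthread_mutex_t lslist_lock = PTHREAD_MUTEX_INITIALIZER;

static struct lockspace *find_lockspace(const char *name)
{
	struct lockspace *ls;

	pthread_mutex_lock(&lslist_lock);
	for (ls = lslist; ls; ls = ls->next)
		if (!strcmp(ls->name, name))
			break;
	pthread_mutex_unlock(&lslist_lock);
	return ls;
}

/* stands in for the recoverd thread: it must be able to find its lockspace */
static void *worker(void *arg)
{
	if (!find_lockspace(arg)) {
		fprintf(stderr, "lockspace not on list, worker exits with error\n");
		return (void *)-1L;
	}
	/* ... recovery work would happen here ... */
	return NULL;
}

static int new_lockspace(const char *name)
{
	struct lockspace *ls = calloc(1, sizeof(*ls));
	struct lockspace **p;
	pthread_t t;

	if (!ls)
		return -1;
	snprintf(ls->name, sizeof(ls->name), "%s", name);

	/* publish to the global list BEFORE starting the thread ... */
	pthread_mutex_lock(&lslist_lock);
	ls->next = lslist;
	lslist = ls;
	pthread_mutex_unlock(&lslist_lock);

	/* ... so the worker's lookup cannot race with the insertion */
	if (pthread_create(&t, NULL, worker, ls->name)) {
		/* on failure, unlink again (cf. the reordered error path) */
		pthread_mutex_lock(&lslist_lock);
		for (p = &lslist; *p; p = &(*p)->next)
			if (*p == ls) {
				*p = ls->next;
				break;
			}
		pthread_mutex_unlock(&lslist_lock);
		free(ls);
		return -1;
	}
	pthread_detach(t);
	return 0;
}

int main(void)
{
	if (new_lockspace("example"))
		return 1;
	sleep(1);	/* let the worker run its lookup */
	return 0;
}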
Diffstat (limited to 'fs/dlm/lockspace.c')
-rw-r--r--  fs/dlm/lockspace.c  13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 7adaad53fc38..ff83f80e43eb 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -488,16 +488,17 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
down_write(&ls->ls_in_recovery);
+ spin_lock(&lslist_lock);
+ list_add(&ls->ls_list, &lslist);
+ spin_unlock(&lslist_lock);
+
+ /* needs to find ls in lslist */
error = dlm_recoverd_start(ls);
if (error) {
log_error(ls, "can't start dlm_recoverd %d", error);
goto out_rcomfree;
}
- spin_lock(&lslist_lock);
- list_add(&ls->ls_list, &lslist);
- spin_unlock(&lslist_lock);
-
dlm_create_debug_file(ls);
error = kobject_setup(ls);
@@ -519,11 +520,11 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
kobject_unregister(&ls->ls_kobj);
out_del:
dlm_delete_debug_file(ls);
+ dlm_recoverd_stop(ls);
+ out_rcomfree:
spin_lock(&lslist_lock);
list_del(&ls->ls_list);
spin_unlock(&lslist_lock);
- dlm_recoverd_stop(ls);
- out_rcomfree:
kfree(ls->ls_recover_buf);
out_dirfree:
kfree(ls->ls_dirtbl);