author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2014-01-14 02:09:38 +0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2014-01-14 02:09:38 +0400
commit     9b0925a6ff64a33be45497e3c798bfee8790b102 (patch)
tree       827aec07fe844addd8e1dd2d1f09b4bb855c9cd1 /fs/kernfs
parent     a9f138b0e537de55933335d580ebd38c2bc53c47 (diff)
download   linux-9b0925a6ff64a33be45497e3c798bfee8790b102.tar.xz
Revert "kernfs: implement kernfs_{de|re}activate[_self]()"
This reverts commit 9f010c2ad5194a4b682e747984477850fabd03be.
Tejun writes:
I'm sorry but can you please revert the whole series?
get_active() waiting while a node is deactivated has potential
to lead to deadlock and that deactivate/reactivate interface is
something fundamentally flawed and that cgroup will have to work
with the remove_self() like everybody else. IOW, I think the
first posting was correct.
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
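
The inversion Tejun is pointing at: a kernfs operation runs with an active reference held and may then take its subsystem's own lock, while removal performed under that subsystem lock has to wait for active references to drain, so the two orderings are reversed. Below is a minimal userspace sketch of that pattern, not kernfs code; every identifier in it is invented for the illustration, active references are modelled as a plain counter, and it builds with "cc -pthread".

  /*
   * Illustrative userspace model of the inversion described above.  This is
   * NOT kernfs code: every name here is invented for the example, an
   * "active reference" is just a counter a remover waits to drain, and
   * subsys_lock stands in for a kernfs user's own lock.
   */
  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t active_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t active_drained = PTHREAD_COND_INITIALIZER;
  static int active_refs;

  static void get_active(void)
  {
          pthread_mutex_lock(&active_lock);
          active_refs++;
          pthread_mutex_unlock(&active_lock);
  }

  static void put_active(void)
  {
          pthread_mutex_lock(&active_lock);
          if (--active_refs == 0)
                  pthread_cond_broadcast(&active_drained);
          pthread_mutex_unlock(&active_lock);
  }

  /* models an op: runs with an active ref, then wants the subsystem lock */
  static void *op_thread(void *arg)
  {
          (void)arg;
          get_active();
          sleep(1);                         /* widen the race window */
          pthread_mutex_lock(&subsys_lock); /* blocks behind the remover */
          pthread_mutex_unlock(&subsys_lock);
          put_active();                     /* never reached once deadlocked */
          return NULL;
  }

  /* models removal under the subsystem lock: waits for active refs to drain */
  static void *remove_thread(void *arg)
  {
          (void)arg;
          usleep(100 * 1000);               /* let the op take its ref first */
          pthread_mutex_lock(&subsys_lock);
          pthread_mutex_lock(&active_lock);
          while (active_refs > 0)           /* never drains: the op is stuck above */
                  pthread_cond_wait(&active_drained, &active_lock);
          pthread_mutex_unlock(&active_lock);
          pthread_mutex_unlock(&subsys_lock);
          return NULL;
  }

  int main(void)
  {
          pthread_t op, rm;

          pthread_create(&op, NULL, op_thread, NULL);
          pthread_create(&rm, NULL, remove_thread, NULL);
          pthread_join(op, NULL);           /* hangs on the reversed orderings */
          pthread_join(rm, NULL);
          puts("no deadlock this run");
          return 0;
  }

Run it and the two threads stall against each other: the op holds a ref and waits for subsys_lock, the remover holds subsys_lock and waits for the refs to drain. The remove_self() approach Tejun refers to sidesteps this by letting a node drop its own active-reference protection from within its own operation instead of deactivating a subtree under the caller's lock.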
Diffstat (limited to 'fs/kernfs')
-rw-r--r--  fs/kernfs/dir.c  118
1 file changed, 1 insertion(+), 117 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 1aeb57969bff..37dd6408f5f6 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -396,7 +396,6 @@ struct kernfs_node *kernfs_new_node(struct kernfs_root *root, const char *name,
 
         atomic_set(&kn->count, 1);
         atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
-        kn->deact_depth = 1;
         RB_CLEAR_NODE(&kn->rb);
 
         kn->name = name;
@@ -462,7 +461,6 @@ int kernfs_add_one(struct kernfs_node *kn, struct kernfs_node *parent)
 
         /* Mark the entry added into directory tree */
         atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
-        kn->deact_depth--;
         ret = 0;
 out_unlock:
         mutex_unlock(&kernfs_mutex);
@@ -563,7 +561,6 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
         }
 
         atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
-        kn->deact_depth--;
         kn->priv = priv;
         kn->dir.root = root;
 
@@ -776,8 +773,7 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
         /* prevent any new usage under @kn by deactivating all nodes */
         pos = NULL;
         while ((pos = kernfs_next_descendant_post(pos, kn))) {
-                if (!pos->deact_depth++) {
-                        WARN_ON_ONCE(atomic_read(&pos->active) < 0);
+                if (atomic_read(&pos->active) >= 0) {
                         atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
                         pos->flags |= KERNFS_JUST_DEACTIVATED;
                 }
@@ -801,118 +797,6 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
         }
 }
 
-static void __kernfs_reactivate(struct kernfs_node *kn)
-{
-        struct kernfs_node *pos;
-
-        lockdep_assert_held(&kernfs_mutex);
-
-        pos = NULL;
-        while ((pos = kernfs_next_descendant_post(pos, kn))) {
-                if (!--pos->deact_depth) {
-                        WARN_ON_ONCE(atomic_read(&pos->active) >= 0);
-                        atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
-                }
-                WARN_ON_ONCE(pos->deact_depth < 0);
-        }
-
-        /* some nodes reactivated, kick get_active waiters */
-        wake_up_all(&kernfs_root(kn)->deactivate_waitq);
-}
-
-static void __kernfs_deactivate_self(struct kernfs_node *kn)
-{
-        /*
-         * Take out ourself out of the active ref dependency chain and
-         * deactivate. If we're called without an active ref, lockdep will
-         * complain.
-         */
-        kernfs_put_active(kn);
-        __kernfs_deactivate(kn);
-}
-
-static void __kernfs_reactivate_self(struct kernfs_node *kn)
-{
-        __kernfs_reactivate(kn);
-        /*
-         * Restore active ref dropped by deactivate_self() so that it's
-         * balanced on return. put_active() will soon be called on @kn, so
-         * this can't break anything regardless of @kn's state.
-         */
-        atomic_inc(&kn->active);
-        if (kernfs_lockdep(kn))
-                rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
-}
-
-/**
- * kernfs_deactivate - deactivate subtree of a node
- * @kn: kernfs_node to deactivate subtree of
- *
- * Deactivate the subtree of @kn. On return, there's no active operation
- * going on under @kn and creation or renaming of a node under @kn is
- * blocked until @kn is reactivated or removed. This function can be
- * called multiple times and nests properly. Each invocation should be
- * paired with kernfs_reactivate().
- *
- * For a kernfs user which uses simple locking, the subsystem lock would
- * nest inside active reference. This becomes problematic if the user
- * tries to remove nodes while holding the subystem lock as it would create
- * a reverse locking dependency from the subsystem lock to active ref.
- * This function can be used to break such reverse dependency. The user
- * can call this function outside the subsystem lock and then proceed to
- * invoke kernfs_remove() while holding the subsystem lock without
- * introducing such reverse dependency.
- */
-void kernfs_deactivate(struct kernfs_node *kn)
-{
-        mutex_lock(&kernfs_mutex);
-        __kernfs_deactivate(kn);
-        mutex_unlock(&kernfs_mutex);
-}
-
-/**
- * kernfs_reactivate - reactivate subtree of a node
- * @kn: kernfs_node to reactivate subtree of
- *
- * Undo kernfs_deactivate().
- */
-void kernfs_reactivate(struct kernfs_node *kn)
-{
-        mutex_lock(&kernfs_mutex);
-        __kernfs_reactivate(kn);
-        mutex_unlock(&kernfs_mutex);
-}
-
-/**
- * kernfs_deactivate_self - deactivate subtree of a node from its own method
- * @kn: the self kernfs_node to deactivate subtree of
- *
- * The caller must be running off of a kernfs operation which is invoked
- * with an active reference - e.g. one of kernfs_ops. Once this function
- * is called, @kn may be removed by someone else while the enclosing method
- * is in progress. Other than that, this function is equivalent to
- * kernfs_deactivate() and should be paired with kernfs_reactivate_self().
- */
-void kernfs_deactivate_self(struct kernfs_node *kn)
-{
-        mutex_lock(&kernfs_mutex);
-        __kernfs_deactivate_self(kn);
-        mutex_unlock(&kernfs_mutex);
-}
-
-/**
- * kernfs_reactivate_self - reactivate subtree of a node from its own method
- * @kn: the self kernfs_node to reactivate subtree of
- *
- * Undo kernfs_deactivate_self().
- */
-void kernfs_reactivate_self(struct kernfs_node *kn)
-{
-        mutex_lock(&kernfs_mutex);
-        __kernfs_reactivate_self(kn);
-        mutex_unlock(&kernfs_mutex);
-}
-
 static void __kernfs_remove(struct kernfs_node *kn)
 {
         struct kernfs_root *root = kernfs_root(kn);