| author | Al Viro <viro@zeniv.linux.org.uk> | 2019-06-30 17:39:08 +0300 |
|---|---|---|
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2019-07-17 05:43:40 +0300 |
| commit | 4edbe133f851c9e3a2f2a1db367e826b01e72594 (patch) | |
| tree | efd25d386cbb1f6dcf85519f0ea318b7f2d04e87 | /fs/namespace.c |
| parent | 9bdebc2bd1c4abfbf44dc154cc152ec333e004de (diff) | |
| download | linux-4edbe133f851c9e3a2f2a1db367e826b01e72594.tar.xz | |
make struct mountpoint bear the dentry reference to mountpoint, not struct mount
Use dput_to_list() to shift the contributing reference from ->mnt_mountpoint
to ->mnt_mp->m_dentry.  Dentries are queued (with dput_to_list()) as soon as
the struct mountpoint is destroyed and are actually dropped once the relevant
locks have been released: when we are under namespace_sem we use the global
list and shrink it in namespace_unlock(); when detaching stuck MNT_LOCKed
children at the final mntput_no_expire() we use a local list and shrink it
ourselves.  ->mnt_ex_mountpoint crap is gone.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
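In short, disposal of mountpoint dentries becomes a two-step affair: while namespace_sem and/or the mount hash lock are held, the final dput() is only queued with dput_to_list(), and the queue is drained with shrink_dentry_list() after the locks are dropped, either from namespace_unlock() (the global ex_mountpoints list) or right after unlock_mount_hash() in mntput_no_expire() (a local list). The sketch below is a minimal, self-contained userspace analogue of that ordering only; every identifier in it is illustrative and none of it is kernel API.

```c
/*
 * Minimal userspace analogue of the disposal-list pattern (illustrative
 * names only, not kernel code): an object losing its last reference while
 * a lock is held is merely queued; the actual teardown runs only after
 * the lock has been released.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj {
	int refcount;
	char *name;
	struct obj *next;		/* link on a disposal list */
};

static pthread_mutex_t ns_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *deferred;		/* shared disposal list, protected by ns_lock */

/* Drop one reference; on the last one, queue the object instead of freeing it. */
static void put_obj(struct obj *o, struct obj **list)
{
	if (--o->refcount == 0) {
		o->next = *list;
		*list = o;
	}
}

/* Tear down everything on the list; must be called with no locks held. */
static void shrink_list(struct obj *list)
{
	while (list) {
		struct obj *next = list->next;

		printf("freeing %s (lock already dropped)\n", list->name);
		free(list->name);
		free(list);
		list = next;
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *batch;

	o->refcount = 1;
	o->name = strdup("mountpoint-analogue");

	pthread_mutex_lock(&ns_lock);
	put_obj(o, &deferred);		/* last reference: deferred, not freed here */
	batch = deferred;		/* splice the shared list out under the lock... */
	deferred = NULL;
	pthread_mutex_unlock(&ns_lock);

	shrink_list(batch);		/* ...and dispose of it after unlocking */
	return 0;
}
```

The splice-then-shrink step in main() mirrors what namespace_unlock() does in the patch: move the pending entries off the shared list while still serialized, release the lock, then do the actual teardown.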
Diffstat (limited to 'fs/namespace.c')
| -rw-r--r-- | fs/namespace.c | 52 | 
1 file changed, 25 insertions(+), 27 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index b7059a4f07e3..e0902fda6f07 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -69,6 +69,8 @@ static struct hlist_head *mount_hashtable __read_mostly;
 static struct hlist_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
 static DECLARE_RWSEM(namespace_sem);
+static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
+static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
 
 /* /sys/fs */
 struct kobject *fs_kobj;
@@ -172,7 +174,6 @@ unsigned int mnt_get_count(struct mount *mnt)
 static void drop_mountpoint(struct fs_pin *p)
 {
 	struct mount *m = container_of(p, struct mount, mnt_umount);
-	dput(m->mnt_ex_mountpoint);
 	pin_remove(p);
 	mntput(&m->mnt);
 }
@@ -739,7 +740,7 @@ mountpoint:
 
 	/* Add the new mountpoint to the hash table */
 	read_seqlock_excl(&mount_lock);
-	new->m_dentry = dentry;
+	new->m_dentry = dget(dentry);
 	new->m_count = 1;
 	hlist_add_head(&new->m_hash, mp_hash(dentry));
 	INIT_HLIST_HEAD(&new->m_list);
@@ -752,7 +753,11 @@ done:
 	return mp;
 }
 
-static void put_mountpoint(struct mountpoint *mp)
+/*
+ * vfsmount lock must be held.  Additionally, the caller is responsible
+ * for serializing calls for given disposal list.
+ */
+static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
 {
 	if (!--mp->m_count) {
 		struct dentry *dentry = mp->m_dentry;
@@ -760,11 +765,18 @@ static void put_mountpoint(struct mountpoint *mp)
 		spin_lock(&dentry->d_lock);
 		dentry->d_flags &= ~DCACHE_MOUNTED;
 		spin_unlock(&dentry->d_lock);
+		dput_to_list(dentry, list);
 		hlist_del(&mp->m_hash);
 		kfree(mp);
 	}
 }
 
+/* called with namespace_lock and vfsmount lock */
+static void put_mountpoint(struct mountpoint *mp)
+{
+	__put_mountpoint(mp, &ex_mountpoints);
+}
+
 static inline int check_mnt(struct mount *mnt)
 {
 	return mnt->mnt_ns == current->nsproxy->mnt_ns;
@@ -813,7 +825,7 @@ static struct mountpoint *unhash_mnt(struct mount *mnt)
  */
 static void detach_mnt(struct mount *mnt, struct path *old_path)
 {
-	old_path->dentry = mnt->mnt_mountpoint;
+	old_path->dentry = dget(mnt->mnt_mountpoint);
 	old_path->mnt = &mnt->mnt_parent->mnt;
 	put_mountpoint(unhash_mnt(mnt));
 }
@@ -823,8 +835,6 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
  */
 static void umount_mnt(struct mount *mnt)
 {
-	/* old mountpoint will be dropped when we can do that */
-	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
 	put_mountpoint(unhash_mnt(mnt));
 }
 
@@ -837,7 +847,7 @@ void mnt_set_mountpoint(struct mount *mnt,
 {
 	mp->m_count++;
 	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
-	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
+	child_mnt->mnt_mountpoint = mp->m_dentry;
 	child_mnt->mnt_parent = mnt;
 	child_mnt->mnt_mp = mp;
 	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
@@ -864,7 +874,6 @@ static void attach_mnt(struct mount *mnt,
 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
 {
 	struct mountpoint *old_mp = mnt->mnt_mp;
-	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
 	struct mount *old_parent = mnt->mnt_parent;
 
 	list_del_init(&mnt->mnt_child);
@@ -874,22 +883,6 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct m
 	attach_mnt(mnt, parent, mp);
 
 	put_mountpoint(old_mp);
-
-	/*
-	 * Safely avoid even the suggestion this code might sleep or
-	 * lock the mount hash by taking advantage of the knowledge that
-	 * mnt_change_mountpoint will not release the final reference
-	 * to a mountpoint.
-	 *
-	 * During mounting, the mount passed in as the parent mount will
-	 * continue to use the old mountpoint and during unmounting, the
-	 * old mountpoint will continue to exist until namespace_unlock,
-	 * which happens well after mnt_change_mountpoint.
-	 */
-	spin_lock(&old_mountpoint->d_lock);
-	old_mountpoint->d_lockref.count--;
-	spin_unlock(&old_mountpoint->d_lock);
-
 	mnt_add_count(old_parent, -1);
 }
 
@@ -1142,6 +1135,8 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 
 static void mntput_no_expire(struct mount *mnt)
 {
+	LIST_HEAD(list);
+
 	rcu_read_lock();
 	if (likely(READ_ONCE(mnt->mnt_ns))) {
 		/*
@@ -1182,10 +1177,11 @@ static void mntput_no_expire(struct mount *mnt)
 	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
 		struct mount *p, *tmp;
 		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
-			umount_mnt(p);
+			__put_mountpoint(unhash_mnt(p), &list);
 		}
 	}
 	unlock_mount_hash();
+	shrink_dentry_list(&list);
 
 	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
 		struct task_struct *task = current;
@@ -1371,16 +1367,18 @@ int may_umount(struct vfsmount *mnt)
 
 EXPORT_SYMBOL(may_umount);
 
-static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
-
 static void namespace_unlock(void)
 {
 	struct hlist_head head;
+	LIST_HEAD(list);
 
 	hlist_move_list(&unmounted, &head);
+	list_splice_init(&ex_mountpoints, &list);
 
 	up_write(&namespace_sem);
 
+	shrink_dentry_list(&list);
+
 	if (likely(hlist_empty(&head)))
 		return;
