Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r-- | fs/nfsd/nfs4state.c | 455 |
1 file changed, 425 insertions, 30 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 618e66078ee5..7857942c5ca6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -42,6 +42,7 @@
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/addr.h>
 #include <linux/jhash.h>
+#include <linux/string_helpers.h>
 #include "xdr4.h"
 #include "xdr4cb.h"
 #include "vfs.h"
@@ -99,6 +100,13 @@ enum nfsd4_st_mutex_lock_subclass {
  */
 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
 
+/*
+ * A waitqueue where a writer to clients/#/ctl destroying a client can
+ * wait for cl_rpc_users to drop to 0 and then for the client to be
+ * unhashed.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
+
 static struct kmem_cache *client_slab;
 static struct kmem_cache *openowner_slab;
 static struct kmem_cache *lockowner_slab;
@@ -138,7 +146,7 @@ static __be32 get_client_locked(struct nfs4_client *clp)
 
         if (is_client_expired(clp))
                 return nfserr_expired;
-        atomic_inc(&clp->cl_refcount);
+        atomic_inc(&clp->cl_rpc_users);
         return nfs_ok;
 }
 
@@ -170,20 +178,24 @@ static void put_client_renew_locked(struct nfs4_client *clp)
 
         lockdep_assert_held(&nn->client_lock);
 
-        if (!atomic_dec_and_test(&clp->cl_refcount))
+        if (!atomic_dec_and_test(&clp->cl_rpc_users))
                 return;
         if (!is_client_expired(clp))
                 renew_client_locked(clp);
+        else
+                wake_up_all(&expiry_wq);
 }
 
 static void put_client_renew(struct nfs4_client *clp)
 {
         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 
-        if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
+        if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
                 return;
         if (!is_client_expired(clp))
                 renew_client_locked(clp);
+        else
+                wake_up_all(&expiry_wq);
         spin_unlock(&nn->client_lock);
 }
 
@@ -694,7 +706,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
 
         idr_preload(GFP_KERNEL);
         spin_lock(&cl->cl_lock);
-        new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
+        /* Reserving 0 for start of file in nfsdfs "states" file: */
+        new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
         spin_unlock(&cl->cl_lock);
         idr_preload_end();
         if (new_id < 0)
@@ -1563,7 +1576,7 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
          * Never use more than a third of the remaining memory,
          * unless it's the only way to give this client a slot:
          */
-        avail = clamp_t(int, avail, slotsize, total_avail/3);
+        avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
         num = min_t(int, num, avail / slotsize);
         nfsd_drc_mem_used += num * slotsize;
         spin_unlock(&nfsd_drc_lock);
@@ -1844,7 +1857,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
         clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
         if (clp == NULL)
                 return NULL;
-        clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
+        xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
         if (clp->cl_name.data == NULL)
                 goto err_no_name;
         clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
@@ -1854,10 +1867,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
                 goto err_no_hashtbl;
         for (i = 0; i < OWNER_HASH_SIZE; i++)
                 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
-        clp->cl_name.len = name.len;
         INIT_LIST_HEAD(&clp->cl_sessions);
         idr_init(&clp->cl_stateids);
-        atomic_set(&clp->cl_refcount, 0);
+        atomic_set(&clp->cl_rpc_users, 0);
         clp->cl_cb_state = NFSD4_CB_UNKNOWN;
         INIT_LIST_HEAD(&clp->cl_idhash);
         INIT_LIST_HEAD(&clp->cl_openowners);
@@ -1879,6 +1891,25 @@ err_no_name:
         return NULL;
 }
 
+static void __free_client(struct kref *k)
+{
+        struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
+        struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
+
+        free_svc_cred(&clp->cl_cred);
+        kfree(clp->cl_ownerstr_hashtbl);
+        kfree(clp->cl_name.data);
+        kfree(clp->cl_nii_domain.data);
+        kfree(clp->cl_nii_name.data);
+        idr_destroy(&clp->cl_stateids);
+        kmem_cache_free(client_slab, clp);
+}
+
+static void drop_client(struct nfs4_client *clp)
+{
+        kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
+}
+
 static void
 free_client(struct nfs4_client *clp)
 {
@@ -1891,11 +1922,12 @@ free_client(struct nfs4_client *clp)
                 free_session(ses);
         }
         rpc_destroy_wait_queue(&clp->cl_cb_waitq);
-        free_svc_cred(&clp->cl_cred);
-        kfree(clp->cl_ownerstr_hashtbl);
-        kfree(clp->cl_name.data);
-        idr_destroy(&clp->cl_stateids);
-        kmem_cache_free(client_slab, clp);
+        if (clp->cl_nfsd_dentry) {
+                nfsd_client_rmdir(clp->cl_nfsd_dentry);
+                clp->cl_nfsd_dentry = NULL;
+                wake_up_all(&expiry_wq);
+        }
+        drop_client(clp);
 }
 
 /* must be called under the client_lock */
@@ -1936,7 +1968,7 @@ unhash_client(struct nfs4_client *clp)
 
 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
 {
-        if (atomic_read(&clp->cl_refcount))
+        if (atomic_read(&clp->cl_rpc_users))
                 return nfserr_jukebox;
         unhash_client_locked(clp);
         return nfs_ok;
@@ -1989,6 +2021,7 @@ __destroy_client(struct nfs4_client *clp)
         if (clp->cl_cb_conn.cb_xprt)
                 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
         free_client(clp);
+        wake_up_all(&expiry_wq);
 }
 
 static void
@@ -2199,6 +2232,342 @@ find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
         return s;
 }
 
+static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
+{
+        struct nfsdfs_client *nc;
+        nc = get_nfsdfs_client(inode);
+        if (!nc)
+                return NULL;
+        return container_of(nc, struct nfs4_client, cl_nfsdfs);
+}
+
+static void seq_quote_mem(struct seq_file *m, char *data, int len)
+{
+        seq_printf(m, "\"");
+        seq_escape_mem_ascii(m, data, len);
+        seq_printf(m, "\"");
+}
+
+static int client_info_show(struct seq_file *m, void *v)
+{
+        struct inode *inode = m->private;
+        struct nfs4_client *clp;
+        u64 clid;
+
+        clp = get_nfsdfs_clp(inode);
+        if (!clp)
+                return -ENXIO;
+        memcpy(&clid, &clp->cl_clientid, sizeof(clid));
+        seq_printf(m, "clientid: 0x%llx\n", clid);
+        seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
+        seq_printf(m, "name: ");
+        seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
+        seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
+        if (clp->cl_nii_domain.data) {
+                seq_printf(m, "Implementation domain: ");
+                seq_quote_mem(m, clp->cl_nii_domain.data,
+                                clp->cl_nii_domain.len);
+                seq_printf(m, "\nImplementation name: ");
+                seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
+                seq_printf(m, "\nImplementation time: [%ld, %ld]\n",
+                        clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
+        }
+        drop_client(clp);
+
+        return 0;
+}
+
+static int client_info_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, client_info_show, inode);
+}
+
+static const struct file_operations client_info_fops = {
+        .open           = client_info_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
+static void *states_start(struct seq_file *s, loff_t *pos)
+        __acquires(&clp->cl_lock)
+{
+        struct nfs4_client *clp = s->private;
+        unsigned long id = *pos;
+        void *ret;
+
+        spin_lock(&clp->cl_lock);
+        ret = idr_get_next_ul(&clp->cl_stateids, &id);
+        *pos = id;
+        return ret;
+}
+
+static void *states_next(struct seq_file *s, void *v, loff_t *pos)
+{
+        struct nfs4_client *clp = s->private;
+        unsigned long id = *pos;
+        void *ret;
+
+        id = *pos;
+        id++;
+        ret = idr_get_next_ul(&clp->cl_stateids, &id);
+        *pos = id;
+        return ret;
+}
+
+static void states_stop(struct seq_file *s, void *v)
+        __releases(&clp->cl_lock)
+{
+        struct nfs4_client *clp = s->private;
+
+        spin_unlock(&clp->cl_lock);
+}
+
+static void nfs4_show_superblock(struct seq_file *s, struct file *f)
+{
+        struct inode *inode = file_inode(f);
+
+        seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
+                                        MAJOR(inode->i_sb->s_dev),
+                                        MINOR(inode->i_sb->s_dev),
+                                        inode->i_ino);
+}
+
+static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
+{
+        seq_printf(s, "owner: ");
+        seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
+}
+
+static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+{
+        struct nfs4_ol_stateid *ols;
+        struct nfs4_file *nf;
+        struct file *file;
+        struct nfs4_stateowner *oo;
+        unsigned int access, deny;
+
+        if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
+                return 0; /* XXX: or SEQ_SKIP? */
+        ols = openlockstateid(st);
+        oo = ols->st_stateowner;
+        nf = st->sc_file;
+        file = find_any_file(nf);
+
+        seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
+
+        access = bmap_to_share_mode(ols->st_access_bmap);
+        deny   = bmap_to_share_mode(ols->st_deny_bmap);
+
+        seq_printf(s, "access: %s%s, ",
+                access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
+                access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
+        seq_printf(s, "deny: %s%s, ",
+                deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
+                deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
+
+        nfs4_show_superblock(s, file);
+        seq_printf(s, ", ");
+        nfs4_show_owner(s, oo);
+        seq_printf(s, " }\n");
+        fput(file);
+
+        return 0;
+}
+
+static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+{
+        struct nfs4_ol_stateid *ols;
+        struct nfs4_file *nf;
+        struct file *file;
+        struct nfs4_stateowner *oo;
+
+        ols = openlockstateid(st);
+        oo = ols->st_stateowner;
+        nf = st->sc_file;
+        file = find_any_file(nf);
+
+        seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
+
+        /*
+         * Note: a lock stateid isn't really the same thing as a lock,
+         * it's the locking state held by one owner on a file, and there
+         * may be multiple (or no) lock ranges associated with it.
+         * (Same for the matter is true of open stateids.)
+         */
+
+        nfs4_show_superblock(s, file);
+        /* XXX: open stateid? */
+        seq_printf(s, ", ");
+        nfs4_show_owner(s, oo);
+        seq_printf(s, " }\n");
+        fput(file);
+
+        return 0;
+}
+
+static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+{
+        struct nfs4_delegation *ds;
+        struct nfs4_file *nf;
+        struct file *file;
+
+        ds = delegstateid(st);
+        nf = st->sc_file;
+        file = nf->fi_deleg_file;
+
+        seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
+
+        /* Kinda dead code as long as we only support read delegs: */
+        seq_printf(s, "access: %s, ",
+                ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
+
+        /* XXX: lease time, whether it's being recalled. */
+
+        nfs4_show_superblock(s, file);
+        seq_printf(s, " }\n");
+
+        return 0;
+}
+
+static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
+{
+        struct nfs4_layout_stateid *ls;
+        struct file *file;
+
+        ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
+        file = ls->ls_file;
+
+        seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);
+
+        /* XXX: What else would be useful? */
+
+        nfs4_show_superblock(s, file);
+        seq_printf(s, " }\n");
+
+        return 0;
+}
+
+static int states_show(struct seq_file *s, void *v)
+{
+        struct nfs4_stid *st = v;
+
+        switch (st->sc_type) {
+        case NFS4_OPEN_STID:
+                return nfs4_show_open(s, st);
+        case NFS4_LOCK_STID:
+                return nfs4_show_lock(s, st);
+        case NFS4_DELEG_STID:
+                return nfs4_show_deleg(s, st);
+        case NFS4_LAYOUT_STID:
+                return nfs4_show_layout(s, st);
+        default:
+                return 0; /* XXX: or SEQ_SKIP? */
+        }
+        /* XXX: copy stateids? */
+}
+
+static struct seq_operations states_seq_ops = {
+        .start = states_start,
+        .next = states_next,
+        .stop = states_stop,
+        .show = states_show
+};
+
+static int client_states_open(struct inode *inode, struct file *file)
+{
+        struct seq_file *s;
+        struct nfs4_client *clp;
+        int ret;
+
+        clp = get_nfsdfs_clp(inode);
+        if (!clp)
+                return -ENXIO;
+
+        ret = seq_open(file, &states_seq_ops);
+        if (ret)
+                return ret;
+        s = file->private_data;
+        s->private = clp;
+        return 0;
+}
+
+static int client_opens_release(struct inode *inode, struct file *file)
+{
+        struct seq_file *m = file->private_data;
+        struct nfs4_client *clp = m->private;
+
+        /* XXX: alternatively, we could get/drop in seq start/stop */
+        drop_client(clp);
+        return 0;
+}
+
+static const struct file_operations client_states_fops = {
+        .open           = client_states_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = client_opens_release,
+};
+
+/*
+ * Normally we refuse to destroy clients that are in use, but here the
+ * administrator is telling us to just do it.  We also want to wait
+ * so the caller has a guarantee that the client's locks are gone by
+ * the time the write returns:
+ */
+static void force_expire_client(struct nfs4_client *clp)
+{
+        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+        bool already_expired;
+
+        spin_lock(&clp->cl_lock);
+        clp->cl_time = 0;
+        spin_unlock(&clp->cl_lock);
+
+        wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
+        spin_lock(&nn->client_lock);
+        already_expired = list_empty(&clp->cl_lru);
+        if (!already_expired)
+                unhash_client_locked(clp);
+        spin_unlock(&nn->client_lock);
+
+        if (!already_expired)
+                expire_client(clp);
+        else
+                wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
+}
+
+static ssize_t client_ctl_write(struct file *file, const char __user *buf,
+                                size_t size, loff_t *pos)
+{
+        char *data;
+        struct nfs4_client *clp;
+
+        data = simple_transaction_get(file, buf, size);
+        if (IS_ERR(data))
+                return PTR_ERR(data);
+        if (size != 7 || 0 != memcmp(data, "expire\n", 7))
+                return -EINVAL;
+        clp = get_nfsdfs_clp(file_inode(file));
+        if (!clp)
+                return -ENXIO;
+        force_expire_client(clp);
+        drop_client(clp);
+        return 7;
+}
+
+static const struct file_operations client_ctl_fops = {
+        .write          = client_ctl_write,
+        .release        = simple_transaction_release,
+};
+
+static const struct tree_descr client_files[] = {
+        [0] = {"info", &client_info_fops, S_IRUSR},
+        [1] = {"states", &client_states_fops, S_IRUSR},
+        [2] = {"ctl", &client_ctl_fops, S_IRUSR|S_IWUSR},
+        [3] = {""},
+};
+
 static struct nfs4_client *create_client(struct xdr_netobj name,
                 struct svc_rqst *rqstp, nfs4_verifier *verf)
 {
@@ -2206,6 +2575,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
         struct sockaddr *sa = svc_addr(rqstp);
         int ret;
         struct net *net = SVC_NET(rqstp);
+        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
         clp = alloc_client(name);
         if (clp == NULL)
@@ -2216,13 +2586,22 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
                 free_client(clp);
                 return NULL;
         }
+        gen_clid(clp, nn);
+        kref_init(&clp->cl_nfsdfs.cl_ref);
         nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
         clp->cl_time = get_seconds();
         clear_bit(0, &clp->cl_cb_slot_busy);
         copy_verf(clp, verf);
-        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
+        memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
         clp->cl_cb_session = NULL;
         clp->net = net;
+        clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
+                        clp->cl_clientid.cl_id - nn->clientid_base,
+                        client_files);
+        if (!clp->cl_nfsd_dentry) {
+                free_client(clp);
+                return NULL;
+        }
         return clp;
 }
 
@@ -2533,6 +2912,22 @@ static bool client_has_state(struct nfs4_client *clp)
                 || !list_empty(&clp->async_copies);
 }
 
+static __be32 copy_impl_id(struct nfs4_client *clp,
+                                struct nfsd4_exchange_id *exid)
+{
+        if (!exid->nii_domain.data)
+                return 0;
+        xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
+        if (!clp->cl_nii_domain.data)
+                return nfserr_jukebox;
+        xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
+        if (!clp->cl_nii_name.data)
+                return nfserr_jukebox;
+        clp->cl_nii_time.tv_sec = exid->nii_time.tv_sec;
+        clp->cl_nii_time.tv_nsec = exid->nii_time.tv_nsec;
+        return 0;
+}
+
 __be32
 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                 union nfsd4_op_u *u)
@@ -2559,6 +2954,9 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         new = create_client(exid->clname, rqstp, &verf);
         if (new == NULL)
                 return nfserr_jukebox;
+        status = copy_impl_id(new, exid);
+        if (status)
+                goto out_nolock;
 
         switch (exid->spa_how) {
         case SP4_MACH_CRED:
@@ -2667,7 +3065,6 @@ out_new:
         new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
         new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
 
-        gen_clid(new, nn);
         add_to_unconfirmed(new);
         swap(new, conf);
 out_copy:
@@ -3411,7 +3808,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                 copy_clid(new, conf);
                 gen_confirm(new, nn);
         } else /* case 4 (new client) or cases 2, 3 (client reboot): */
-                gen_clid(new, nn);
+                ;
         new->cl_minorversion = 0;
         gen_callback(new, setclid, rqstp);
         add_to_unconfirmed(new);
@@ -3632,12 +4029,11 @@ static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj
         if (!sop)
                 return NULL;
 
-        sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
+        xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
         if (!sop->so_owner.data) {
                 kmem_cache_free(slab, sop);
                 return NULL;
         }
-        sop->so_owner.len = owner->len;
 
         INIT_LIST_HEAD(&sop->so_stateids);
         sop->so_client = clp;
@@ -4092,7 +4488,7 @@ static __be32 lookup_clientid(clientid_t *clid,
                         spin_unlock(&nn->client_lock);
                         return nfserr_expired;
                 }
-                atomic_inc(&found->cl_refcount);
+                atomic_inc(&found->cl_rpc_users);
                 spin_unlock(&nn->client_lock);
 
                 /* Cache the nfs4_client in cstate! */
@@ -5725,12 +6121,11 @@ nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
 
         if (fl->fl_lmops == &nfsd_posix_mng_ops) {
                 lo = (struct nfs4_lockowner *) fl->fl_owner;
-                deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
-                                        lo->lo_owner.so_owner.len, GFP_KERNEL);
+                xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
+                                                GFP_KERNEL);
                 if (!deny->ld_owner.data)
                         /* We just don't care that much */
                         goto nevermind;
-                deny->ld_owner.len = lo->lo_owner.so_owner.len;
                 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
         } else {
 nevermind:
@@ -6584,7 +6979,7 @@ nfs4_check_open_reclaim(clientid_t *clid,
 static inline void
 put_client(struct nfs4_client *clp)
 {
-        atomic_dec(&clp->cl_refcount);
+        atomic_dec(&clp->cl_rpc_users);
 }
 
 static struct nfs4_client *
@@ -6702,7 +7097,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
                 return;
 
         lockdep_assert_held(&nn->client_lock);
-        atomic_inc(&clp->cl_refcount);
+        atomic_inc(&clp->cl_rpc_users);
         list_add(&lst->st_locks, collect);
 }
 
@@ -6731,7 +7126,7 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
                          * Despite the fact that these functions deal
                          * with 64-bit integers for "count", we must
                          * ensure that it doesn't blow up the
-                         * clp->cl_refcount. Throw a warning if we
+                         * clp->cl_rpc_users. Throw a warning if we
                          * start to approach INT_MAX here.
                          */
                         WARN_ON_ONCE(count == (INT_MAX / 2));
@@ -6855,7 +7250,7 @@ nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
                 if (func) {
                         func(oop);
                         if (collect) {
-                                atomic_inc(&clp->cl_refcount);
+                                atomic_inc(&clp->cl_rpc_users);
                                 list_add(&oop->oo_perclient, collect);
                         }
                 }
@@ -6863,7 +7258,7 @@ nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
                 /*
                  * Despite the fact that these functions deal with
                  * 64-bit integers for "count", we must ensure that
-                 * it doesn't blow up the clp->cl_refcount. Throw a
+                 * it doesn't blow up the clp->cl_rpc_users. Throw a
                  * warning if we start to approach INT_MAX here.
                  */
                 WARN_ON_ONCE(count == (INT_MAX / 2));
@@ -6993,7 +7388,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
                         if (dp->dl_time != 0)
                                 continue;
 
-                        atomic_inc(&clp->cl_refcount);
+                        atomic_inc(&clp->cl_rpc_users);
                         WARN_ON(!unhash_delegation_locked(dp));
                         list_add(&dp->dl_recall_lru, victims);
                 }
@@ -7001,7 +7396,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
                 /*
                  * Despite the fact that these functions deal with
                  * 64-bit integers for "count", we must ensure that
-                 * it doesn't blow up the clp->cl_refcount. Throw a
+                 * it doesn't blow up the clp->cl_rpc_users. Throw a
                  * warning if we start to approach INT_MAX here.
                  */
                 WARN_ON_ONCE(count == (INT_MAX / 2));
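
The new clients/#/ctl file is a simple_transaction file whose write handler, client_ctl_write(), accepts exactly the 7-byte string "expire\n" and does not return until force_expire_client() has torn the client down. A minimal userspace sketch of driving it is shown below; the mount point /proc/fs/nfsd and the per-client directory number are illustrative assumptions, not taken from this diff.

/*
 * Hypothetical admin tool: force-expire NFSv4 client #1 via the ctl file
 * added by this change.  Assumes nfsdfs is mounted at /proc/fs/nfsd and
 * that a client directory numbered 1 exists.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/fs/nfsd/clients/1/ctl", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * client_ctl_write() rejects anything other than the exact 7 bytes
	 * "expire\n", and the write only completes once the client's opens,
	 * locks, and delegations are gone.
	 */
	if (write(fd, "expire\n", 7) != 7) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Reading the sibling info and states files (both added above) before issuing the write is the intended way to identify which client directory corresponds to which NFSv4 client.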