cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

nfs4state.c (209767B)


      1/*
      2*  Copyright (c) 2001 The Regents of the University of Michigan.
      3*  All rights reserved.
      4*
      5*  Kendrick Smith <kmsmith@umich.edu>
      6*  Andy Adamson <kandros@umich.edu>
      7*
      8*  Redistribution and use in source and binary forms, with or without
      9*  modification, are permitted provided that the following conditions
     10*  are met:
     11*
     12*  1. Redistributions of source code must retain the above copyright
     13*     notice, this list of conditions and the following disclaimer.
     14*  2. Redistributions in binary form must reproduce the above copyright
     15*     notice, this list of conditions and the following disclaimer in the
     16*     documentation and/or other materials provided with the distribution.
     17*  3. Neither the name of the University nor the names of its
     18*     contributors may be used to endorse or promote products derived
     19*     from this software without specific prior written permission.
     20*
     21*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
     22*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     23*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     24*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     25*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
     28*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     29*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     30*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     31*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32*
     33*/
     34
     35#include <linux/file.h>
     36#include <linux/fs.h>
     37#include <linux/slab.h>
     38#include <linux/namei.h>
     39#include <linux/swap.h>
     40#include <linux/pagemap.h>
     41#include <linux/ratelimit.h>
     42#include <linux/sunrpc/svcauth_gss.h>
     43#include <linux/sunrpc/addr.h>
     44#include <linux/jhash.h>
     45#include <linux/string_helpers.h>
     46#include <linux/fsnotify.h>
     47#include <linux/nfs_ssc.h>
     48#include "xdr4.h"
     49#include "xdr4cb.h"
     50#include "vfs.h"
     51#include "current_stateid.h"
     52
     53#include "netns.h"
     54#include "pnfs.h"
     55#include "filecache.h"
     56#include "trace.h"
     57
     58#define NFSDDBG_FACILITY                NFSDDBG_PROC
     59
#define all_ones {{~0,~0},~0}
/* The special "all ones" stateid: every generation and opaque bit set. */
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
/* The special all-zero stateid. */
static const stateid_t zero_stateid = {
	/* all fields zero */
};
/* Distinguished value representing the "current stateid". */
static const stateid_t currentstateid = {
	.si_generation = 1,
};
/* Distinguished generation used to recognize closed stateids (see CLOSE_STATEID). */
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

/* NOTE(review): presumably seeds new session ids; creation code not in this chunk. */
static u64 current_sessionid = 1;

/* Tests for the special stateid values defined above: */
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
     81
     82/* forward declarations */
     83static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
     84static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
     85void nfsd4_end_grace(struct nfsd_net *nn);
     86static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
     87
     88/* Locking: */
     89
     90/*
     91 * Currently used for the del_recall_lru and file hash table.  In an
     92 * effort to decrease the scope of the client_mutex, this spinlock may
     93 * eventually cover more:
     94 */
     95static DEFINE_SPINLOCK(state_lock);
     96
     97enum nfsd4_st_mutex_lock_subclass {
     98	OPEN_STATEID_MUTEX = 0,
     99	LOCK_STATEID_MUTEX = 1,
    100};
    101
    102/*
    103 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
    104 * the refcount on the open stateid to drop.
    105 */
    106static DECLARE_WAIT_QUEUE_HEAD(close_wq);
    107
    108/*
    109 * A waitqueue where a writer to clients/#/ctl destroying a client can
    110 * wait for cl_rpc_users to drop to 0 and then for the client to be
    111 * unhashed.
    112 */
    113static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
    114
/* Slab caches for the main nfsd4 state objects. */
static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

/* Workqueue that runs the laundromat (see mod_delayed_work() users below). */
static struct workqueue_struct *laundry_wq;
    129
    130int nfsd4_create_laundry_wq(void)
    131{
    132	int rc = 0;
    133
    134	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
    135	if (laundry_wq == NULL)
    136		rc = -ENOMEM;
    137	return rc;
    138}
    139
/* Tear down the workqueue created by nfsd4_create_laundry_wq(). */
void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}
    144
    145static bool is_session_dead(struct nfsd4_session *ses)
    146{
    147	return ses->se_flags & NFS4_SESSION_DEAD;
    148}
    149
/*
 * Mark a session dead so no further references can be taken.  Fails
 * with nfserr_jukebox while anyone other than the caller (who holds
 * ref_held_by_me references of its own) still references the session.
 */
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}
    157
    158static bool is_client_expired(struct nfs4_client *clp)
    159{
    160	return clp->cl_time == 0;
    161}
    162
/*
 * Take an rpc-user reference on the client and mark it active.
 * Returns nfserr_expired if the client has already been expired.
 * Caller must hold nn->client_lock (asserted below).
 */
static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}
    175
/*
 * Note recent activity from a client: move it to the tail of the lru
 * list and refresh its timestamp so the laundromat keeps it alive.
 * Must be called under the client_lock.
 */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		/* Should never happen: renewing an already-expired client. */
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	clp->cl_state = NFSD4_ACTIVE;
}
    195
/*
 * Drop an rpc-user reference.  On the final put, renew a live client,
 * or wake anyone on expiry_wq waiting for an expired client to go away.
 * Caller must hold nn->client_lock (asserted below).
 */
static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}
    209
/*
 * As put_client_renew_locked(), but takes nn->client_lock itself --
 * and only when the reference count actually drops to zero.
 */
static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}
    222
/*
 * Take a reference on a session and on its client.  Fails with
 * nfserr_badsession for a dead session, or with the status from
 * get_client_locked() for an expired client.
 */
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}
    235
/*
 * Drop a session reference; a dead session is freed on its final put.
 * Also releases the client reference taken alongside the session one.
 * Caller must hold nn->client_lock (asserted below).
 */
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}
    247
    248static void nfsd4_put_session(struct nfsd4_session *ses)
    249{
    250	struct nfs4_client *clp = ses->se_client;
    251	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
    252
    253	spin_lock(&nn->client_lock);
    254	nfsd4_put_session_locked(ses);
    255	spin_unlock(&nn->client_lock);
    256}
    257
/*
 * Find and dequeue the blocked lock request for lockowner 'lo' on the
 * file identified by 'fh'.  The entry is unlinked from both the
 * per-lockowner list and the lru under blocked_locks_lock; only after
 * dropping that lock is it removed from the file-locking blocker list.
 * Returns the dequeued entry, or NULL if none matched.
 */
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			/* an entry on nbl_list should always be on the lru too */
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}
    279
    280static struct nfsd4_blocked_lock *
    281find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
    282			struct nfsd_net *nn)
    283{
    284	struct nfsd4_blocked_lock *nbl;
    285
    286	nbl = find_blocked_lock(lo, fh, nn);
    287	if (!nbl) {
    288		nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
    289		if (nbl) {
    290			INIT_LIST_HEAD(&nbl->nbl_list);
    291			INIT_LIST_HEAD(&nbl->nbl_lru);
    292			fh_copy_shallow(&nbl->nbl_fh, fh);
    293			locks_init_lock(&nbl->nbl_lock);
    294			kref_init(&nbl->nbl_kref);
    295			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
    296					&nfsd4_cb_notify_lock_ops,
    297					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
    298		}
    299	}
    300	return nbl;
    301}
    302
    303static void
    304free_nbl(struct kref *kref)
    305{
    306	struct nfsd4_blocked_lock *nbl;
    307
    308	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
    309	kfree(nbl);
    310}
    311
/*
 * Release a blocked-lock entry: make sure it is off the file-locking
 * blocker list, release any lock private data, and drop a kref (the
 * memory is freed by free_nbl() when the last reference goes away).
 */
static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}
    319
/*
 * Release every blocked lock request still queued on a lockowner.
 * Entries are first moved to a private reaplist under
 * blocked_locks_lock; the actual freeing happens after the lock is
 * dropped.
 */
static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		/* anything on lo_blocked should be on the lru as well */
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}
    348
    349static void
    350nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
    351{
    352	struct nfsd4_blocked_lock	*nbl = container_of(cb,
    353						struct nfsd4_blocked_lock, nbl_cb);
    354	locks_delete_block(&nbl->nbl_lock);
    355}
    356
    357static int
    358nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
    359{
    360	/*
    361	 * Since this is just an optimization, we don't try very hard if it
    362	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
    363	 * just quit trying on anything else.
    364	 */
    365	switch (task->tk_status) {
    366	case -NFS4ERR_DELAY:
    367		rpc_delay(task, 1 * HZ);
    368		return 0;
    369	default:
    370		return 1;
    371	}
    372}
    373
    374static void
    375nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
    376{
    377	struct nfsd4_blocked_lock	*nbl = container_of(cb,
    378						struct nfsd4_blocked_lock, nbl_cb);
    379
    380	free_blocked_lock(nbl);
    381}
    382
/* Callback ops driving CB_NOTIFY_LOCK for blocked lock requests. */
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};
    388
    389/*
    390 * We store the NONE, READ, WRITE, and BOTH bits separately in the
    391 * st_{access,deny}_bmap field of the stateid, in order to track not
    392 * only what share bits are currently in force, but also what
    393 * combinations of share bits previous opens have used.  This allows us
    394 * to enforce the recommendation in
    395 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
    396 * the server return an error if the client attempt to downgrade to a
    397 * combination of share bits not explicable by closing some of its
    398 * previous opens.
    399 *
    400 * This enforcement is arguably incomplete, since we don't keep
    401 * track of access/deny bit combinations; so, e.g., we allow:
    402 *
    403 *	OPEN allow read, deny write
    404 *	OPEN allow both, deny none
    405 *	DOWNGRADE allow read, deny none
    406 *
    407 * which we should reject.
    408 *
    409 * But you could also argue that our current code is already overkill,
    410 * since it only exists to return NFS4ERR_INVAL on incorrect client
    411 * behavior.
    412 */
    413static unsigned int
    414bmap_to_share_mode(unsigned long bmap)
    415{
    416	int i;
    417	unsigned int access = 0;
    418
    419	for (i = 1; i < 4; i++) {
    420		if (test_bit(i, &bmap))
    421			access |= i;
    422	}
    423	return access;
    424}
    425
    426/* set share access for a given stateid */
    427static inline void
    428set_access(u32 access, struct nfs4_ol_stateid *stp)
    429{
    430	unsigned char mask = 1 << access;
    431
    432	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
    433	stp->st_access_bmap |= mask;
    434}
    435
    436/* clear share access for a given stateid */
    437static inline void
    438clear_access(u32 access, struct nfs4_ol_stateid *stp)
    439{
    440	unsigned char mask = 1 << access;
    441
    442	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
    443	stp->st_access_bmap &= ~mask;
    444}
    445
    446/* test whether a given stateid has access */
    447static inline bool
    448test_access(u32 access, struct nfs4_ol_stateid *stp)
    449{
    450	unsigned char mask = 1 << access;
    451
    452	return (bool)(stp->st_access_bmap & mask);
    453}
    454
    455/* set share deny for a given stateid */
    456static inline void
    457set_deny(u32 deny, struct nfs4_ol_stateid *stp)
    458{
    459	unsigned char mask = 1 << deny;
    460
    461	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
    462	stp->st_deny_bmap |= mask;
    463}
    464
    465/* clear share deny for a given stateid */
    466static inline void
    467clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
    468{
    469	unsigned char mask = 1 << deny;
    470
    471	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
    472	stp->st_deny_bmap &= ~mask;
    473}
    474
    475/* test whether a given stateid is denying specific access */
    476static inline bool
    477test_deny(u32 deny, struct nfs4_ol_stateid *stp)
    478{
    479	unsigned char mask = 1 << deny;
    480
    481	return (bool)(stp->st_deny_bmap & mask);
    482}
    483
    484static int nfs4_access_to_omode(u32 access)
    485{
    486	switch (access & NFS4_SHARE_ACCESS_BOTH) {
    487	case NFS4_SHARE_ACCESS_READ:
    488		return O_RDONLY;
    489	case NFS4_SHARE_ACCESS_WRITE:
    490		return O_WRONLY;
    491	case NFS4_SHARE_ACCESS_BOTH:
    492		return O_RDWR;
    493	}
    494	WARN_ON_ONCE(1);
    495	return O_RDONLY;
    496}
    497
    498static inline int
    499access_permit_read(struct nfs4_ol_stateid *stp)
    500{
    501	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
    502		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
    503		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
    504}
    505
    506static inline int
    507access_permit_write(struct nfs4_ol_stateid *stp)
    508{
    509	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
    510		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
    511}
    512
/* Take a reference on a stateowner; returns it for call chaining. */
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}
    519
    520static int
    521same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
    522{
    523	return (sop->so_owner.len == owner->len) &&
    524		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
    525}
    526
/*
 * Look up an open-owner matching open->op_owner in the client's
 * owner-string hash bucket 'hashval'.  On success a reference is taken
 * on the owner before it is returned; returns NULL if not found.
 * Caller must hold clp->cl_lock (asserted below).
 */
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		/* the bucket mixes open- and lock-owners; skip the latter */
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}
    544
/* As find_openstateowner_str_locked(), but takes clp->cl_lock itself. */
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}
    556
    557static inline u32
    558opaque_hashval(const void *ptr, int nbytes)
    559{
    560	unsigned char *cptr = (unsigned char *) ptr;
    561
    562	u32 x = 0;
    563	while (nbytes--) {
    564		x *= 37;
    565		x += *cptr++;
    566	}
    567	return x;
    568}
    569
    570static void nfsd4_free_file_rcu(struct rcu_head *rcu)
    571{
    572	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
    573
    574	kmem_cache_free(file_slab, fp);
    575}
    576
/*
 * Drop a reference on an nfs4_file.  On the final put the file is
 * unhashed under state_lock and freed only after an RCU grace period
 * (via nfsd4_free_file_rcu), so RCU hash walkers never touch freed
 * memory.
 */
void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		/* all odstates and delegations must be gone by now */
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
    590
    591static struct nfsd_file *
    592__nfs4_get_fd(struct nfs4_file *f, int oflag)
    593{
    594	if (f->fi_fds[oflag])
    595		return nfsd_file_get(f->fi_fds[oflag]);
    596	return NULL;
    597}
    598
    599static struct nfsd_file *
    600find_writeable_file_locked(struct nfs4_file *f)
    601{
    602	struct nfsd_file *ret;
    603
    604	lockdep_assert_held(&f->fi_lock);
    605
    606	ret = __nfs4_get_fd(f, O_WRONLY);
    607	if (!ret)
    608		ret = __nfs4_get_fd(f, O_RDWR);
    609	return ret;
    610}
    611
    612static struct nfsd_file *
    613find_writeable_file(struct nfs4_file *f)
    614{
    615	struct nfsd_file *ret;
    616
    617	spin_lock(&f->fi_lock);
    618	ret = find_writeable_file_locked(f);
    619	spin_unlock(&f->fi_lock);
    620
    621	return ret;
    622}
    623
    624static struct nfsd_file *
    625find_readable_file_locked(struct nfs4_file *f)
    626{
    627	struct nfsd_file *ret;
    628
    629	lockdep_assert_held(&f->fi_lock);
    630
    631	ret = __nfs4_get_fd(f, O_RDONLY);
    632	if (!ret)
    633		ret = __nfs4_get_fd(f, O_RDWR);
    634	return ret;
    635}
    636
    637static struct nfsd_file *
    638find_readable_file(struct nfs4_file *f)
    639{
    640	struct nfsd_file *ret;
    641
    642	spin_lock(&f->fi_lock);
    643	ret = find_readable_file_locked(f);
    644	spin_unlock(&f->fi_lock);
    645
    646	return ret;
    647}
    648
    649struct nfsd_file *
    650find_any_file(struct nfs4_file *f)
    651{
    652	struct nfsd_file *ret;
    653
    654	if (!f)
    655		return NULL;
    656	spin_lock(&f->fi_lock);
    657	ret = __nfs4_get_fd(f, O_RDWR);
    658	if (!ret) {
    659		ret = __nfs4_get_fd(f, O_WRONLY);
    660		if (!ret)
    661			ret = __nfs4_get_fd(f, O_RDONLY);
    662	}
    663	spin_unlock(&f->fi_lock);
    664	return ret;
    665}
    666
    667static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
    668{
    669	struct nfsd_file *ret = NULL;
    670
    671	spin_lock(&f->fi_lock);
    672	if (f->fi_deleg_file)
    673		ret = nfsd_file_get(f->fi_deleg_file);
    674	spin_unlock(&f->fi_lock);
    675	return ret;
    676}
    677
    678static atomic_long_t num_delegations;
    679unsigned long max_delegations;
    680
    681/*
    682 * Open owner state (share locks)
    683 */
    684
    685/* hash tables for lock and open owners */
    686#define OWNER_HASH_BITS              8
    687#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
    688#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
    689
    690static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
    691{
    692	unsigned int ret;
    693
    694	ret = opaque_hashval(ownername->data, ownername->len);
    695	return ret & OWNER_HASH_MASK;
    696}
    697
    698/* hash table for nfs4_file */
    699#define FILE_HASH_BITS                   8
    700#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
    701
/* Hash a filehandle into file_hashtbl using the backing inode number. */
static unsigned int file_hashval(struct svc_fh *fh)
{
	struct inode *inode = d_inode(fh->fh_dentry);

	/* XXX: why not (here & in file cache) use inode? */
	return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
}
    709
    710static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
    711
    712/*
    713 * Check if courtesy clients have conflicting access and resolve it if possible
    714 *
    715 * access:  is op_share_access if share_access is true.
    716 *	    Check if access mode, op_share_access, would conflict with
    717 *	    the current deny mode of the file 'fp'.
    718 * access:  is op_share_deny if share_access is false.
    719 *	    Check if the deny mode, op_share_deny, would conflict with
    720 *	    current access of the file 'fp'.
    721 * stp:     skip checking this entry.
    722 * new_stp: normal open, not open upgrade.
    723 *
    724 * Function returns:
    725 *	false - access/deny mode conflict with normal client.
    726 *	true  - no conflict or conflict with courtesy client(s) is resolved.
    727 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		/* ignore lock stateid */
		if (st->st_openstp)
			continue;
		/* for a normal open, don't compare against the new stateid itself */
		if (st == stp && new_stp)
			continue;
		/* check file access against deny mode or vice versa */
		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		/*
		 * Conflict: it is only resolvable if the conflicting
		 * client can be expired (i.e. it is a courtesy client).
		 */
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		/* run the laundromat now to reap any clients expired above */
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}
    762
/*
 * Bump the per-open-mode access counts for each mode bit in 'access'.
 * Caller must hold fp->fi_lock (asserted below).
 */
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}
    773
/*
 * Validate and account a share-access request against 'fp'.  Returns
 * nfserr_inval for bits outside ACCESS_BOTH, nfserr_share_denied if
 * the request conflicts with an existing deny mode, otherwise takes
 * the per-mode access references and returns nfs_ok.
 * Caller must hold fp->fi_lock (asserted below).
 */
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}
    790
    791static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
    792{
    793	/* Common case is that there is no deny mode. */
    794	if (deny) {
    795		/* Does this deny mode make sense? */
    796		if (deny & ~NFS4_SHARE_DENY_BOTH)
    797			return nfserr_inval;
    798
    799		if ((deny & NFS4_SHARE_DENY_READ) &&
    800		    atomic_read(&fp->fi_access[O_RDONLY]))
    801			return nfserr_share_denied;
    802
    803		if ((deny & NFS4_SHARE_DENY_WRITE) &&
    804		    atomic_read(&fp->fi_access[O_WRONLY]))
    805			return nfserr_share_denied;
    806	}
    807	return nfs_ok;
    808}
    809
/*
 * Drop one access reference for the given open mode (O_RDONLY or
 * O_WRONLY).  When the count for that mode hits zero, the matching
 * cached nfsd_file is detached (and so is the O_RDWR file, if the
 * opposite mode is also unused); the files are closed via
 * nfsd_file_put() only after fi_lock has been dropped.
 */
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		/* detach under the lock, put after dropping it */
		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}
    828
/* Drop per-mode access references for each share-access bit set. */
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
    838
    839/*
    840 * Allocate a new open/delegation state counter. This is needed for
    841 * pNFS for proper return on close semantics.
    842 *
    843 * Note that we only allocate it for pNFS-enabled exports, otherwise
    844 * all pointers to struct nfs4_clnt_odstate are always NULL.
    845 */
    846static struct nfs4_clnt_odstate *
    847alloc_clnt_odstate(struct nfs4_client *clp)
    848{
    849	struct nfs4_clnt_odstate *co;
    850
    851	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
    852	if (co) {
    853		co->co_client = clp;
    854		refcount_set(&co->co_odcount, 1);
    855	}
    856	return co;
    857}
    858
/* Link 'co' into its file's odstate list; caller holds fi_lock. */
static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}
    867
    868static inline void
    869get_clnt_odstate(struct nfs4_clnt_odstate *co)
    870{
    871	if (co)
    872		refcount_inc(&co->co_odcount);
    873}
    874
/*
 * Drop a reference on an open/delegation state counter.  On the final
 * put the odstate is unhashed from its file under fi_lock, any file
 * layouts held by the client for this file are returned, and the
 * structure is freed.  NULL (non-pNFS export) is a no-op.
 */
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}
    892
/*
 * Return the odstate tracking 'fp' for new->co_client: an existing
 * entry (with an extra reference taken) when found, otherwise 'new'
 * after linking it onto the file.  A NULL 'new' returns NULL.
 */
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
    918
/*
 * Allocate a generic stateid object from 'slab' and install it in the
 * client's stateid idr.  The returned stid carries one reference and
 * uses 'sc_free' as its destructor.  Returns NULL if either the slab
 * allocation or the id allocation fails.
 */
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
    961
/*
 * Create a unique stateid_t to represent each COPY.
 * Returns 1 on success, 0 if an id could not be allocated.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}
    985
    986int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
    987{
    988	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
    989}
    990
    991struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
    992						     struct nfs4_stid *p_stid)
    993{
    994	struct nfs4_cpntf_state *cps;
    995
    996	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
    997	if (!cps)
    998		return NULL;
    999	cps->cpntf_time = ktime_get_boottime_seconds();
   1000	refcount_set(&cps->cp_stateid.sc_count, 1);
   1001	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
   1002		goto out_free;
   1003	spin_lock(&nn->s2s_cp_lock);
   1004	list_add(&cps->cp_list, &p_stid->sc_cp_list);
   1005	spin_unlock(&nn->s2s_cp_lock);
   1006	return cps;
   1007out_free:
   1008	kfree(cps);
   1009	return NULL;
   1010}
   1011
   1012void nfs4_free_copy_state(struct nfsd4_copy *copy)
   1013{
   1014	struct nfsd_net *nn;
   1015
   1016	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
   1017	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
   1018	spin_lock(&nn->s2s_cp_lock);
   1019	idr_remove(&nn->s2s_cp_stateids,
   1020		   copy->cp_stateid.stid.si_opaque.so_id);
   1021	spin_unlock(&nn->s2s_cp_lock);
   1022}
   1023
/*
 * Release every copy-notify state hanging off @stid, under the per-net
 * s2s_cp_lock.  _free_cpntf_state_locked() presumably unlinks each
 * entry from sc_cp_list (the loop would not terminate otherwise) —
 * confirm against its definition.
 */
static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}
   1038
   1039static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
   1040{
   1041	struct nfs4_stid *stid;
   1042
   1043	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
   1044	if (!stid)
   1045		return NULL;
   1046
   1047	return openlockstateid(stid);
   1048}
   1049
/*
 * sc_free callback for delegation stateids: return the memory to the
 * delegation slab and drop the global delegation count.
 */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
   1055
   1056/*
   1057 * When we recall a delegation, we should be careful not to hand it
   1058 * out again straight away.
   1059 * To ensure this we keep a pair of bloom filters ('new' and 'old')
   1060 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
   1062 * When a delegation is recalled, the filehandle is stored in the "new"
   1063 * filter.
   1064 * Every 30 seconds we swap the filters and clear the "new" one,
   1065 * unless both are empty of course.
   1066 *
   1067 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
   1068 * low 3 bytes as hash-table indices.
   1069 *
   1070 * 'blocked_delegations_lock', which is always taken in block_delegations(),
   1071 * is used to manage concurrent access.  Testing does not need the lock
   1072 * except when swapping the two filters.
   1073 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;	/* total entries, and those only in the old filter */
	time64_t swap_time;		/* when the filters were last swapped */
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);	/* the two 256-bit bloom filters */
} blocked_delegations;
   1081
/*
 * Return non-zero if delegations on this filehandle are currently
 * blocked (i.e. the filehandle may be in one of the bloom filters).
 *
 * The fast path (no blocked entries at all) takes no lock.  The
 * 30-second filter swap uses a double-check under
 * blocked_delegations_lock so only one caller performs it; the bit
 * tests themselves are lockless, per the comment above bloom_pair.
 */
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			/* retire old entries, clear the filter that will
			 * become "new", and swap roles */
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	/* a filter matches only if all three hash-byte bits are set */
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}
   1114
   1115static void block_delegations(struct knfsd_fh *fh)
   1116{
   1117	u32 hash;
   1118	struct bloom_pair *bd = &blocked_delegations;
   1119
   1120	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
   1121
   1122	spin_lock(&blocked_delegations_lock);
   1123	__set_bit(hash&255, bd->set[bd->new]);
   1124	__set_bit((hash>>8)&255, bd->set[bd->new]);
   1125	__set_bit((hash>>16)&255, bd->set[bd->new]);
   1126	if (bd->entries == 0)
   1127		bd->swap_time = ktime_get_seconds();
   1128	bd->entries += 1;
   1129	spin_unlock(&blocked_delegations_lock);
   1130}
   1131
/*
 * Allocate and minimally initialize a delegation stateid for @clp on
 * @fp.  Returns NULL if the global delegation limit has been reached,
 * if delegations on this filehandle are temporarily blocked after a
 * recall, or if allocation fails.  The caller completes setup and
 * hashes the result.
 */
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	/* n < 0 guards against the counter having wrapped negative */
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	/* start as a read delegation; presumably callers may change
	 * dl_type later — not visible here */
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	dp->dl_recalled = false;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	/* the delegation holds a reference on the file */
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
   1173
/*
 * Drop a reference on a generic stateid.  On the final put (detected
 * atomically with taking cl_lock) the stateid is removed from the
 * client's idr, its copy-notify children are released, and the
 * type-specific sc_free callback frees it.  On a non-final put,
 * close_wq waiters are woken so they can re-check the refcount.
 */
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	/* sc_free frees the stateid; the file ref is dropped after */
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}
   1193
   1194void
   1195nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
   1196{
   1197	stateid_t *src = &stid->sc_stateid;
   1198
   1199	spin_lock(&stid->sc_lock);
   1200	if (unlikely(++src->si_generation == 0))
   1201		src->si_generation = 1;
   1202	memcpy(dst, src, sizeof(*dst));
   1203	spin_unlock(&stid->sc_lock);
   1204}
   1205
   1206static void put_deleg_file(struct nfs4_file *fp)
   1207{
   1208	struct nfsd_file *nf = NULL;
   1209
   1210	spin_lock(&fp->fi_lock);
   1211	if (--fp->fi_delegees == 0)
   1212		swap(nf, fp->fi_deleg_file);
   1213	spin_unlock(&fp->fi_lock);
   1214
   1215	if (nf)
   1216		nfsd_file_put(nf);
   1217}
   1218
/*
 * Remove the lease backing a delegation and drop the delegation's
 * reference on the file's cached nfsd_file.
 */
static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	/* a delegation without a delegee refcount would be a bug */
	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}
   1229
/*
 * Tear down a delegation that is already off all hash lists: release
 * its open/deny state, remove the lease, and drop the stateid ref.
 */
static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}
   1236
/* Invalidate a stateid by clearing its type. */
void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
   1241
   1242/**
   1243 * nfs4_delegation_exists - Discover if this delegation already exists
   1244 * @clp:     a pointer to the nfs4_client we're granting a delegation to
   1245 * @fp:      a pointer to the nfs4_file we're granting a delegation on
   1246 *
   1247 * Return:
   1248 *      On success: true iff an existing delegation is found
   1249 */
   1250
   1251static bool
   1252nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
   1253{
   1254	struct nfs4_delegation *searchdp = NULL;
   1255	struct nfs4_client *searchclp = NULL;
   1256
   1257	lockdep_assert_held(&state_lock);
   1258	lockdep_assert_held(&fp->fi_lock);
   1259
   1260	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
   1261		searchclp = searchdp->dl_stid.sc_client;
   1262		if (clp == searchclp) {
   1263			return true;
   1264		}
   1265	}
   1266	return false;
   1267}
   1268
/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	/* the hash lists hold a reference on the stateid */
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}
   1298
   1299static bool delegation_hashed(struct nfs4_delegation *dp)
   1300{
   1301	return !(list_empty(&dp->dl_perfile));
   1302}
   1303
/*
 * Remove a delegation from the file, client, and recall-lru lists.
 * Caller holds state_lock; fi_lock is taken here for the list surgery.
 * Returns false if the delegation was already unhashed.
 */
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}
   1324
   1325static void destroy_delegation(struct nfs4_delegation *dp)
   1326{
   1327	bool unhashed;
   1328
   1329	spin_lock(&state_lock);
   1330	unhashed = unhash_delegation_locked(dp);
   1331	spin_unlock(&state_lock);
   1332	if (unhashed)
   1333		destroy_unhashed_deleg(dp);
   1334}
   1335
/*
 * Handle a revoked delegation.  For minor version >= 1 clients the
 * stateid is parked on cl_revoked (with an extra reference) so the
 * client can later discover and acknowledge the revocation; v4.0 has
 * no mechanism for that, so the state is simply destroyed.
 */
static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	/* caller must have already unhashed it from the recall lru */
	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}
   1351
   1352/* 
   1353 * SETCLIENTID state 
   1354 */
   1355
/* Bucket index for a clientid: its low-order bits. */
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}
   1360
/*
 * Hash a client owner name into the string hash table.  Only the first
 * 8 bytes are hashed; NOTE(review): assumes name.data always has at
 * least 8 bytes — confirm against callers.
 */
static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}
   1365
   1366/*
   1367 * A stateid that had a deny mode associated with it is being released
   1368 * or downgraded. Recalculate the deny mode on the file.
   1369 */
   1370static void
   1371recalculate_deny_mode(struct nfs4_file *fp)
   1372{
   1373	struct nfs4_ol_stateid *stp;
   1374
   1375	spin_lock(&fp->fi_lock);
   1376	fp->fi_share_deny = 0;
   1377	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
   1378		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
   1379	spin_unlock(&fp->fi_lock);
   1380}
   1381
   1382static void
   1383reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
   1384{
   1385	int i;
   1386	bool change = false;
   1387
   1388	for (i = 1; i < 4; i++) {
   1389		if ((i & deny) != i) {
   1390			change = true;
   1391			clear_deny(i, stp);
   1392		}
   1393	}
   1394
   1395	/* Recalculate per-file deny mode if there was a change */
   1396	if (change)
   1397		recalculate_deny_mode(stp->st_stid.sc_file);
   1398}
   1399
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	/* NOTE(review): assumes stp has already been removed from
	 * fp->fi_stateids, so the recomputed deny mode excludes it —
	 * confirm against callers */
	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	/* drop each of the three possible access modes (1..3) */
	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}
   1416
/* Free a stateowner's owner-name buffer and the owner itself. */
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}
   1422
/*
 * Drop a reference on a stateowner.  The final put (detected
 * atomically with taking cl_lock) unhashes and frees the owner.
 */
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}
   1435
/* An open/lock stateid is unhashed once it is off its file's list. */
static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}
   1441
   1442static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
   1443{
   1444	struct nfs4_file *fp = stp->st_stid.sc_file;
   1445
   1446	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
   1447
   1448	if (list_empty(&stp->st_perfile))
   1449		return false;
   1450
   1451	spin_lock(&fp->fi_lock);
   1452	list_del_init(&stp->st_perfile);
   1453	spin_unlock(&fp->fi_lock);
   1454	list_del(&stp->st_perstateowner);
   1455	return true;
   1456}
   1457
/*
 * sc_free callback for open and lock stateids: drop the open/deny
 * state, release all file access held by this stateid, put the owner
 * reference (st_stateowner may be NULL), and free the slab object.
 */
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}
   1468
/*
 * sc_free callback for lock stateids.  Before freeing the generic
 * state, filp_close() is called with the lockowner as fl_owner so any
 * posix locks this owner holds on the file are released.
 */
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		/* hold an extra file ref across filp_close */
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}
   1483
/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		/* not the last ref; wake waiters to re-check the count */
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	/* st_locks is known empty (see WARN above), so reuse it as the
	 * reaplist linkage */
	list_add(&stp->st_locks, reaplist);
}
   1507
/*
 * Unhash a lock stateid: remove it from its file/owner lists and from
 * the open stateid's st_locks list, then invalidate it.  Caller holds
 * the client's cl_lock.  Returns false if already unhashed.
 */
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}
   1518
   1519static void release_lock_stateid(struct nfs4_ol_stateid *stp)
   1520{
   1521	struct nfs4_client *clp = stp->st_stid.sc_client;
   1522	bool unhashed;
   1523
   1524	spin_lock(&clp->cl_lock);
   1525	unhashed = unhash_lock_stateid(stp);
   1526	spin_unlock(&clp->cl_lock);
   1527	if (unhashed)
   1528		nfs4_put_stid(&stp->st_stid);
   1529}
   1530
/* Remove a lockowner from the string hash; caller holds cl_lock. */
static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}
   1539
   1540/*
   1541 * Free a list of generic stateids that were collected earlier after being
   1542 * fully unhashed.
   1543 */
   1544static void
   1545free_ol_stateid_reaplist(struct list_head *reaplist)
   1546{
   1547	struct nfs4_ol_stateid *stp;
   1548	struct nfs4_file *fp;
   1549
   1550	might_sleep();
   1551
   1552	while (!list_empty(reaplist)) {
   1553		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
   1554				       st_locks);
   1555		list_del(&stp->st_locks);
   1556		fp = stp->st_stid.sc_file;
   1557		stp->st_stid.sc_free(&stp->st_stid);
   1558		if (fp)
   1559			put_nfs4_file(fp);
   1560	}
   1561}
   1562
/*
 * Unhash every lock stateid hanging off an open stateid, queueing each
 * final reference onto @reaplist to be freed once cl_lock is dropped.
 */
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		/* unhash must succeed: the entry is still on st_locks */
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}
   1577
/*
 * Unhash an open stateid together with all of its lock stateids.
 * Caller holds cl_lock.  Returns false if it was already unhashed.
 */
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}
   1588
   1589static void release_open_stateid(struct nfs4_ol_stateid *stp)
   1590{
   1591	LIST_HEAD(reaplist);
   1592
   1593	spin_lock(&stp->st_stid.sc_client->cl_lock);
   1594	if (unhash_open_stateid(stp, &reaplist))
   1595		put_ol_stateid_locked(stp, &reaplist);
   1596	spin_unlock(&stp->st_stid.sc_client->cl_lock);
   1597	free_ol_stateid_reaplist(&reaplist);
   1598}
   1599
/*
 * Remove an openowner from the string hash and from its client's
 * per-client list; caller holds cl_lock.
 */
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}
   1609
/*
 * Drop the openowner's cached "last closed" stateid, if any, and take
 * the owner off oo_close_lru.  NOTE(review): the stateid is presumably
 * retained for CLOSE replay handling — confirm against the close path.
 */
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	/* drop the ref outside client_lock */
	if (s)
		nfs4_put_stid(&s->st_stid);
}
   1626
/*
 * Tear down an openowner: unhash it, unhash and collect all of its
 * open stateids (and their lock stateids), then — after dropping
 * cl_lock, since freeing may sleep (see might_sleep() in
 * free_ol_stateid_reaplist) — free them, release the cached
 * last-closed stateid, and put the owner reference.
 */
static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
   1648
/*
 * Map a session id to its hash bucket.  Only the embedded sequence
 * field is used; gen_sessionid() gives each new session a fresh one.
 */
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}
   1656
#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	/* print the 16-byte session id as four u32 words */
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
/* no-op when SUNRPC debugging is compiled out */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
   1670
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	/* a replayed request leaves both seqid and replay_owner alone */
	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}
   1693
/*
 * Build a session id from the client's clientid plus a global sequence
 * counter.  NOTE(review): current_sessionid++ is not visibly
 * serialized here; presumably callers hold a lock covering session
 * creation — confirm against the CREATE_SESSION path.
 */
static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
   1705
   1706/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
   1708 * the rpc header, but all we need to cache is the data starting after
   1709 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
   1711 * value that is the number of bytes in our cache plus a few additional
   1712 * bytes.  In order to stay on the safe side, and not promise more than
   1713 * we can cache, those additional bytes must be the minimum possible: 24
   1714 * bytes of rpc header (xid through accept state, with AUTH_NULL
   1715 * verifier), 12 for the compound header (with zero-length tag), and 44
   1716 * for the SEQUENCE op response:
   1717 */
   1718#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
   1719
/*
 * Free a session's reply-cache slots.  Each slot was allocated as one
 * kzalloc (struct nfsd4_slot plus inline cached data — see
 * alloc_session()), so a single kfree per slot suffices after
 * releasing the slot's cached credentials.
 */
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}
   1730
   1731/*
   1732 * We don't actually need to cache the rpc and session headers, so we
   1733 * can allocate a little less for each slot:
   1734 */
   1735static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
   1736{
   1737	u32 size;
   1738
   1739	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
   1740		size = 0;
   1741	else
   1742		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
   1743	return size + sizeof(struct nfsd4_slot);
   1744}
   1745
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;		/* slots the client asked for */
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow.  That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller.  This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	/* clamp's low bound of one slotsize guarantees num >= 1 below */
	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
   1789
   1790static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
   1791{
   1792	int slotsize = slot_bytes(ca);
   1793
   1794	spin_lock(&nfsd_drc_lock);
   1795	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
   1796	spin_unlock(&nfsd_drc_lock);
   1797}
   1798
/*
 * Allocate a session and its slot table.  The session struct together
 * with its trailing array of slot pointers must fit in one page (see
 * BUILD_BUG_ON); each slot is a separate allocation with its reply
 * cache inline.  Returns NULL on allocation failure.
 */
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	/* unwind only the slots allocated so far */
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
   1831
/* Drop a connection's transport reference and free it. */
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}
   1837
/*
 * svc_xprt user callback: the transport underlying this session
 * connection went away.  Unlink and free the connection if it is still
 * on the session's list, and re-probe the callback channel, which may
 * have depended on this connection.
 */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	trace_nfsd_cb_lost(clp);

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}
   1853
   1854static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
   1855{
   1856	struct nfsd4_conn *conn;
   1857
   1858	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
   1859	if (!conn)
   1860		return NULL;
   1861	svc_xprt_get(rqstp->rq_xprt);
   1862	conn->cn_xprt = rqstp->rq_xprt;
   1863	conn->cn_flags = flags;
   1864	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
   1865	return conn;
   1866}
   1867
/* Link a connection to a session; caller holds the client's cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}
   1873
/* Link a connection to a session, taking cl_lock ourselves. */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}
   1882
/*
 * Arrange for nfsd4_conn_lost() to be called when the transport dies.
 * A non-zero return means the transport is already going down (see the
 * caller's handling in nfsd4_init_conn()).
 */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
   1888
/*
 * Attach a new connection to a session and register for notification
 * of transport loss.  The callback channel is re-probed afterwards
 * since the set of usable connections has changed.
 */
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}
   1901
   1902static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
   1903{
   1904	u32 dir = NFS4_CDFC4_FORE;
   1905
   1906	if (cses->flags & SESSION4_BACK_CHAN)
   1907		dir |= NFS4_CDFC4_BACK;
   1908	return alloc_conn(rqstp, dir);
   1909}
   1910
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		/* drop cl_lock for unregister/free — presumably because
		 * unregister_xpt_user() may block; confirm */
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}
   1930
/* Free a session's slots and the session itself; no list unlinking. */
static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}
   1936
/*
 * Fully destroy a session: drop its connections, return its DRC
 * memory reservation, then free it.
 */
static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
   1943
/*
 * Initialize a freshly allocated session from the CREATE_SESSION
 * arguments, hash it into the per-net session table, and link it to
 * its client.
 */
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	/* backchannel sequence numbering starts at 1 */
	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
   1978
   1979/* caller must hold client_lock */
   1980static struct nfsd4_session *
   1981__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
   1982{
   1983	struct nfsd4_session *elem;
   1984	int idx;
   1985	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
   1986
   1987	lockdep_assert_held(&nn->client_lock);
   1988
   1989	dump_sessionid(__func__, sessionid);
   1990	idx = hash_sessionid(sessionid);
   1991	/* Search in the appropriate list */
   1992	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
   1993		if (!memcmp(elem->se_sessionid.data, sessionid->data,
   1994			    NFS4_MAX_SESSIONID_LEN)) {
   1995			return elem;
   1996		}
   1997	}
   1998
   1999	dprintk("%s: session not found\n", __func__);
   2000	return NULL;
   2001}
   2002
   2003static struct nfsd4_session *
   2004find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
   2005		__be32 *ret)
   2006{
   2007	struct nfsd4_session *session;
   2008	__be32 status = nfserr_badsession;
   2009
   2010	session = __find_in_sessionid_hashtbl(sessionid, net);
   2011	if (!session)
   2012		goto out;
   2013	status = nfsd4_get_session_locked(session);
   2014	if (status)
   2015		session = NULL;
   2016out:
   2017	*ret = status;
   2018	return session;
   2019}
   2020
/* caller must hold client_lock */
/*
 * Remove a session from the sessionid hash table and from its client's
 * cl_sessions list (the per-client list is protected by cl_lock).
 */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}
   2035
   2036/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
   2037static int
   2038STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
   2039{
   2040	/*
   2041	 * We're assuming the clid was not given out from a boot
   2042	 * precisely 2^32 (about 136 years) before this one.  That seems
   2043	 * a safe assumption:
   2044	 */
   2045	if (clid->cl_boot == (u32)nn->boot_time)
   2046		return 0;
   2047	trace_nfsd_clid_stale(clid);
   2048	return 1;
   2049}
   2050
   2051/* 
   2052 * XXX Should we use a slab cache ?
   2053 * This type of memory management is somewhat inefficient, but we use it
   2054 * anyway since SETCLIENTID is not a common operation.
   2055 */
   2056static struct nfs4_client *alloc_client(struct xdr_netobj name)
   2057{
   2058	struct nfs4_client *clp;
   2059	int i;
   2060
   2061	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
   2062	if (clp == NULL)
   2063		return NULL;
   2064	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
   2065	if (clp->cl_name.data == NULL)
   2066		goto err_no_name;
   2067	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
   2068						 sizeof(struct list_head),
   2069						 GFP_KERNEL);
   2070	if (!clp->cl_ownerstr_hashtbl)
   2071		goto err_no_hashtbl;
   2072	for (i = 0; i < OWNER_HASH_SIZE; i++)
   2073		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
   2074	INIT_LIST_HEAD(&clp->cl_sessions);
   2075	idr_init(&clp->cl_stateids);
   2076	atomic_set(&clp->cl_rpc_users, 0);
   2077	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
   2078	clp->cl_state = NFSD4_ACTIVE;
   2079	atomic_set(&clp->cl_delegs_in_recall, 0);
   2080	INIT_LIST_HEAD(&clp->cl_idhash);
   2081	INIT_LIST_HEAD(&clp->cl_openowners);
   2082	INIT_LIST_HEAD(&clp->cl_delegations);
   2083	INIT_LIST_HEAD(&clp->cl_lru);
   2084	INIT_LIST_HEAD(&clp->cl_revoked);
   2085#ifdef CONFIG_NFSD_PNFS
   2086	INIT_LIST_HEAD(&clp->cl_lo_states);
   2087#endif
   2088	INIT_LIST_HEAD(&clp->async_copies);
   2089	spin_lock_init(&clp->async_lock);
   2090	spin_lock_init(&clp->cl_lock);
   2091	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
   2092	return clp;
   2093err_no_hashtbl:
   2094	kfree(clp->cl_name.data);
   2095err_no_name:
   2096	kmem_cache_free(client_slab, clp);
   2097	return NULL;
   2098}
   2099
/*
 * kref release callback for cl_nfsdfs.cl_ref: frees all memory owned by
 * the client once the last reference (including any held by the nfsdfs
 * interface) is gone.
 */
static void __free_client(struct kref *k)
{
	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);

	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	kfree(clp->cl_nii_domain.data);
	kfree(clp->cl_nii_name.data);
	idr_destroy(&clp->cl_stateids);
	kmem_cache_free(client_slab, clp);
}
   2113
/* Drop one nfsdfs reference; frees the client when it was the last one. */
static void drop_client(struct nfs4_client *clp)
{
	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
}
   2118
/*
 * Tear down a client's remaining sessions and nfsdfs directory, then drop
 * the nfsdfs reference (actual memory is freed by __free_client once that
 * reference count reaches zero).
 */
static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		/* no one should still hold a session reference at this point */
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	if (clp->cl_nfsd_dentry) {
		nfsd_client_rmdir(clp->cl_nfsd_dentry);
		clp->cl_nfsd_dentry = NULL;
		/* wake waiters in force_expire_client() watching the dentry */
		wake_up_all(&expiry_wq);
	}
	drop_client(clp);
}
   2138
/* must be called under the client_lock */
/*
 * Make a client invisible to lookups: mark it expired, remove it from the
 * id hash, the (un)confirmed name tree, the LRU, and unlink all of its
 * sessions from the sessionid hash table.
 */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}
   2164
/* Lock-taking wrapper around unhash_client_locked(). */
static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
   2174
   2175static __be32 mark_client_expired_locked(struct nfs4_client *clp)
   2176{
   2177	if (atomic_read(&clp->cl_rpc_users))
   2178		return nfserr_jukebox;
   2179	unhash_client_locked(clp);
   2180	return nfs_ok;
   2181}
   2182
/*
 * Release all state owned by an already-unhashed client: delegations
 * (collected under state_lock, destroyed outside it), revoked delegations,
 * open owners, any leftover blocked locks, layouts, async copies and the
 * callback machinery; finally free the client itself.
 */
static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		/* defer destruction until state_lock is dropped */
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		/* hold the owner across release_openowner() */
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_copy(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
	wake_up_all(&expiry_wq);
}
   2232
/* Unhash a client and release all of its state. */
static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}
   2239
   2240static void inc_reclaim_complete(struct nfs4_client *clp)
   2241{
   2242	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
   2243
   2244	if (!nn->track_reclaim_completes)
   2245		return;
   2246	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
   2247		return;
   2248	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
   2249			nn->reclaim_str_hashtbl_size) {
   2250		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
   2251				clp->net->ns.inum);
   2252		nfsd4_end_grace(nn);
   2253	}
   2254}
   2255
/*
 * Like destroy_client(), but also removes the client's stable-storage
 * record so it can no longer reclaim state after a reboot.
 */
static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
   2262
/* Copy the client-supplied verifier into the client record. */
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}
   2268
   2269static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
   2270{
   2271	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
   2272	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
   2273}
   2274
/*
 * Deep-copy an svc_cred, duplicating the principal strings and taking
 * references on the group info and gss mech.  Returns 0 on success or
 * -ENOMEM if any principal string duplication failed; on failure the
 * target may hold partially copied strings, which the caller is expected
 * to release via free_svc_cred() (as free_client() does).
 */
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}
   2296
   2297static int
   2298compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
   2299{
   2300	if (o1->len < o2->len)
   2301		return -1;
   2302	if (o1->len > o2->len)
   2303		return 1;
   2304	return memcmp(o1->data, o2->data, o1->len);
   2305}
   2306
   2307static int
   2308same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
   2309{
   2310	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
   2311}
   2312
   2313static int
   2314same_clid(clientid_t *cl1, clientid_t *cl2)
   2315{
   2316	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
   2317}
   2318
   2319static bool groups_equal(struct group_info *g1, struct group_info *g2)
   2320{
   2321	int i;
   2322
   2323	if (g1->ngroups != g2->ngroups)
   2324		return false;
   2325	for (i=0; i<g1->ngroups; i++)
   2326		if (!gid_eq(g1->gid[i], g2->gid[i]))
   2327			return false;
   2328	return true;
   2329}
   2330
   2331/*
   2332 * RFC 3530 language requires clid_inuse be returned when the
   2333 * "principal" associated with a requests differs from that previously
   2334 * used.  We use uid, gid's, and gss principal string as our best
   2335 * approximation.  We also don't want to allow non-gss use of a client
   2336 * established using gss: in theory cr_principal should catch that
   2337 * change, but in practice cr_principal can be null even in the gss case
   2338 * since gssd doesn't always pass down a principal string.
   2339 */
   2340static bool is_gss_cred(struct svc_cred *cr)
   2341{
   2342	/* Is cr_flavor one of the gss "pseudoflavors"?: */
   2343	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
   2344}
   2345
   2346
   2347static bool
   2348same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
   2349{
   2350	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
   2351		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
   2352		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
   2353		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
   2354		return false;
   2355	/* XXX: check that cr_targ_princ fields match ? */
   2356	if (cr1->cr_principal == cr2->cr_principal)
   2357		return true;
   2358	if (!cr1->cr_principal || !cr2->cr_principal)
   2359		return false;
   2360	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
   2361}
   2362
/*
 * True when the request arrived with GSS integrity or privacy protection
 * (non-gss requests have no cr_gss_mech and return false).
 */
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}
   2374
/*
 * Check that a request's machine credential matches the one the client
 * was established with.  Always true when the client didn't opt into
 * SP4_MACH_CRED; otherwise the gss mech, integrity protection, and
 * (raw) principal strings must all match.
 */
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	/* prefer the raw principal when the client recorded one */
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
   2392
/*
 * Generate a fresh confirm verifier for the client from the current time
 * and a per-net counter, so successive verifiers are distinct.
 */
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
   2405
/*
 * Assign a new clientid (server boot time + per-net counter) and a fresh
 * confirm verifier to the client.
 */
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
   2412
   2413static struct nfs4_stid *
   2414find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
   2415{
   2416	struct nfs4_stid *ret;
   2417
   2418	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
   2419	if (!ret || !ret->sc_type)
   2420		return NULL;
   2421	return ret;
   2422}
   2423
   2424static struct nfs4_stid *
   2425find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
   2426{
   2427	struct nfs4_stid *s;
   2428
   2429	spin_lock(&cl->cl_lock);
   2430	s = find_stateid_locked(cl, t);
   2431	if (s != NULL) {
   2432		if (typemask & s->sc_type)
   2433			refcount_inc(&s->sc_count);
   2434		else
   2435			s = NULL;
   2436	}
   2437	spin_unlock(&cl->cl_lock);
   2438	return s;
   2439}
   2440
   2441static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
   2442{
   2443	struct nfsdfs_client *nc;
   2444	nc = get_nfsdfs_client(inode);
   2445	if (!nc)
   2446		return NULL;
   2447	return container_of(nc, struct nfs4_client, cl_nfsdfs);
   2448}
   2449
/*
 * Emit @len bytes of @data to the seq_file as a double-quoted string,
 * hex-escaping non-printables plus embedded quotes and backslashes.
 */
static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
	seq_printf(m, "\"");
	seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
	seq_printf(m, "\"");
}
   2456
   2457static const char *cb_state2str(int state)
   2458{
   2459	switch (state) {
   2460	case NFSD4_CB_UP:
   2461		return "UP";
   2462	case NFSD4_CB_UNKNOWN:
   2463		return "UNKNOWN";
   2464	case NFSD4_CB_DOWN:
   2465		return "DOWN";
   2466	case NFSD4_CB_FAULT:
   2467		return "FAULT";
   2468	}
   2469	return "UNDEFINED";
   2470}
   2471
/*
 * seq_file show routine for the per-client nfsdfs "info" file: prints the
 * clientid, peer address, confirmation/courtesy status, renew age, client
 * name, minor version, optional implementation id, and callback state.
 * Returns -ENXIO if the inode's client is already gone.
 */
static int client_info_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct nfs4_client *clp;
	u64 clid;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;
	/* print the 64-bit clientid as one opaque hex value */
	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
	seq_printf(m, "clientid: 0x%llx\n", clid);
	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);

	if (clp->cl_state == NFSD4_COURTESY)
		seq_puts(m, "status: courtesy\n");
	else if (clp->cl_state == NFSD4_EXPIRABLE)
		seq_puts(m, "status: expirable\n");
	else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		seq_puts(m, "status: confirmed\n");
	else
		seq_puts(m, "status: unconfirmed\n");
	seq_printf(m, "seconds from last renew: %lld\n",
		ktime_get_boottime_seconds() - clp->cl_time);
	seq_printf(m, "name: ");
	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
	/* implementation id is optional (NFSv4.1+ nfs_impl_id4) */
	if (clp->cl_nii_domain.data) {
		seq_printf(m, "Implementation domain: ");
		seq_quote_mem(m, clp->cl_nii_domain.data,
					clp->cl_nii_domain.len);
		seq_printf(m, "\nImplementation name: ");
		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
	}
	seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
	seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
	/* balance the reference taken by get_nfsdfs_clp() */
	drop_client(clp);

	return 0;
}
   2513
/* open() for the "info" file: single_open with the inode as private data. */
static int client_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, client_info_show, inode);
}
   2518
/* File operations for the per-client nfsdfs "info" file. */
static const struct file_operations client_info_fops = {
	.open		= client_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
   2525
/*
 * seq_file start: take cl_lock (held until states_stop) and return the
 * first stateid at or after *pos from the client's idr, updating *pos to
 * the id actually found.
 */
static void *states_start(struct seq_file *s, loff_t *pos)
	__acquires(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	spin_lock(&clp->cl_lock);
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}
   2538
   2539static void *states_next(struct seq_file *s, void *v, loff_t *pos)
   2540{
   2541	struct nfs4_client *clp = s->private;
   2542	unsigned long id = *pos;
   2543	void *ret;
   2544
   2545	id = *pos;
   2546	id++;
   2547	ret = idr_get_next_ul(&clp->cl_stateids, &id);
   2548	*pos = id;
   2549	return ret;
   2550}
   2551
/* seq_file stop: release the cl_lock taken by states_start(). */
static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;

	spin_unlock(&clp->cl_lock);
}
   2559
   2560static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
   2561{
   2562         seq_printf(s, "filename: \"%pD2\"", f->nf_file);
   2563}
   2564
   2565static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
   2566{
   2567	struct inode *inode = f->nf_inode;
   2568
   2569	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
   2570					MAJOR(inode->i_sb->s_dev),
   2571					 MINOR(inode->i_sb->s_dev),
   2572					 inode->i_ino);
   2573}
   2574
/* Print the quoted opaque owner string for the "states" listing. */
static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
	seq_printf(s, "owner: ");
	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}
   2580
/* Print a stateid as generation counter followed by the 12 opaque bytes. */
static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
{
	seq_printf(s, "0x%.8x", stid->si_generation);
	seq_printf(s, "%12phN", &stid->si_opaque);
}
   2586
/*
 * Emit one "states" line for an open stateid: stateid, access/deny share
 * modes, superblock, filename and owner.  Silently skips entries with an
 * unexpected type or no backing file.
 */
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;
	unsigned int access, deny;

	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
		return 0; /* XXX: or SEQ_SKIP? */
	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: open, ");

	access = bmap_to_share_mode(ols->st_access_bmap);
	deny   = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: %s%s, ",
		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: %s%s, ",
		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	/* balance the reference taken by find_any_file() */
	nfsd_file_put(file);

	return 0;
}
   2628
/*
 * Emit one "states" line for a lock stateid: stateid, superblock, filename
 * and owner.  Silently skips entries with no backing file.
 */
static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;

	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: lock, ");

	/*
	 * Note: a lock stateid isn't really the same thing as a lock,
	 * it's the locking state held by one owner on a file, and there
	 * may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
	 */

	nfs4_show_superblock(s, file);
	/* XXX: open stateid? */
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	/* balance the reference taken by find_any_file() */
	nfsd_file_put(file);

	return 0;
}
   2665
/*
 * Emit one "states" line for a delegation stateid: stateid, access mode,
 * superblock and filename.  Silently skips entries with no deleg file.
 */
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_delegation *ds;
	struct nfs4_file *nf;
	struct nfsd_file *file;

	ds = delegstateid(st);
	nf = st->sc_file;
	file = find_deleg_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: deleg, ");

	/* Kinda dead code as long as we only support read delegs: */
	seq_printf(s, "access: %s, ",
		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");

	/* XXX: lease time, whether it's being recalled. */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");
	/* balance the reference taken by find_deleg_file() */
	nfsd_file_put(file);

	return 0;
}
   2696
/*
 * Emit one "states" line for a layout stateid: stateid, superblock and
 * filename.  ls_file is used directly; no extra file reference is taken.
 */
static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_layout_stateid *ls;
	struct nfsd_file *file;

	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
	file = ls->ls_file;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: layout, ");

	/* XXX: What else would be useful? */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");

	return 0;
}
   2718
/*
 * seq_file show: dispatch to the per-type formatter for each stateid;
 * unknown stateid types produce no output.
 */
static int states_show(struct seq_file *s, void *v)
{
	struct nfs4_stid *st = v;

	switch (st->sc_type) {
	case NFS4_OPEN_STID:
		return nfs4_show_open(s, st);
	case NFS4_LOCK_STID:
		return nfs4_show_lock(s, st);
	case NFS4_DELEG_STID:
		return nfs4_show_deleg(s, st);
	case NFS4_LAYOUT_STID:
		return nfs4_show_layout(s, st);
	default:
		return 0; /* XXX: or SEQ_SKIP? */
	}
	/* XXX: copy stateids? */
}
   2737
   2738static struct seq_operations states_seq_ops = {
   2739	.start = states_start,
   2740	.next = states_next,
   2741	.stop = states_stop,
   2742	.show = states_show
   2743};
   2744
   2745static int client_states_open(struct inode *inode, struct file *file)
   2746{
   2747	struct seq_file *s;
   2748	struct nfs4_client *clp;
   2749	int ret;
   2750
   2751	clp = get_nfsdfs_clp(inode);
   2752	if (!clp)
   2753		return -ENXIO;
   2754
   2755	ret = seq_open(file, &states_seq_ops);
   2756	if (ret)
   2757		return ret;
   2758	s = file->private_data;
   2759	s->private = clp;
   2760	return 0;
   2761}
   2762
/*
 * release() for the "states" file: drop the client reference taken at
 * open time by client_states_open().
 */
static int client_opens_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct nfs4_client *clp = m->private;

	/* XXX: alternatively, we could get/drop in seq start/stop */
	drop_client(clp);
	return 0;
}
   2772
/* File operations for the per-client nfsdfs "states" file. */
static const struct file_operations client_states_fops = {
	.open		= client_states_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= client_opens_release,
};
   2779
   2780/*
   2781 * Normally we refuse to destroy clients that are in use, but here the
   2782 * administrator is telling us to just do it.  We also want to wait
   2783 * so the caller has a guarantee that the client's locks are gone by
   2784 * the time the write returns:
   2785 */
   2786static void force_expire_client(struct nfs4_client *clp)
   2787{
   2788	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
   2789	bool already_expired;
   2790
   2791	trace_nfsd_clid_admin_expired(&clp->cl_clientid);
   2792
   2793	spin_lock(&nn->client_lock);
   2794	clp->cl_time = 0;
   2795	spin_unlock(&nn->client_lock);
   2796
   2797	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
   2798	spin_lock(&nn->client_lock);
   2799	already_expired = list_empty(&clp->cl_lru);
   2800	if (!already_expired)
   2801		unhash_client_locked(clp);
   2802	spin_unlock(&nn->client_lock);
   2803
   2804	if (!already_expired)
   2805		expire_client(clp);
   2806	else
   2807		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
   2808}
   2809
/*
 * write() handler for the per-client "ctl" file.  The only accepted
 * command is the exact 7 bytes "expire\n", which forcibly expires the
 * client; anything else returns -EINVAL.
 */
static ssize_t client_ctl_write(struct file *file, const char __user *buf,
				   size_t size, loff_t *pos)
{
	char *data;
	struct nfs4_client *clp;

	data = simple_transaction_get(file, buf, size);
	if (IS_ERR(data))
		return PTR_ERR(data);
	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
		return -EINVAL;
	clp = get_nfsdfs_clp(file_inode(file));
	if (!clp)
		return -ENXIO;
	force_expire_client(clp);
	/* balance the reference taken by get_nfsdfs_clp() */
	drop_client(clp);
	return 7;
}
   2828
/* File operations for the per-client nfsdfs "ctl" file (write-only). */
static const struct file_operations client_ctl_fops = {
	.write		= client_ctl_write,
	.release	= simple_transaction_release,
};
   2833
/* Files created in each client's nfsdfs directory; empty entry terminates. */
static const struct tree_descr client_files[] = {
	[0] = {"info", &client_info_fops, S_IRUSR},
	[1] = {"states", &client_states_fops, S_IRUSR},
	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
	[3] = {""},
};
   2840
/*
 * Build a complete nfs4_client for a new (unconfirmed) client: allocate it,
 * copy the request credential, assign a clientid, record the peer address
 * and verifier, and create its nfsdfs directory.  Returns NULL on any
 * failure (the partially built client is freed).
 */
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct dentry *dentries[ARRAY_SIZE(client_files)];

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	gen_clid(clp, nn);
	kref_init(&clp->cl_nfsdfs.cl_ref);
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = ktime_get_boottime_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
	clp->cl_cb_session = NULL;
	clp->net = net;
	clp->cl_nfsd_dentry = nfsd_client_mkdir(
		nn, &clp->cl_nfsdfs,
		clp->cl_clientid.cl_id - nn->clientid_base,
		client_files, dentries);
	clp->cl_nfsd_info_dentry = dentries[0];
	if (!clp->cl_nfsd_dentry) {
		free_client(clp);
		return NULL;
	}
	return clp;
}
   2880
   2881static void
   2882add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
   2883{
   2884	struct rb_node **new = &(root->rb_node), *parent = NULL;
   2885	struct nfs4_client *clp;
   2886
   2887	while (*new) {
   2888		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
   2889		parent = *new;
   2890
   2891		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
   2892			new = &((*new)->rb_left);
   2893		else
   2894			new = &((*new)->rb_right);
   2895	}
   2896
   2897	rb_link_node(&new_clp->cl_namenode, parent, new);
   2898	rb_insert_color(&new_clp->cl_namenode, root);
   2899}
   2900
   2901static struct nfs4_client *
   2902find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
   2903{
   2904	int cmp;
   2905	struct rb_node *node = root->rb_node;
   2906	struct nfs4_client *clp;
   2907
   2908	while (node) {
   2909		clp = rb_entry(node, struct nfs4_client, cl_namenode);
   2910		cmp = compare_blob(&clp->cl_name, name);
   2911		if (cmp > 0)
   2912			node = node->rb_left;
   2913		else if (cmp < 0)
   2914			node = node->rb_right;
   2915		else
   2916			return clp;
   2917	}
   2918	return NULL;
   2919}
   2920
/*
 * Hash a client into the unconfirmed name tree and id table and renew it.
 * Caller must hold client_lock.
 */
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}
   2935
/*
 * Promote an unconfirmed client to the confirmed tables: move its
 * id-hash entry, re-home its name-tree node, set the CONFIRMED flag,
 * and renew its lease.  Caller must hold nn->client_lock (asserted).
 */
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	trace_nfsd_clid_confirmed(&clp->cl_clientid);
	renew_client_locked(clp);
}
   2951
   2952static struct nfs4_client *
   2953find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
   2954{
   2955	struct nfs4_client *clp;
   2956	unsigned int idhashval = clientid_hashval(clid->cl_id);
   2957
   2958	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
   2959		if (same_clid(&clp->cl_clientid, clid)) {
   2960			if ((bool)clp->cl_minorversion != sessions)
   2961				return NULL;
   2962			renew_client_locked(clp);
   2963			return clp;
   2964		}
   2965	}
   2966	return NULL;
   2967}
   2968
   2969static struct nfs4_client *
   2970find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
   2971{
   2972	struct list_head *tbl = nn->conf_id_hashtbl;
   2973
   2974	lockdep_assert_held(&nn->client_lock);
   2975	return find_client_in_id_table(tbl, clid, sessions);
   2976}
   2977
   2978static struct nfs4_client *
   2979find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
   2980{
   2981	struct list_head *tbl = nn->unconf_id_hashtbl;
   2982
   2983	lockdep_assert_held(&nn->client_lock);
   2984	return find_client_in_id_table(tbl, clid, sessions);
   2985}
   2986
   2987static bool clp_used_exchangeid(struct nfs4_client *clp)
   2988{
   2989	return clp->cl_exchange_flags != 0;
   2990} 
   2991
   2992static struct nfs4_client *
   2993find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
   2994{
   2995	lockdep_assert_held(&nn->client_lock);
   2996	return find_clp_in_name_tree(name, &nn->conf_name_tree);
   2997}
   2998
   2999static struct nfs4_client *
   3000find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
   3001{
   3002	lockdep_assert_held(&nn->client_lock);
   3003	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
   3004}
   3005
/*
 * Record the client's SETCLIENTID callback parameters in
 * clp->cl_cb_conn.  On any validation or parse failure the callback
 * address is cleared (AF_UNSPEC, zero length) rather than failing
 * the operation itself, which leaves the client without a usable
 * callback channel.
 */
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	/* Parse the universal address the client gave us. */
	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	/* The parsed family must agree with the netid string. */
	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	/* Source callbacks from the address this request arrived on. */
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	trace_nfsd_cb_args(clp, conn);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	trace_nfsd_cb_nodelegs(clp);
	return;
}
   3046
   3047/*
   3048 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
   3049 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr->buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Always remember opcount, status, and the caller's cred ... */
	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	/*
	 * ... but only cache the reply body itself when
	 * nfsd4_cache_this() says so; otherwise clear CACHED so a
	 * retry gets nfserr_retry_uncached_rep.
	 */
	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	/* Copy everything encoded after the SEQUENCE op into the slot. */
	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}
   3078
   3079/*
   3080 * Encode the replay sequence operation from the slot values.
   3081 * If cachethis is FALSE encode the uncached rep error on the next
   3082 * operation which sets resp->p and increments resp->opcnt for
   3083 * nfs4svc_encode_compoundres.
   3084 *
   3085 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Cached reply body available: caller will append it. */
	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		/*
		 * Reply body wasn't cached: answer the next op with
		 * RETRY_UNCACHED_REP and stop there.
		 */
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}
   3113
   3114/*
   3115 * The sequence operation is not cached because we can use the slot and
   3116 * session values.
   3117 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Re-encode the SEQUENCE op (and possibly an uncached-rep error). */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	/*
	 * Splice the cached reply body after the SEQUENCE op.  The
	 * slot cache was bounded at store time, so reservation failure
	 * here indicates a server bug.
	 */
	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	/* Report the cached opcount/status as if replying afresh. */
	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
   3144
   3145/*
   3146 * Set the exchange_id flags returned by the server.
   3147 */
   3148static void
   3149nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
   3150{
   3151#ifdef CONFIG_NFSD_PNFS
   3152	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
   3153#else
   3154	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
   3155#endif
   3156
   3157	/* Referrals are supported, Migration is not. */
   3158	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
   3159
   3160	/* set the wire flags to return to client. */
   3161	clid->flags = new->cl_exchange_flags;
   3162}
   3163
   3164static bool client_has_openowners(struct nfs4_client *clp)
   3165{
   3166	struct nfs4_openowner *oo;
   3167
   3168	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
   3169		if (!list_empty(&oo->oo_owner.so_stateids))
   3170			return true;
   3171	}
   3172	return false;
   3173}
   3174
   3175static bool client_has_state(struct nfs4_client *clp)
   3176{
   3177	return client_has_openowners(clp)
   3178#ifdef CONFIG_NFSD_PNFS
   3179		|| !list_empty(&clp->cl_lo_states)
   3180#endif
   3181		|| !list_empty(&clp->cl_delegations)
   3182		|| !list_empty(&clp->cl_sessions)
   3183		|| !list_empty(&clp->async_copies);
   3184}
   3185
   3186static __be32 copy_impl_id(struct nfs4_client *clp,
   3187				struct nfsd4_exchange_id *exid)
   3188{
   3189	if (!exid->nii_domain.data)
   3190		return 0;
   3191	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
   3192	if (!clp->cl_nii_domain.data)
   3193		return nfserr_jukebox;
   3194	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
   3195	if (!clp->cl_nii_name.data)
   3196		return nfserr_jukebox;
   3197	clp->cl_nii_time = exid->nii_time;
   3198	return 0;
   3199}
   3200
/*
 * EXCHANGE_ID (RFC 5661 Section 18.35).  Establishes (or updates) a
 * client record keyed by co_ownerid.  A candidate client ("new") is
 * always allocated up front; the case analysis under nn->client_lock
 * decides whether it replaces an existing confirmed client, becomes a
 * fresh unconfirmed client, or is discarded.  Whatever ends up in
 * "new" after the swap() below is freed on exit.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %u\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;
	status = copy_impl_id(new, exid);
	if (status)
		goto out_nolock;

	/* State-protection negotiation: only MACH_CRED and NONE work. */
	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
		break;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		fallthrough;
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			trace_nfsd_clid_confirmed_r(conf);
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				trace_nfsd_clid_cred_mismatch(conf, rqstp);
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			trace_nfsd_clid_confirmed_r(conf);
			goto out_copy;
		}
		/* case 5, client reboot */
		trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1, new owner ID */
	trace_nfsd_clid_fresh(new);

out_new:
	/* A confirmed client with no state may be replaced outright. */
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		trace_nfsd_clid_replaced(&conf->cl_clientid);
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	/*
	 * After the swap, "conf" is the client we reply with and
	 * "new" holds the replaced client (or NULL) for the
	 * expire_client() at out_nolock.
	 */
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf) {
		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
		expire_client(unconf);
	}
	return status;
}
   3370
   3371static __be32
   3372check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
   3373{
   3374	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
   3375		slot_seqid);
   3376
   3377	/* The slot is in use, and no response has been sent. */
   3378	if (slot_inuse) {
   3379		if (seqid == slot_seqid)
   3380			return nfserr_jukebox;
   3381		else
   3382			return nfserr_seq_misordered;
   3383	}
   3384	/* Note unsigned 32-bit arithmetic handles wraparound: */
   3385	if (likely(seqid == slot_seqid + 1))
   3386		return nfs_ok;
   3387	if (seqid == slot_seqid)
   3388		return nfserr_replay_cache;
   3389	return nfserr_seq_misordered;
   3390}
   3391
   3392/*
   3393 * Cache the create session result into the create session single DRC
   3394 * slot cache by saving the xdr structure. sl_seqid has been set.
   3395 * Do this for solo or embedded create session operations.
   3396 */
   3397static void
   3398nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
   3399			   struct nfsd4_clid_slot *slot, __be32 nfserr)
   3400{
   3401	slot->sl_status = nfserr;
   3402	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
   3403}
   3404
   3405static __be32
   3406nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
   3407			    struct nfsd4_clid_slot *slot)
   3408{
   3409	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
   3410	return slot->sl_status;
   3411}
   3412
   3413#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
   3414			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
   3415			1 +	/* MIN tag is length with zero, only length */ \
   3416			3 +	/* version, opcount, opcode */ \
   3417			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
   3418				/* seqid, slotID, slotID, cache */ \
   3419			4 ) * sizeof(__be32))
   3420
   3421#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
   3422			2 +	/* verifier: AUTH_NULL, length 0 */\
   3423			1 +	/* status */ \
   3424			1 +	/* MIN tag is length with zero, only length */ \
   3425			3 +	/* opcount, opcode, opstatus*/ \
   3426			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
   3427				/* seqid, slotID, slotID, slotID, status */ \
   3428			5 ) * sizeof(__be32))
   3429
/*
 * Validate and clamp the client's requested fore channel attributes.
 * Sizes below the minimum needed for a bare SEQUENCE compound are
 * rejected; everything else is clamped to server limits, and the
 * final slot count is whatever DRC memory accounting grants.
 */
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 * Note that we always allow at least one slot, because our
	 * accounting is soft and provides no guarantees either way.
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);

	return nfs_ok;
}
   3459
   3460/*
   3461 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
   3462 * These are based on similar macros in linux/sunrpc/msg_prot.h .
   3463 */
   3464#define RPC_MAX_HEADER_WITH_AUTH_SYS \
   3465	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
   3466
   3467#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
   3468	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
   3469
   3470#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
   3471				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
   3472#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
   3473				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
   3474				 sizeof(__be32))
   3475
   3476static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
   3477{
   3478	ca->headerpadsz = 0;
   3479
   3480	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
   3481		return nfserr_toosmall;
   3482	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
   3483		return nfserr_toosmall;
   3484	ca->maxresp_cached = 0;
   3485	if (ca->maxops < 2)
   3486		return nfserr_toosmall;
   3487
   3488	return nfs_ok;
   3489}
   3490
   3491static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
   3492{
   3493	switch (cbs->flavor) {
   3494	case RPC_AUTH_NULL:
   3495	case RPC_AUTH_UNIX:
   3496		return nfs_ok;
   3497	default:
   3498		/*
   3499		 * GSS case: the spec doesn't allow us to return this
   3500		 * error.  But it also doesn't allow us not to support
   3501		 * GSS.
   3502		 * I'd rather this fail hard than return some error the
   3503		 * client might think it can already handle:
   3504		 */
   3505		return nfserr_encr_alg_unsupp;
   3506	}
   3507}
   3508
/*
 * CREATE_SESSION (RFC 5661 Section 18.36).  Allocates the session
 * and connection before taking nn->client_lock, then either replays
 * a cached result (confirmed client, same seqid), confirms an
 * unconfirmed client (possibly expiring an older confirmed client of
 * the same name), or fails.  Note the error-path cleanup order:
 * out_free_conn drops the lock before freeing, and DRC memory
 * reserved by check_forechannel_attrs() is always released on error.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		/* Confirmed client: this may be a retry -- check the
		 * clientid slot and replay the cached result if so. */
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		status = nfserr_clid_inuse;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			trace_nfsd_clid_cred_mismatch(unconf, rqstp);
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		/* Confirming this client may displace an older
		 * confirmed client with the same name. */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
			trace_nfsd_clid_replaced(&old->cl_clientid);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	if (conf == unconf)
		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
   3626
   3627static __be32 nfsd4_map_bcts_dir(u32 *dir)
   3628{
   3629	switch (*dir) {
   3630	case NFS4_CDFC4_FORE:
   3631	case NFS4_CDFC4_BACK:
   3632		return nfs_ok;
   3633	case NFS4_CDFC4_FORE_OR_BOTH:
   3634	case NFS4_CDFC4_BACK_OR_BOTH:
   3635		*dir = NFS4_CDFC4_BOTH;
   3636		return nfs_ok;
   3637	}
   3638	return nfserr_inval;
   3639}
   3640
/*
 * BACKCHANNEL_CTL: update the current session's callback program
 * number and security parameters, then re-probe the backchannel so
 * the new settings take effect.
 */
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
   3662
   3663static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
   3664{
   3665	struct nfsd4_conn *c;
   3666
   3667	list_for_each_entry(c, &s->se_conns, cn_persession) {
   3668		if (c->cn_xprt == xpt) {
   3669			return c;
   3670		}
   3671	}
   3672	return NULL;
   3673}
   3674
/*
 * Check whether the connection this request arrived on is already
 * bound to @session in a way compatible with the requested direction
 * @req.  Returns nfs_ok (and the connection via @conn, if non-NULL),
 * nfserr_noent if the transport is not bound to the session, or
 * nfserr_inval on a direction conflict.
 */
static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
	struct nfs4_client *clp = session->se_client;
	struct svc_xprt *xpt = rqst->rq_xprt;
	struct nfsd4_conn *c;
	__be32 status;

	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(xpt, session);
	if (!c)
		status = nfserr_noent;
	else if (req == c->cn_flags)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_BACK)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_FORE)
		status = nfs_ok;
	else
		status = nfserr_inval;
	spin_unlock(&clp->cl_lock);
	/* NOTE: *conn is returned after cl_lock is dropped; callers
	 * rely on the session reference to keep it valid. */
	if (status == nfs_ok && conn)
		*conn = c;
	return status;
}
   3703
/*
 * BIND_CONN_TO_SESSION (RFC 5661 Section 18.34).  Must be the only
 * op in its compound.  If the current connection is already bound
 * compatibly, possibly upgrade it to carry the backchannel;
 * otherwise allocate and bind a new nfsd4_conn for it.
 */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	/* find_in_sessionid_hashtbl takes a session reference,
	 * dropped at "out" below. */
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_match_existing_connection(rqstp, session,
			bcts->dir, &conn);
	if (status == nfs_ok) {
		/* Already bound: add the backchannel bit if requested. */
		if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
				bcts->dir == NFS4_CDFC4_BACK)
			conn->cn_flags |= NFS4_CDFC4_BACK;
		nfsd4_probe_callback(session->se_client);
		goto out;
	}
	if (status == nfserr_inval)
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}
   3750
   3751static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
   3752{
   3753	if (!cstate->session)
   3754		return false;
   3755	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
   3756}
   3757
/*
 * DESTROY_SESSION (RFC 5661 Section 18.37).  If the target session
 * is the one the compound itself is using, the op must be the last
 * in the compound, and we account for our own reference
 * (ref_held_by_me) when checking that the session can die.  The
 * client_lock is dropped around the synchronous callback probe and
 * retaken to release the session reference.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
   3800
/*
 * Take ownership of @new: either bind it to @ses as a fore-channel
 * connection, or free it.  If the transport is already bound to the
 * session the duplicate is freed and nfs_ok returned.  A client that
 * negotiated SP4_MACH_CRED must bind connections explicitly via
 * BIND_CONN_TO_SESSION, so implicit binding is refused for it.
 */
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;	/* already bound; status is nfs_ok */
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
   3827
   3828static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
   3829{
   3830	struct nfsd4_compoundargs *args = rqstp->rq_argp;
   3831
   3832	return args->opcnt > session->se_fchannel.maxops;
   3833}
   3834
   3835static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
   3836				  struct nfsd4_session *session)
   3837{
   3838	struct xdr_buf *xb = &rqstp->rq_arg;
   3839
   3840	return xb->len > session->se_fchannel.maxreq_sz;
   3841}
   3842
/*
 * Sanity-check that a request hitting the replay cache plausibly
 * matches the request whose reply was cached: same cachethis flag,
 * compatible op counts, and (the only check the spec requires) the
 * same credential.
 */
static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}
   3875
/*
 * nfsd4_sequence - process an NFSv4.1+ SEQUENCE operation
 *
 * Validates the session and slot, detects replays (returning the cached
 * reply when appropriate), binds this connection to the session, and on
 * success stores slot/session/client in @cstate for the remainder of
 * the compound.  The session reference taken here is dropped later in
 * nfsd4_sequence_done().
 */
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* SEQUENCE must be the first operation of the compound. */
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		/* Replay is only legal from a slot that holds a cached reply. */
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	/* Consumes conn in all cases (hashed or freed). */
	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	/* Cap the reply buffer by what the channel (and cache) can hold. */
	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	/* Report backchannel health and revoked state via sr_status_flags. */
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
   3999
   4000void
   4001nfsd4_sequence_done(struct nfsd4_compoundres *resp)
   4002{
   4003	struct nfsd4_compound_state *cs = &resp->cstate;
   4004
   4005	if (nfsd4_has_session(cs)) {
   4006		if (cs->status != nfserr_replay_cache) {
   4007			nfsd4_store_cache_entry(resp);
   4008			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
   4009		}
   4010		/* Drop session reference that was taken in nfsd4_sequence() */
   4011		nfsd4_put_session(cs->session);
   4012	} else if (cs->clp)
   4013		put_client_renew(cs->clp);
   4014}
   4015
/*
 * nfsd4_destroy_clientid - process a DESTROY_CLIENTID operation
 *
 * A confirmed client may only be destroyed when it holds no state and
 * can be marked expired; an unconfirmed client is destroyed directly.
 * The caller's credentials must match the client's machine credentials.
 */
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	/* A clientid should never be both confirmed and unconfirmed. */
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		/* Don't expire the client below if the creds don't match. */
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	trace_nfsd_clid_destroyed(&clp->cl_clientid);
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	/* expire_client() may sleep, so it runs outside the lock. */
	if (clp)
		expire_client(clp);
	return status;
}
   4060
/*
 * nfsd4_reclaim_complete - process a RECLAIM_COMPLETE operation
 *
 * Marks the client's reclaim phase done and creates its stable-storage
 * record.  The per-filesystem (rca_one_fs) variant is accepted but
 * intentionally ignored.
 */
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	struct nfs4_client *clp = cstate->clp;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	/* test_and_set_bit also detects a repeated RECLAIM_COMPLETE. */
	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(clp))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
	nfsd4_client_record_create(clp);
	inc_reclaim_complete(clp);
out:
	return status;
}
   4101
/*
 * nfsd4_setclientid - process an NFSv4.0 SETCLIENTID operation
 *
 * Creates a new unconfirmed client record for @clname.  If a confirmed
 * client with the same name and verifier exists, the new record reuses
 * its clientid so a subsequent SETCLIENTID_CONFIRM can replace it.  Any
 * pre-existing unconfirmed client with the same name is discarded.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj 	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32 			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* Allocate before taking the lock; freed below if unused. */
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			trace_nfsd_clid_cred_mismatch(conf, rqstp);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf) {
		if (same_verf(&conf->cl_verifier, &clverifier)) {
			/* Same client re-registering: keep its clientid. */
			copy_clid(new, conf);
			gen_confirm(new, nn);
		} else
			/* Probably a client reboot (new boot verifier). */
			trace_nfsd_clid_verf_mismatch(conf, rqstp,
						      &clverifier);
	} else
		trace_nfsd_clid_fresh(new);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf) {
		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
		expire_client(unconf);
	}
	return status;
}
   4158
/*
 * nfsd4_setclientid_confirm - process an NFSv4.0 SETCLIENTID_CONFIRM
 *
 * Confirms the unconfirmed client whose confirm verifier matches, or
 * renews an already-confirmed client when the client retransmits the
 * confirm.  A previously-confirmed client being replaced is expired
 * outside the lock at the end.
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
		trace_nfsd_clid_cred_mismatch(unconf, rqstp);
		goto out;
	}
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
		trace_nfsd_clid_cred_mismatch(conf, rqstp);
		goto out;
	}
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		/* No matching unconfirmed record: either this is a
		 * retransmitted confirm for the confirmed client, or
		 * the clientid is stale. */
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			status = nfs_ok;
		} else
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) {
		/* Confirmed record exists: adopt the unconfirmed record's
		 * callback info and discard the unconfirmed record. */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else {
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred)) {
				old = NULL;
				goto out;
			}
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
			trace_nfsd_clid_replaced(&old->cl_clientid);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	/* Hold the client across the unlocked callback probe below. */
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	if (conf == unconf)
		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
   4240
/* Allocate an uninitialized nfs4_file; see nfsd4_init_file() for setup. */
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}
   4245
   4246/* OPEN Share state helper functions */
/*
 * Initialize a freshly-allocated nfs4_file for @fh and hash it into
 * file_hashtbl.  Caller must hold state_lock; the file becomes visible
 * to RCU readers as soon as it is added to the hash.
 */
static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	fp->fi_aliased = false;
	fp->fi_inode = d_inode(fh->fh_dentry);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	/* Publish last, once the file is fully initialized. */
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
   4271
/*
 * Tear down all nfsd4 slab caches.  kmem_cache_destroy() tolerates a
 * NULL cache, so this is safe even after a partial nfsd4_init_slabs().
 */
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}
   4283
/*
 * Create all slab caches used by nfsd4 state handling.  Returns 0 on
 * success or -ENOMEM, in which case any caches created so far are
 * destroyed in reverse order by the goto-unwind ladder below.
 */
int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	return -ENOMEM;
}
   4332
   4333static void init_nfs4_replay(struct nfs4_replay *rp)
   4334{
   4335	rp->rp_status = nfserr_serverfault;
   4336	rp->rp_buflen = 0;
   4337	rp->rp_buf = rp->rp_ibuf;
   4338	mutex_init(&rp->rp_mutex);
   4339}
   4340
   4341static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
   4342		struct nfs4_stateowner *so)
   4343{
   4344	if (!nfsd4_has_session(cstate)) {
   4345		mutex_lock(&so->so_replay.rp_mutex);
   4346		cstate->replay_owner = nfs4_get_stateowner(so);
   4347	}
   4348}
   4349
   4350void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
   4351{
   4352	struct nfs4_stateowner *so = cstate->replay_owner;
   4353
   4354	if (so != NULL) {
   4355		cstate->replay_owner = NULL;
   4356		mutex_unlock(&so->so_replay.rp_mutex);
   4357		nfs4_put_stateowner(so);
   4358	}
   4359}
   4360
   4361static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
   4362{
   4363	struct nfs4_stateowner *sop;
   4364
   4365	sop = kmem_cache_alloc(slab, GFP_KERNEL);
   4366	if (!sop)
   4367		return NULL;
   4368
   4369	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
   4370	if (!sop->so_owner.data) {
   4371		kmem_cache_free(slab, sop);
   4372		return NULL;
   4373	}
   4374
   4375	INIT_LIST_HEAD(&sop->so_stateids);
   4376	sop->so_client = clp;
   4377	init_nfs4_replay(&sop->so_replay);
   4378	atomic_set(&sop->so_count, 1);
   4379	return sop;
   4380}
   4381
/*
 * Hash an openowner into the client's owner-string hash and add it to
 * the client's list of openowners.  Caller must hold clp->cl_lock.
 */
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}
   4390
/* so_unhash callback for openowners. */
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}
   4395
   4396static void nfs4_free_openowner(struct nfs4_stateowner *so)
   4397{
   4398	struct nfs4_openowner *oo = openowner(so);
   4399
   4400	kmem_cache_free(openowner_slab, oo);
   4401}
   4402
/* Stateowner ops for openowners: unhash from the client, free to slab. */
static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};
   4407
   4408static struct nfs4_ol_stateid *
   4409nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
   4410{
   4411	struct nfs4_ol_stateid *local, *ret = NULL;
   4412	struct nfs4_openowner *oo = open->op_openowner;
   4413
   4414	lockdep_assert_held(&fp->fi_lock);
   4415
   4416	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
   4417		/* ignore lock owners */
   4418		if (local->st_stateowner->so_is_open_owner == 0)
   4419			continue;
   4420		if (local->st_stateowner != &oo->oo_owner)
   4421			continue;
   4422		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
   4423			ret = local;
   4424			refcount_inc(&ret->st_stid.sc_count);
   4425			break;
   4426		}
   4427	}
   4428	return ret;
   4429}
   4430
   4431static __be32
   4432nfsd4_verify_open_stid(struct nfs4_stid *s)
   4433{
   4434	__be32 ret = nfs_ok;
   4435
   4436	switch (s->sc_type) {
   4437	default:
   4438		break;
   4439	case 0:
   4440	case NFS4_CLOSED_STID:
   4441	case NFS4_CLOSED_DELEG_STID:
   4442		ret = nfserr_bad_stateid;
   4443		break;
   4444	case NFS4_REVOKED_DELEG_STID:
   4445		ret = nfserr_deleg_revoked;
   4446	}
   4447	return ret;
   4448}
   4449
/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	/* A CLOSE may have raced in before we got the mutex; re-verify
	 * the stateid and drop the mutex again if it is no longer usable. */
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}
   4462
/*
 * Find an existing open stateid and lock its st_mutex, retrying the
 * lookup if the stateid we found was closed before we could lock it.
 * On success the stateid is returned referenced and locked.
 */
static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;
	for (;;) {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		/* Lost a race with CLOSE; drop our reference and retry. */
		nfs4_put_stid(&stp->st_stid);
	}
	return stp;
}
   4477
/*
 * Allocate and hash a new openowner for @open, or return the existing
 * one if another thread inserted an equivalent owner first (in which
 * case the new allocation is freed).  Session-based opens are born
 * confirmed since they need no OPEN_CONFIRM.
 */
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	/* Re-check under the lock for a concurrently-inserted owner. */
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
   4508
/*
 * Initialize and hash the preallocated open stateid (open->op_stp) for
 * @fp, unless an existing OPEN stateid for this owner is found, in
 * which case that one is locked and returned instead.  On success the
 * returned stateid's st_mutex is held.
 */
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	/* Consume the preallocated stateid; caller sees op_stp == NULL. */
	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
   4558
   4559/*
   4560 * In the 4.0 case we need to keep the owners around a little while to handle
   4561 * CLOSE replay. We still do need to release any file access that is held by
   4562 * them before returning however.
   4563 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	/* Park the stateid on its owner and queue the owner on the
	 * per-net close LRU for eventual cleanup; any previously-parked
	 * stateid is released below. */
	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = ktime_get_boottime_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
   4600
   4601/* search file_hashtbl[] for file */
/*
 * Hash-chain walk shared by RCU readers and state_lock holders.
 * refcount_inc_not_zero guards against returning a file that is
 * concurrently being freed.
 */
static struct nfs4_file *
find_file_locked(struct svc_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
				lockdep_is_held(&state_lock)) {
		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}
   4616
/*
 * Insert @new into the file hash under state_lock, unless another
 * thread beat us to it, in which case the existing entry is returned
 * referenced and @new is left untouched for the caller to free.  Also
 * marks hard-link aliases (same inode, different filehandle) on the way.
 */
static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
				     unsigned int hashval)
{
	struct nfs4_file *fp;
	struct nfs4_file *ret = NULL;
	bool alias_found = false;

	spin_lock(&state_lock);
	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
				 lockdep_is_held(&state_lock)) {
		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				ret = fp;
		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
			fp->fi_aliased = alias_found = true;
	}
	if (likely(ret == NULL)) {
		nfsd4_init_file(fh, hashval, new);
		new->fi_aliased = alias_found;
		ret = new;
	}
	spin_unlock(&state_lock);
	return ret;
}
   4641
   4642static struct nfs4_file * find_file(struct svc_fh *fh)
   4643{
   4644	struct nfs4_file *fp;
   4645	unsigned int hashval = file_hashval(fh);
   4646
   4647	rcu_read_lock();
   4648	fp = find_file_locked(fh, hashval);
   4649	rcu_read_unlock();
   4650	return fp;
   4651}
   4652
   4653static struct nfs4_file *
   4654find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
   4655{
   4656	struct nfs4_file *fp;
   4657	unsigned int hashval = file_hashval(fh);
   4658
   4659	rcu_read_lock();
   4660	fp = find_file_locked(fh, hashval);
   4661	rcu_read_unlock();
   4662	if (fp)
   4663		return fp;
   4664
   4665	return insert_file(new, fh, hashval);
   4666}
   4667
   4668/*
   4669 * Called to check deny when READ with all zero stateid or
   4670 * WRITE with all zero or all one stateid
   4671 */
   4672static __be32
   4673nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
   4674{
   4675	struct nfs4_file *fp;
   4676	__be32 ret = nfs_ok;
   4677
   4678	fp = find_file(current_fh);
   4679	if (!fp)
   4680		return ret;
   4681	/* Check for conflicting share reservations */
   4682	spin_lock(&fp->fi_lock);
   4683	if (fp->fi_share_deny & deny_type)
   4684		ret = nfserr_locked;
   4685	spin_unlock(&fp->fi_lock);
   4686	put_nfs4_file(fp);
   4687	return ret;
   4688}
   4689
/*
 * Callback "prepare" hook for delegation recalls: blocks new delegations
 * on the file and queues the delegation on the per-net recall LRU so
 * the laundromat can time it out if the client never returns it.
 */
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (delegation_hashed(dp) && dp->dl_time == 0) {
		dp->dl_time = ktime_get_boottime_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}
   4712
/*
 * Callback "done" hook for delegation recalls.  Returns 1 to end the
 * callback, 0 to have the RPC retried after a delay.
 */
static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	/* The delegation is already gone; nothing left to recall. */
	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
	        return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_DELAY:
		/* Client asked us to back off; retry later. */
		rpc_delay(task, 2 * HZ);
		return 0;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		fallthrough;
	default:
		return 1;
	}
}
   4743
   4744static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
   4745{
   4746	struct nfs4_delegation *dp = cb_to_delegation(cb);
   4747
   4748	nfs4_put_stid(&dp->dl_stid);
   4749}
   4750
/* Callback ops driving the CB_RECALL state machine. */
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};
   4756
/* Take a reference on the delegation and queue its CB_RECALL callback. */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * i_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}
   4769
   4770/* Called from break_lease() with i_lock held. */
   4771static bool
   4772nfsd_break_deleg_cb(struct file_lock *fl)
   4773{
   4774	bool ret = false;
   4775	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
   4776	struct nfs4_file *fp = dp->dl_stid.sc_file;
   4777	struct nfs4_client *clp = dp->dl_stid.sc_client;
   4778	struct nfsd_net *nn;
   4779
   4780	trace_nfsd_cb_recall(&dp->dl_stid);
   4781
   4782	dp->dl_recalled = true;
   4783	atomic_inc(&clp->cl_delegs_in_recall);
   4784	if (try_to_expire_client(clp)) {
   4785		nn = net_generic(clp->net, nfsd_net_id);
   4786		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
   4787	}
   4788
   4789	/*
   4790	 * We don't want the locks code to timeout the lease for us;
   4791	 * we'll remove it ourself if a delegation isn't returned
   4792	 * in time:
   4793	 */
   4794	fl->fl_break_time = 0;
   4795
   4796	spin_lock(&fp->fi_lock);
   4797	fp->fi_had_conflict = true;
   4798	nfsd_break_one_deleg(dp);
   4799	spin_unlock(&fp->fi_lock);
   4800	return ret;
   4801}
   4802
   4803/**
   4804 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
   4805 * @fl: Lock state to check
   4806 *
   4807 * Return values:
   4808 *   %true: Lease conflict was resolved
   4809 *   %false: Lease conflict was not resolved.
   4810 */
static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
	struct nfs4_delegation *dl = fl->fl_owner;
	struct svc_rqst *rqst;
	struct nfs4_client *clp;

	/* Only an nfsd thread can be breaking its own client's lease. */
	if (!i_am_nfsd())
		return false;
	rqst = kthread_data(current);
	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
		return false;
	/* The breaker owns the lease iff it is the delegation's client. */
	clp = *(rqst->rq_lease_breaker);
	return dl->dl_stid.sc_client == clp;
}
   4826
   4827static int
   4828nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
   4829		     struct list_head *dispose)
   4830{
   4831	struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
   4832	struct nfs4_client *clp = dp->dl_stid.sc_client;
   4833
   4834	if (arg & F_UNLCK) {
   4835		if (dp->dl_recalled)
   4836			atomic_dec(&clp->cl_delegs_in_recall);
   4837		return lease_modify(onlist, arg, dispose);
   4838	} else
   4839		return -EAGAIN;
   4840}
   4841
/* Lease-manager callbacks installed on FL_DELEG leases for delegations. */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
   4847
   4848static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
   4849{
   4850	if (nfsd4_has_session(cstate))
   4851		return nfs_ok;
   4852	if (seqid == so->so_seqid - 1)
   4853		return nfserr_replay_me;
   4854	if (seqid == so->so_seqid)
   4855		return nfs_ok;
   4856	return nfserr_bad_seqid;
   4857}
   4858
/*
 * Look up a confirmed client by clientid and, if found, take a
 * cl_rpc_users reference under nn->client_lock.  The caller is
 * responsible for dropping that reference.  Returns NULL when no
 * confirmed client matches.
 */
static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
						struct nfsd_net *nn)
{
	struct nfs4_client *found;

	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (found)
		atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);
	return found;
}
   4871
   4872static __be32 set_client(clientid_t *clid,
   4873		struct nfsd4_compound_state *cstate,
   4874		struct nfsd_net *nn)
   4875{
   4876	if (cstate->clp) {
   4877		if (!same_clid(&cstate->clp->cl_clientid, clid))
   4878			return nfserr_stale_clientid;
   4879		return nfs_ok;
   4880	}
   4881	if (STALE_CLIENTID(clid, nn))
   4882		return nfserr_stale_clientid;
   4883	/*
   4884	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
   4885	 * set cstate->clp), so session = false:
   4886	 */
   4887	cstate->clp = lookup_clientid(clid, false, nn);
   4888	if (!cstate->clp)
   4889		return nfserr_expired;
   4890	return nfs_ok;
   4891}
   4892
/*
 * First phase of OPEN processing: find or create the openowner and
 * preallocate everything the second phase may need, so that
 * nfsd4_process_open2() cannot fail on allocation after the file has
 * already been created.  Anything left unused here is released by
 * nfsd4_cleanup_open_state().
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = set_client(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo) {
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	/* Existing confirmed owner: apply the v4.0 seqid/replay rules. */
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	/* Preallocate the open stateid that phase two will install. */
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
   4951
   4952static inline __be32
   4953nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
   4954{
   4955	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
   4956		return nfserr_openmode;
   4957	else
   4958		return nfs_ok;
   4959}
   4960
   4961static int share_access_to_flags(u32 share_access)
   4962{
   4963	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
   4964}
   4965
   4966static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
   4967{
   4968	struct nfs4_stid *ret;
   4969
   4970	ret = find_stateid_by_type(cl, s,
   4971				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
   4972	if (!ret)
   4973		return NULL;
   4974	return delegstateid(ret);
   4975}
   4976
   4977static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
   4978{
   4979	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
   4980	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
   4981}
   4982
/*
 * For delegate-cur style OPENs, validate the delegation stateid that
 * the client presented.  On success *dp is set to the delegation with
 * a reference held (caller must put it), and the openowner is
 * confirmed, since a valid delegation proves a previous open.  For
 * non-delegate-cur claims any lookup failure is simply ignored and
 * nfs_ok returned (with *dp untouched).
 */
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		/* Only 4.1+ clients have a way to learn of revocation. */
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	/* The delegation's mode must cover the requested access. */
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
   5015
   5016static inline int nfs4_access_to_access(u32 nfs4_access)
   5017{
   5018	int flags = 0;
   5019
   5020	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
   5021		flags |= NFSD_MAY_READ;
   5022	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
   5023		flags |= NFSD_MAY_WRITE;
   5024	return flags;
   5025}
   5026
   5027static inline __be32
   5028nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
   5029		struct nfsd4_open *open)
   5030{
   5031	struct iattr iattr = {
   5032		.ia_valid = ATTR_SIZE,
   5033		.ia_size = 0,
   5034	};
   5035	if (!open->op_truncate)
   5036		return 0;
   5037	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
   5038		return nfserr_inval;
   5039	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
   5040}
   5041
/*
 * Acquire VFS-level access for an OPEN and record the share access and
 * deny bits in the open stateid @stp.  @new_stp indicates whether @stp
 * was freshly created for this OPEN (passed through to deny-conflict
 * resolution).  On failure after the bitmaps were updated, the
 * out_put_access path rolls them back and drops the access taken.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open, bool new_stp)
{
	struct nfsd_file *nf = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		if (status != nfserr_share_denied) {
			spin_unlock(&fp->fi_lock);
			goto out;
		}
		/*
		 * If the helper reports the conflict may clear, return
		 * nfserr_jukebox so the client retries the OPEN.
		 */
		if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
				stp, open->op_share_deny, false))
			status = nfserr_jukebox;
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		if (status != nfserr_share_denied) {
			spin_unlock(&fp->fi_lock);
			goto out;
		}
		if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
				stp, open->op_share_access, true))
			status = nfserr_jukebox;
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		/* No cached open for this mode yet; open one outside fi_lock. */
		spin_unlock(&fp->fi_lock);

		if (!open->op_filp) {
			status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
			if (status != nfs_ok)
				goto out_put_access;
		} else {
			/* Hand the preopened struct file to the cache entry. */
			status = nfsd_file_create(rqstp, cur_fh, access, &nf);
			if (status != nfs_ok)
				goto out_put_access;
			nf->nf_file = open->op_filp;
			open->op_filp = NULL;
		}

		spin_lock(&fp->fi_lock);
		/* Recheck: another opener may have installed one meanwhile. */
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = nf;
			nf = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	/* We raced and lost; drop our now-unneeded reference. */
	if (nf)
		nfsd_file_put(nf);

	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
								access));
	if (status)
		goto out_put_access;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	/* Roll back the stateid bitmaps and the file access taken above. */
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
   5135
   5136static __be32
   5137nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
   5138		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
   5139		struct nfsd4_open *open)
   5140{
   5141	__be32 status;
   5142	unsigned char old_deny_bmap = stp->st_deny_bmap;
   5143
   5144	if (!test_access(open->op_share_access, stp))
   5145		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
   5146
   5147	/* test and set deny mode */
   5148	spin_lock(&fp->fi_lock);
   5149	status = nfs4_file_check_deny(fp, open->op_share_deny);
   5150	if (status == nfs_ok) {
   5151		if (status != nfserr_share_denied) {
   5152			set_deny(open->op_share_deny, stp);
   5153			fp->fi_share_deny |=
   5154				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
   5155		} else {
   5156			if (nfs4_resolve_deny_conflicts_locked(fp, false,
   5157					stp, open->op_share_deny, false))
   5158				status = nfserr_jukebox;
   5159		}
   5160	}
   5161	spin_unlock(&fp->fi_lock);
   5162
   5163	if (status != nfs_ok)
   5164		return status;
   5165
   5166	status = nfsd4_truncate(rqstp, cur_fh, open);
   5167	if (status != nfs_ok)
   5168		reset_union_bmap_deny(old_deny_bmap, stp);
   5169	return status;
   5170}
   5171
   5172/* Should we give out recallable state?: */
   5173static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
   5174{
   5175	if (clp->cl_cb_state == NFSD4_CB_UP)
   5176		return true;
   5177	/*
   5178	 * In the sessions case, since we don't have to establish a
   5179	 * separate connection for callbacks, we assume it's OK
   5180	 * until we hear otherwise:
   5181	 */
   5182	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
   5183}
   5184
   5185static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
   5186						int flag)
   5187{
   5188	struct file_lock *fl;
   5189
   5190	fl = locks_alloc_lock();
   5191	if (!fl)
   5192		return NULL;
   5193	fl->fl_lmops = &nfsd_lease_mng_ops;
   5194	fl->fl_flags = FL_DELEG;
   5195	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
   5196	fl->fl_end = OFFSET_MAX;
   5197	fl->fl_owner = (fl_owner_t)dp;
   5198	fl->fl_pid = current->tgid;
   5199	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
   5200	return fl;
   5201}
   5202
/*
 * Before handing a read delegation to @clp, check that nobody else has
 * the file open for write: neither another process on the server (via
 * i_writecount, discounting our own cached write opens) nor another
 * NFSv4 client (via the file's open stateids).  Returns 0 when it is
 * safe to delegate, -EAGAIN otherwise.
 */
static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
					 struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *st;
	struct file *f = fp->fi_deleg_file->nf_file;
	struct inode *ino = locks_inode(f);
	int writes;

	writes = atomic_read(&ino->i_writecount);
	if (!writes)
		return 0;
	/*
	 * There could be multiple filehandles (hence multiple
	 * nfs4_files) referencing this file, but that's not too
	 * common; let's just give up in that case rather than
	 * trying to go look up all the clients using that other
	 * nfs4_file as well:
	 */
	if (fp->fi_aliased)
		return -EAGAIN;
	/*
	 * If there's a close in progress, make sure that we see it
	 * clear any fi_fds[] entries before we see it decrement
	 * i_writecount:
	 */
	smp_mb__after_atomic();

	/* Our own cached write-capable opens account for some writers. */
	if (fp->fi_fds[O_WRONLY])
		writes--;
	if (fp->fi_fds[O_RDWR])
		writes--;
	if (writes > 0)
		return -EAGAIN; /* There may be non-NFSv4 writers */
	/*
	 * It's possible there are non-NFSv4 write opens in progress,
	 * but if they haven't incremented i_writecount yet then they
	 * also haven't called break lease yet; so, they'll break this
	 * lease soon enough.  So, all that's left to check for is NFSv4
	 * opens:
	 */
	spin_lock(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		if (st->st_openstp == NULL /* it's an open */ &&
		    access_permit_write(st) &&
		    st->st_stid.sc_client != clp) {
			spin_unlock(&fp->fi_lock);
			return -EAGAIN;
		}
	}
	spin_unlock(&fp->fi_lock);
	/*
	 * There's a small chance that we could be racing with another
	 * NFSv4 open.  However, any open that hasn't added itself to
	 * the fi_stateids list also hasn't called break_lease yet; so,
	 * they'll break this lease soon enough.
	 */
	return 0;
}
   5261
/*
 * Attempt to set up a read delegation for @clp on @fp: take a
 * fi_delegees reference on the file's read-open, allocate the
 * delegation, install the FL_DELEG lease via vfs_setlease(), re-check
 * for conflicts, and finally hash the delegation.  Returns the
 * delegation with a reference held, or ERR_PTR (-EAGAIN on conflict,
 * -ENOMEM on allocation failure).
 */
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	nf = find_readable_file(fp);
	if (!nf) {
		/*
		 * We probably could attempt another open and get a read
		 * delegation, but for now, don't bother until the
		 * client actually sends us one.
		 */
		return ERR_PTR(-EAGAIN);
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = nf;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		nf = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	/* nf was either installed as fi_deleg_file (nf == NULL now) or
	 * is a duplicate reference we no longer need. */
	if (nf)
		nfsd_file_put(nf);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, fh, odstate);
	if (!dp)
		goto out_delegees;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		goto out_clnt_odstate;

	/* vfs_setlease() may consume fl; free it only if it didn't. */
	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;
	/* Recheck for writers that raced in before the lease was set. */
	status = nfsd4_check_conflicting_opens(clp, fp);
	if (status)
		goto out_unlock;

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (fp->fi_had_conflict)
		status = -EAGAIN;
	else
		status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		goto out_unlock;

	return dp;
out_unlock:
	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_stid(&dp->dl_stid);
out_delegees:
	put_deleg_file(fp);
	return ERR_PTR(status);
}
   5347
   5348static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
   5349{
   5350	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
   5351	if (status == -EAGAIN)
   5352		open->op_why_no_deleg = WND4_CONTENTION;
   5353	else {
   5354		open->op_why_no_deleg = WND4_RESOURCE;
   5355		switch (open->op_deleg_want) {
   5356		case NFS4_SHARE_WANT_READ_DELEG:
   5357		case NFS4_SHARE_WANT_WRITE_DELEG:
   5358		case NFS4_SHARE_WANT_ANY_DELEG:
   5359			break;
   5360		case NFS4_SHARE_WANT_CANCEL:
   5361			open->op_why_no_deleg = WND4_CANCELLED;
   5362			break;
   5363		case NFS4_SHARE_WANT_NO_DELEG:
   5364			WARN_ON_ONCE(1);
   5365		}
   5366	}
   5367}
   5368
   5369/*
   5370 * Attempt to hand out a delegation.
   5371 *
   5372 * Note we don't support write delegations, and won't until the vfs has
   5373 * proper support for them.
   5374 */
   5375static void
   5376nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
   5377			struct nfs4_ol_stateid *stp)
   5378{
   5379	struct nfs4_delegation *dp;
   5380	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
   5381	struct nfs4_client *clp = stp->st_stid.sc_client;
   5382	int cb_up;
   5383	int status = 0;
   5384
   5385	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
   5386	open->op_recall = 0;
   5387	switch (open->op_claim_type) {
   5388		case NFS4_OPEN_CLAIM_PREVIOUS:
   5389			if (!cb_up)
   5390				open->op_recall = 1;
   5391			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
   5392				goto out_no_deleg;
   5393			break;
   5394		case NFS4_OPEN_CLAIM_NULL:
   5395		case NFS4_OPEN_CLAIM_FH:
   5396			/*
   5397			 * Let's not give out any delegations till everyone's
   5398			 * had the chance to reclaim theirs, *and* until
   5399			 * NLM locks have all been reclaimed:
   5400			 */
   5401			if (locks_in_grace(clp->net))
   5402				goto out_no_deleg;
   5403			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
   5404				goto out_no_deleg;
   5405			break;
   5406		default:
   5407			goto out_no_deleg;
   5408	}
   5409	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
   5410	if (IS_ERR(dp))
   5411		goto out_no_deleg;
   5412
   5413	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
   5414
   5415	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
   5416	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
   5417	nfs4_put_stid(&dp->dl_stid);
   5418	return;
   5419out_no_deleg:
   5420	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
   5421	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
   5422	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
   5423		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
   5424		open->op_recall = 1;
   5425	}
   5426
   5427	/* 4.1 client asking for a delegation? */
   5428	if (open->op_deleg_want)
   5429		nfsd4_open_deleg_none_ext(open, status);
   5430	return;
   5431}
   5432
   5433static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
   5434					struct nfs4_delegation *dp)
   5435{
   5436	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
   5437	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
   5438		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
   5439		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
   5440	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
   5441		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
   5442		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
   5443		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
   5444	}
   5445	/* Otherwise the client must be confused wanting a delegation
   5446	 * it already has, therefore we don't return
   5447	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
   5448	 */
   5449}
   5450
   5451/**
   5452 * nfsd4_process_open2 - finish open processing
   5453 * @rqstp: the RPC transaction being executed
   5454 * @current_fh: NFSv4 COMPOUND's current filehandle
   5455 * @open: OPEN arguments
   5456 *
   5457 * If successful, (1) truncate the file if open->op_truncate was
   5458 * set, (2) set open->op_stateid, (3) set open->op_delegation.
   5459 *
   5460 * Returns %nfs_ok on success; otherwise an nfs4stat value in
   5461 * network byte order is returned.
   5462 */
   5463__be32
   5464nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
   5465{
   5466	struct nfsd4_compoundres *resp = rqstp->rq_resp;
   5467	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
   5468	struct nfs4_file *fp = NULL;
   5469	struct nfs4_ol_stateid *stp = NULL;
   5470	struct nfs4_delegation *dp = NULL;
   5471	__be32 status;
   5472	bool new_stp = false;
   5473
   5474	/*
   5475	 * Lookup file; if found, lookup stateid and check open request,
   5476	 * and check for delegations in the process of being recalled.
   5477	 * If not found, create the nfs4_file struct
   5478	 */
   5479	fp = find_or_add_file(open->op_file, current_fh);
   5480	if (fp != open->op_file) {
   5481		status = nfs4_check_deleg(cl, open, &dp);
   5482		if (status)
   5483			goto out;
   5484		stp = nfsd4_find_and_lock_existing_open(fp, open);
   5485	} else {
   5486		open->op_file = NULL;
   5487		status = nfserr_bad_stateid;
   5488		if (nfsd4_is_deleg_cur(open))
   5489			goto out;
   5490	}
   5491
   5492	if (!stp) {
   5493		stp = init_open_stateid(fp, open);
   5494		if (!open->op_stp)
   5495			new_stp = true;
   5496	}
   5497
   5498	/*
   5499	 * OPEN the file, or upgrade an existing OPEN.
   5500	 * If truncate fails, the OPEN fails.
   5501	 *
   5502	 * stp is already locked.
   5503	 */
   5504	if (!new_stp) {
   5505		/* Stateid was found, this is an OPEN upgrade */
   5506		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
   5507		if (status) {
   5508			mutex_unlock(&stp->st_mutex);
   5509			goto out;
   5510		}
   5511	} else {
   5512		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
   5513		if (status) {
   5514			stp->st_stid.sc_type = NFS4_CLOSED_STID;
   5515			release_open_stateid(stp);
   5516			mutex_unlock(&stp->st_mutex);
   5517			goto out;
   5518		}
   5519
   5520		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
   5521							open->op_odstate);
   5522		if (stp->st_clnt_odstate == open->op_odstate)
   5523			open->op_odstate = NULL;
   5524	}
   5525
   5526	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
   5527	mutex_unlock(&stp->st_mutex);
   5528
   5529	if (nfsd4_has_session(&resp->cstate)) {
   5530		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
   5531			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
   5532			open->op_why_no_deleg = WND4_NOT_WANTED;
   5533			goto nodeleg;
   5534		}
   5535	}
   5536
   5537	/*
   5538	* Attempt to hand out a delegation. No error return, because the
   5539	* OPEN succeeds even if we fail.
   5540	*/
   5541	nfs4_open_delegation(current_fh, open, stp);
   5542nodeleg:
   5543	status = nfs_ok;
   5544	trace_nfsd_open(&stp->st_stid.sc_stateid);
   5545out:
   5546	/* 4.1 client trying to upgrade/downgrade delegation? */
   5547	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
   5548	    open->op_deleg_want)
   5549		nfsd4_deleg_xgrade_none_ext(open, dp);
   5550
   5551	if (fp)
   5552		put_nfs4_file(fp);
   5553	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
   5554		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
   5555	/*
   5556	* To finish the open response, we just need to set the rflags.
   5557	*/
   5558	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
   5559	if (nfsd4_has_session(&resp->cstate))
   5560		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
   5561	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
   5562		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
   5563
   5564	if (dp)
   5565		nfs4_put_stid(&dp->dl_stid);
   5566	if (stp)
   5567		nfs4_put_stid(&stp->st_stid);
   5568
   5569	return status;
   5570}
   5571
   5572void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
   5573			      struct nfsd4_open *open)
   5574{
   5575	if (open->op_openowner) {
   5576		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
   5577
   5578		nfsd4_cstate_assign_replay(cstate, so);
   5579		nfs4_put_stateowner(so);
   5580	}
   5581	if (open->op_file)
   5582		kmem_cache_free(file_slab, open->op_file);
   5583	if (open->op_stp)
   5584		nfs4_put_stid(&open->op_stp->st_stid);
   5585	if (open->op_odstate)
   5586		kmem_cache_free(odstate_slab, open->op_odstate);
   5587}
   5588
   5589__be32
   5590nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
   5591	    union nfsd4_op_u *u)
   5592{
   5593	clientid_t *clid = &u->renew;
   5594	struct nfs4_client *clp;
   5595	__be32 status;
   5596	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
   5597
   5598	trace_nfsd_clid_renew(clid);
   5599	status = set_client(clid, cstate, nn);
   5600	if (status)
   5601		return status;
   5602	clp = cstate->clp;
   5603	if (!list_empty(&clp->cl_delegations)
   5604			&& clp->cl_cb_state != NFSD4_CB_UP)
   5605		return nfserr_cb_path_down;
   5606	return nfs_ok;
   5607}
   5608
/*
 * End the NFSv4 grace period for @nn.  Idempotent: does nothing if the
 * grace period has already ended.
 */
void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	trace_nfsd_grace_complete(nn);
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 *
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
   5641
   5642/*
   5643 * If we've waited a lease period but there are still clients trying to
   5644 * reclaim, wait a little longer to give them a chance to finish.
   5645 */
   5646static bool clients_still_reclaiming(struct nfsd_net *nn)
   5647{
   5648	time64_t double_grace_period_end = nn->boot_time +
   5649					   2 * nn->nfsd4_lease;
   5650
   5651	if (nn->track_reclaim_completes &&
   5652			atomic_read(&nn->nr_reclaim_complete) ==
   5653			nn->reclaim_str_hashtbl_size)
   5654		return false;
   5655	if (!nn->somebody_reclaimed)
   5656		return false;
   5657	nn->somebody_reclaimed = false;
   5658	/*
   5659	 * If we've given them *two* lease times to reclaim, and they're
   5660	 * still not done, give up:
   5661	 */
   5662	if (ktime_get_boottime_seconds() > double_grace_period_end)
   5663		return false;
   5664	return true;
   5665}
   5666
/*
 * Bookkeeping for one laundromat pass: state last refreshed before
 * @cutoff has expired; @new_timeo tracks the shortest time until some
 * remaining state expires, i.e. when the laundromat should run again.
 */
struct laundry_time {
	time64_t cutoff;
	time64_t new_timeo;
};
   5671
   5672static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
   5673{
   5674	time64_t time_remaining;
   5675
   5676	if (last_refresh < lt->cutoff)
   5677		return true;
   5678	time_remaining = last_refresh - lt->cutoff;
   5679	lt->new_timeo = min(lt->new_timeo, time_remaining);
   5680	return false;
   5681}
   5682
   5683#ifdef CONFIG_NFSD_V4_2_INTER_SSC
/* Initialize the per-net bookkeeping for delayed inter-server-copy unmounts. */
void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
{
	spin_lock_init(&nn->nfsd_ssc_lock);
	INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
	init_waitqueue_head(&nn->nfsd_ssc_waitq);
}
EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
   5691
   5692/*
   5693 * This is called when nfsd is being shutdown, after all inter_ssc
   5694 * cleanup were done, to destroy the ssc delayed unmount list.
   5695 */
   5696static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
   5697{
   5698	struct nfsd4_ssc_umount_item *ni = NULL;
   5699	struct nfsd4_ssc_umount_item *tmp;
   5700
   5701	spin_lock(&nn->nfsd_ssc_lock);
   5702	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
   5703		list_del(&ni->nsui_list);
   5704		spin_unlock(&nn->nfsd_ssc_lock);
   5705		mntput(ni->nsui_vfsmount);
   5706		kfree(ni);
   5707		spin_lock(&nn->nfsd_ssc_lock);
   5708	}
   5709	spin_unlock(&nn->nfsd_ssc_lock);
   5710}
   5711
/*
 * Unmount any delayed inter-server-copy mounts whose expiry time has
 * passed and that are no longer referenced.  The scan stops at the
 * first unexpired entry (the list is apparently kept in expiry order —
 * entries are checked front to back).  Waiters in ssc_connect are woken
 * if anything was freed.
 */
static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
{
	bool do_wakeup = false;
	struct nfsd4_ssc_umount_item *ni = NULL;
	struct nfsd4_ssc_umount_item *tmp;

	spin_lock(&nn->nfsd_ssc_lock);
	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
		if (time_after(jiffies, ni->nsui_expire)) {
			/* Still in use by a copy: leave it for next time. */
			if (refcount_read(&ni->nsui_refcnt) > 1)
				continue;

			/* mark being unmount */
			ni->nsui_busy = true;
			/* mntput() may sleep: drop the lock around it. */
			spin_unlock(&nn->nfsd_ssc_lock);
			mntput(ni->nsui_vfsmount);
			spin_lock(&nn->nfsd_ssc_lock);

			/* waiters need to start from begin of list */
			list_del(&ni->nsui_list);
			kfree(ni);

			/* wakeup ssc_connect waiters */
			do_wakeup = true;
			continue;
		}
		break;
	}
	if (do_wakeup)
		wake_up_all(&nn->nfsd_ssc_waitq);
	spin_unlock(&nn->nfsd_ssc_lock);
}
   5744#endif
   5745
/*
 * Check if any lock belonging to this lockowner has any blockers,
 * i.e. whether some other lock request is currently waiting on a lock
 * held by @lo on any of its stateids' files.
 */
static bool
nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
{
	struct file_lock_context *ctx;
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *nf;

	list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
		nf = stp->st_stid.sc_file;
		ctx = nf->fi_inode->i_flctx;
		/* No lock context means no locks, hence no blockers. */
		if (!ctx)
			continue;
		if (locks_owner_has_blockers(ctx, lo))
			return true;
	}
	return false;
}
   5764
/*
 * Does any state held by @clp block other users?  True if the client
 * has delegations being recalled, or if any of its lockowners holds a
 * lock that someone else is waiting on.  Used to decide whether a
 * courtesy client must be expired.
 */
static bool
nfs4_anylock_blockers(struct nfs4_client *clp)
{
	int i;
	struct nfs4_stateowner *so;
	struct nfs4_lockowner *lo;

	if (atomic_read(&clp->cl_delegs_in_recall))
		return true;
	spin_lock(&clp->cl_lock);
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
				so_strhash) {
			/* Only lockowners can block others. */
			if (so->so_is_open_owner)
				continue;
			lo = lockowner(so);
			if (nfs4_lockowner_has_blockers(lo)) {
				spin_unlock(&clp->cl_lock);
				return true;
			}
		}
	}
	spin_unlock(&clp->cl_lock);
	return false;
}
   5790
/*
 * Build @reaplist: the set of clients on nn->client_lru that should be
 * expired this laundromat pass.  Lease-expired clients with no state
 * (or whose courtesy period has elapsed, or that are blocking other
 * lockers) are marked expired and moved onto the list; others become
 * courtesy clients.  client_lru is time-ordered, so the scan stops at
 * the first client whose lease has not expired.
 */
static void
nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
				struct laundry_time *lt)
{
	struct list_head *pos, *next;
	struct nfs4_client *clp;

	INIT_LIST_HEAD(reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		/* already flagged expirable (e.g. by lock-conflict path) */
		if (clp->cl_state == NFSD4_EXPIRABLE)
			goto exp_client;
		if (!state_expired(lt, clp->cl_time))
			break;
		/* lease gone and no RPCs in flight: demote to courtesy */
		if (!atomic_read(&clp->cl_rpc_users))
			clp->cl_state = NFSD4_COURTESY;
		if (!client_has_state(clp) ||
				ktime_get_boottime_seconds() >=
				(clp->cl_time + NFSD_COURTESY_CLIENT_TIMEOUT))
			goto exp_client;
		if (nfs4_anylock_blockers(clp)) {
exp_client:
			/* mark_client_expired_locked returns 0 on success */
			if (!mark_client_expired_locked(clp))
				list_add(&clp->cl_lru, reaplist);
		}
	}
	spin_unlock(&nn->client_lock);
}
   5820
/*
 * The laundromat: periodic reaper of expired NFSv4 state.  In order it
 * handles: grace-period end, expired copy-notify stateids, expirable
 * clients, expired delegations on the recall list, the openowner
 * close_lru, expired blocked-lock notifications, and (if configured)
 * the inter-server-copy delayed unmount list.
 *
 * Returns the number of seconds until the next run should be scheduled.
 */
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	struct laundry_time lt = {
		.cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
		.new_timeo = nn->nfsd4_lease
	};
	struct nfs4_cpntf_state *cps;
	copy_stateid_t *cps_t;
	int i;

	/* don't reap anything while clients are still reclaiming */
	if (clients_still_reclaiming(nn)) {
		lt.new_timeo = 0;
		goto out;
	}
	nfsd4_end_grace(nn);

	/* free expired copy-notify stateids */
	spin_lock(&nn->s2s_cp_lock);
	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
				state_expired(&lt, cps->cpntf_time))
			_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
	nfs4_get_client_reaplist(nn, &reaplist, &lt);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		trace_nfsd_clid_purged(&clp->cl_clientid);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	/* unhash expired delegations under state_lock, revoke them after */
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (!state_expired(&lt, dp->dl_time))
			break;
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	/* release last-closed stateids held on the close_lru */
	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (!state_expired(&lt, oo->oo_time))
			break;
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		/* nfs4_put_stid may sleep; drop the lock around it */
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	/*
	 * It's possible for a client to try and acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (!state_expired(&lt, nbl->nbl_time))
			break;
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* free them outside the blocked_locks_lock */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	/* service the server-to-server copy delayed unmount list */
	nfsd4_ssc_expire_umount(nn);
#endif
out:
	return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
}
   5926
   5927static void laundromat_main(struct work_struct *);
   5928
/*
 * Delayed-work entry point for the laundromat: run one reaping pass and
 * re-queue ourselves after the timeout nfs4_laundromat() asks for.
 */
static void
laundromat_main(struct work_struct *laundry)
{
	time64_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
   5940
   5941static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
   5942{
   5943	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
   5944		return nfserr_bad_stateid;
   5945	return nfs_ok;
   5946}
   5947
   5948static
   5949__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
   5950{
   5951        __be32 status = nfserr_openmode;
   5952
   5953	/* For lock stateid's, we test the parent open, not the lock: */
   5954	if (stp->st_openstp)
   5955		stp = stp->st_openstp;
   5956	if ((flags & WR_STATE) && !access_permit_write(stp))
   5957                goto out;
   5958	if ((flags & RD_STATE) && !access_permit_read(stp))
   5959                goto out;
   5960	status = nfs_ok;
   5961out:
   5962	return status;
   5963}
   5964
   5965static inline __be32
   5966check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
   5967{
   5968	if (ONE_STATEID(stateid) && (flags & RD_STATE))
   5969		return nfs_ok;
   5970	else if (opens_in_grace(net)) {
   5971		/* Answer in remaining cases depends on existence of
   5972		 * conflicting state; so we must wait out the grace period. */
   5973		return nfserr_grace;
   5974	} else if (flags & WR_STATE)
   5975		return nfs4_share_conflict(current_fh,
   5976				NFS4_SHARE_DENY_WRITE);
   5977	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
   5978		return nfs4_share_conflict(current_fh,
   5979				NFS4_SHARE_DENY_READ);
   5980}
   5981
   5982static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
   5983{
   5984	/*
   5985	 * When sessions are used the stateid generation number is ignored
   5986	 * when it is zero.
   5987	 */
   5988	if (has_session && in->si_generation == 0)
   5989		return nfs_ok;
   5990
   5991	if (in->si_generation == ref->si_generation)
   5992		return nfs_ok;
   5993
   5994	/* If the client sends us a stateid from the future, it's buggy: */
   5995	if (nfsd4_stateid_generation_after(in, ref))
   5996		return nfserr_bad_stateid;
   5997	/*
   5998	 * However, we could see a stateid from the past, even from a
   5999	 * non-buggy client.  For example, if the client sends a lock
   6000	 * while some IO is outstanding, the lock may bump si_generation
   6001	 * while the IO is still in flight.  The client could avoid that
   6002	 * situation by waiting for responses on all the IO requests,
   6003	 * but better performance may result in retrying IO that
   6004	 * receives an old_stateid error if requests are rarely
   6005	 * reordered in flight:
   6006	 */
   6007	return nfserr_old_stateid;
   6008}
   6009
/*
 * Under the stid's sc_lock, verify the stid is still a valid open stid
 * and then check the generation number against the stored stateid.
 */
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
	__be32 ret;

	spin_lock(&s->sc_lock);
	ret = nfsd4_verify_open_stid(s);
	if (ret == nfs_ok)
		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
	spin_unlock(&s->sc_lock);
	return ret;
}
   6021
   6022static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
   6023{
   6024	if (ols->st_stateowner->so_is_open_owner &&
   6025	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
   6026		return nfserr_bad_stateid;
   6027	return nfs_ok;
   6028}
   6029
/*
 * Validate a stateid on behalf of TEST_STATEID: check for the special
 * stateids, verify it belongs to @cl, look it up under cl_lock, check
 * its generation, and map its type to the appropriate status.
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	/* special stateids are never valid for TEST_STATEID */
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
		return status;
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		fallthrough;
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
   6069
/*
 * Look up a stateid of one of the types in @typemask and return it in
 * *@s with a reference held.  The caller owns that reference on success.
 * Revoked delegations are reported as nfserr_deleg_revoked (v4.1+) or
 * nfserr_bad_stateid unless the caller explicitly asked for them.
 */
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 *  only return revoked delegations if explicitly asked.
	 *  otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	/* special stateids never name a concrete stid */
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		/* with sessions a stale clientid maps to bad_stateid */
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
   6109
   6110static struct nfsd_file *
   6111nfs4_find_file(struct nfs4_stid *s, int flags)
   6112{
   6113	if (!s)
   6114		return NULL;
   6115
   6116	switch (s->sc_type) {
   6117	case NFS4_DELEG_STID:
   6118		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
   6119			return NULL;
   6120		return nfsd_file_get(s->sc_file->fi_deleg_file);
   6121	case NFS4_OPEN_STID:
   6122	case NFS4_LOCK_STID:
   6123		if (flags & RD_STATE)
   6124			return find_readable_file(s->sc_file);
   6125		else
   6126			return find_writeable_file(s->sc_file);
   6127	}
   6128
   6129	return NULL;
   6130}
   6131
   6132static __be32
   6133nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
   6134{
   6135	__be32 status;
   6136
   6137	status = nfsd4_check_openowner_confirmed(ols);
   6138	if (status)
   6139		return status;
   6140	return nfs4_check_openmode(ols, flags);
   6141}
   6142
/*
 * Obtain an nfsd_file for the I/O described by @flags.  Prefer a file
 * already associated with the stid; fall back to opening one via
 * nfsd_file_acquire().  On success *@nfp holds a reference the caller
 * must put; on failure no reference is retained.
 */
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct nfsd_file **nfp, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct nfsd_file *nf;
	__be32 status;

	nf = nfs4_find_file(s, flags);
	if (nf) {
		/* stid supplied the file; still enforce export permissions */
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			nfsd_file_put(nf);
			goto out;
		}
	} else {
		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
		if (status)
			return status;
	}
	*nfp = nf;
out:
	return status;
}
/*
 * Drop one reference on a copy-notify stateid; on the last reference,
 * unhash it from the s2s_cp_stateids idr and free it.  Caller must hold
 * nn->s2s_cp_lock.
 */
static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
		return;
	list_del(&cps->cp_list);
	idr_remove(&nn->s2s_cp_stateids,
		   cps->cp_stateid.stid.si_opaque.so_id);
	kfree(cps);
}
   6179/*
   6180 * A READ from an inter server to server COPY will have a
   6181 * copy stateid. Look up the copy notify stateid from the
   6182 * idr structure and take a reference on it.
   6183 */
/*
 * Look up the copy-notify stateid @st in the per-net idr.  With
 * @clp == NULL, take a reference and return it in *@cps; with @clp set,
 * drop a reference instead (freeing the state on the last one).
 * Returns nfserr_bad_stateid if no matching copy-notify state exists.
 */
__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			  struct nfs4_client *clp,
			  struct nfs4_cpntf_state **cps)
{
	copy_stateid_t *cps_t;
	struct nfs4_cpntf_state *state = NULL;

	/* copy-notify stateids carry the server's special cl_id */
	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
		return nfserr_bad_stateid;
	spin_lock(&nn->s2s_cp_lock);
	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
	if (cps_t) {
		state = container_of(cps_t, struct nfs4_cpntf_state,
				     cp_stateid);
		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
			state = NULL;
			goto unlock;
		}
		if (!clp)
			refcount_inc(&state->cp_stateid.sc_count);
		else
			_free_cpntf_state_locked(nn, state);
	}
unlock:
	spin_unlock(&nn->s2s_cp_lock);
	if (!state)
		return nfserr_bad_stateid;
	if (!clp && state)
		*cps = state;
	return 0;
}
   6215
/*
 * Resolve a copy-notify stateid to the parent stateid it was derived
 * from: take a reference on the cpntf state, refresh its timestamp,
 * find the originating client, and look up the parent stid.  On
 * success *@stid holds a reference owned by the caller.
 */
static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			       struct nfs4_stid **stid)
{
	__be32 status;
	struct nfs4_cpntf_state *cps = NULL;
	struct nfs4_client *found;

	status = manage_cpntf_state(nn, st, NULL, &cps);
	if (status)
		return status;

	/* keep the laundromat from expiring this state while in use */
	cps->cpntf_time = ktime_get_boottime_seconds();

	status = nfserr_expired;
	found = lookup_clientid(&cps->cp_p_clid, true, nn);
	if (!found)
		goto out;

	*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
			NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
	if (*stid)
		status = nfs_ok;
	else
		status = nfserr_bad_stateid;

	put_client_renew(found);
out:
	nfs4_put_cpntf_state(nn, cps);
	return status;
}
   6246
/* Drop a reference on @cps, taking the s2s_cp_lock the helper requires. */
void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	spin_lock(&nn->s2s_cp_lock);
	_free_cpntf_state_locked(nn, cps);
	spin_unlock(&nn->s2s_cp_lock);
}
   6253
   6254/*
   6255 * Checks for stateid operations
   6256 */
/*
 * Central stateid validation for I/O operations (READ/WRITE/SETATTR...).
 * Handles the special stateids, looks up delegation/open/lock stateids
 * (falling back to copy-notify resolution), checks generation, access
 * mode and filehandle, and optionally returns an nfsd_file in *@nfp
 * and/or the stid itself in *@cstid (each with a reference the caller
 * must put).
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct nfsd_file **nfp,
		struct nfs4_stid **cstid)
{
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s = NULL;
	__be32 status;

	if (nfp)
		*nfp = NULL;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
		/* callers wanting the stid back can't use special stateids */
		if (cstid)
			status = nfserr_bad_stateid;
		else
			status = check_special_stateids(net, fhp, stateid,
									flags);
		goto done;
	}

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	/* maybe it's a copy-notify stateid from an inter-server COPY */
	if (status == nfserr_bad_stateid)
		status = find_cpntf_state(nn, stateid, &s);
	if (status)
		return status;
	status = nfsd4_stid_check_stateid_generation(stateid, s,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	/* per-type access-mode check */
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

done:
	if (status == nfs_ok && nfp)
		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
	if (s) {
		/* hand the reference to the caller only on full success */
		if (!status && cstid)
			*cstid = s;
		else
			nfs4_put_stid(s);
	}
	return status;
}
   6320
   6321/*
   6322 * Test if the stateid is valid
   6323 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->clp;

	/* validate each stateid in the request; per-id status is returned
	 * in the reply, so the operation itself always succeeds */
	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
   6338
/*
 * FREE_STATEID helper for lock stateids.  Consumes the caller's
 * reference on @s.  Fails with nfserr_locks_held if the lockowner
 * still holds locks on the file.
 */
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	/* take st_mutex; fails if the stateid is being closed */
	ret = nfsd4_lock_ol_stateid(stp);
	if (ret)
		goto out_put_stid;

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
out_put_stid:
	nfs4_put_stid(s);
	return ret;
}
   6367
/*
 * FREE_STATEID: release a stateid the client no longer needs.  Only
 * lock stateids with no locks held and revoked delegations can actually
 * be freed; open stateids and live delegations return nfserr_locks_held.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->clp;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	spin_lock(&s->sc_lock);
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		/* drop both locks; helper takes st_mutex and may sleep */
		spin_unlock(&s->sc_lock);
		refcount_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		spin_unlock(&s->sc_lock);
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
	spin_unlock(&s->sc_lock);
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
   6416
   6417static inline int
   6418setlkflg (int type)
   6419{
   6420	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
   6421		RD_STATE : WR_STATE;
   6422}
   6423
/*
 * Common checks for seqid-mutating ops: owner seqid, stateid generation
 * and filehandle match.  On nfs_ok the stp's st_mutex is held and the
 * caller must unlock it; on error the mutex is not held.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	/* takes st_mutex on success */
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
   6443
   6444/* 
   6445 * Checks for sequence id mutating operations. 
   6446 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	trace_nfsd_preprocess(seqid, stateid);

	*stpp = NULL;
	/* takes a reference on the stid on success */
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	/* record the owner so the reply can be cached for replays */
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	/* on success *stpp holds the reference and st_mutex is held */
	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
   6473
/*
 * Like nfs4_preprocess_seqid_op() for OPEN stateids, but additionally
 * requires the openowner to be confirmed.  On nfs_ok *stpp holds a
 * referenced stateid with st_mutex held.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* undo what preprocess_seqid_op set up */
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
   6494
/*
 * OPEN_CONFIRM (NFSv4.0): confirm an openowner after its first OPEN.
 * Fails with nfserr_bad_stateid if the owner was already confirmed.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_open_confirm *oc = &u->open_confirm;
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	/* on success st_mutex is held and stp is referenced */
	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
	/* client now holds state: make it reboot-recoverable */
	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
   6535
   6536static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
   6537{
   6538	if (!test_access(access, stp))
   6539		return;
   6540	nfs4_file_put_access(stp->st_stid.sc_file, access);
   6541	clear_access(access, stp);
   6542}
   6543
   6544static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
   6545{
   6546	switch (to_access) {
   6547	case NFS4_SHARE_ACCESS_READ:
   6548		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
   6549		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
   6550		break;
   6551	case NFS4_SHARE_ACCESS_WRITE:
   6552		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
   6553		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
   6554		break;
   6555	case NFS4_SHARE_ACCESS_BOTH:
   6556		break;
   6557	default:
   6558		WARN_ON_ONCE(1);
   6559	}
   6560}
   6561
/*
 * OPEN_DOWNGRADE: reduce the share access/deny of an open stateid.
 * The requested modes must be a subset of what is currently held.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_open_downgrade *od = &u->open_downgrade;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	/* on success st_mutex is held and stp is referenced */
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out; 
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
   6605
/*
 * Tear down an open stateid on CLOSE.  For v4.1+ the stateid is freed
 * immediately; for v4.0 it is instead moved to the close_lru so a
 * CLOSE replay can still find it (the laundromat reaps it later).
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
   6627
   6628/*
   6629 * nfs4_unlock_state() called after encode
   6630 */
/*
 * CLOSE: release the open state named by the stateid and reply with
 * the special close stateid.
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_close *close = &u->close;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n", 
			cstate->current_fh.fh_dentry);

	/* a CLOSED_STID is acceptable here to make CLOSE replays work */
	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out; 

	stp->st_stid.sc_type = NFS4_CLOSED_STID;

	/*
	 * Technically we don't _really_ have to increment or copy it, since
	 * it should just be gone after this operation and we clobber the
	 * copied value below, but we continue to do so here just to ensure
	 * that racing ops see that there was a state change.
	 */
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* v4.1+ suggests that we send a special stateid in here, since the
	 * clients should just ignore this anyway. Since this is not useful
	 * for v4.0 clients either, we set it to the special close_stateid
	 * universally.
	 *
	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
	 */
	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
   6679
   6680__be32
   6681nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
   6682		  union nfsd4_op_u *u)
   6683{
   6684	struct nfsd4_delegreturn *dr = &u->delegreturn;
   6685	struct nfs4_delegation *dp;
   6686	stateid_t *stateid = &dr->dr_stateid;
   6687	struct nfs4_stid *s;
   6688	__be32 status;
   6689	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
   6690
   6691	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
   6692		return status;
   6693
   6694	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
   6695	if (status)
   6696		goto out;
   6697	dp = delegstateid(s);
   6698	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
   6699	if (status)
   6700		goto put_stateid;
   6701
   6702	destroy_delegation(dp);
   6703put_stateid:
   6704	nfs4_put_stid(&dp->dl_stid);
   6705out:
   6706	return status;
   6707}
   6708
   6709/* last octet in a range */
   6710static inline u64
   6711last_byte_offset(u64 start, u64 len)
   6712{
   6713	u64 end;
   6714
   6715	WARN_ON_ONCE(!len);
   6716	end = start + len;
   6717	return end > start ? end - 1: NFS4_MAX_UINT64;
   6718}
   6719
   6720/*
   6721 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
   6722 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
   6723 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
   6724 * locking, this prevents us from being completely protocol-compliant.  The
   6725 * real solution to this problem is to start using unsigned file offsets in
   6726 * the VFS, but this is a very deep change!
   6727 */
   6728static inline void
   6729nfs4_transform_lock_offset(struct file_lock *lock)
   6730{
   6731	if (lock->fl_start < 0)
   6732		lock->fl_start = OFFSET_MAX;
   6733	if (lock->fl_end < 0)
   6734		lock->fl_end = OFFSET_MAX;
   6735}
   6736
   6737static fl_owner_t
   6738nfsd4_lm_get_owner(fl_owner_t owner)
   6739{
   6740	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
   6741
   6742	nfs4_get_stateowner(&lo->lo_owner);
   6743	return owner;
   6744}
   6745
   6746static void
   6747nfsd4_lm_put_owner(fl_owner_t owner)
   6748{
   6749	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
   6750
   6751	if (lo)
   6752		nfs4_put_stateowner(&lo->lo_owner);
   6753}
   6754
   6755/* return pointer to struct nfs4_client if client is expirable */
   6756static bool
   6757nfsd4_lm_lock_expirable(struct file_lock *cfl)
   6758{
   6759	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
   6760	struct nfs4_client *clp = lo->lo_owner.so_client;
   6761	struct nfsd_net *nn;
   6762
   6763	if (try_to_expire_client(clp)) {
   6764		nn = net_generic(clp->net, nfsd_net_id);
   6765		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
   6766		return true;
   6767	}
   6768	return false;
   6769}
   6770
/* schedule laundromat to run immediately and wait for it to complete */
static void
nfsd4_lm_expire_lock(void)
{
	/* the work was queued by nfsd4_lm_lock_expirable; wait for it here */
	flush_workqueue(laundry_wq);
}
   6777
/*
 * lm_notify callback: a previously-blocked lock has become available.
 * Take the blocked-lock record off the per-lockowner and LRU lists and,
 * if we won that race, run its callback to tell the client.
 */
static void
nfsd4_lm_notify(struct file_lock *fl)
{
	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
	struct net			*net = lo->lo_owner.so_client->net;
	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock	*nbl = container_of(fl,
						struct nfsd4_blocked_lock, nbl_lock);
	bool queue = false;

	/* An empty list means that something else is going to be using it */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		/* claim the record so no one else handles or frees it */
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
		queue = true;
	}
	spin_unlock(&nn->blocked_locks_lock);

	if (queue) {
		trace_nfsd_cb_notify_lock(lo, nbl);
		nfsd4_run_cb(&nbl->nbl_cb);
	}
}
   6802
/* lock_manager callbacks attached to every nfsd-owned POSIX lock */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
	.lm_mod_owner = THIS_MODULE,
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_lm_get_owner,
	.lm_put_owner = nfsd4_lm_put_owner,
	.lm_lock_expirable = nfsd4_lm_lock_expirable,
	.lm_expire_lock = nfsd4_lm_expire_lock,
};
   6811
/*
 * Fill in the LOCK4denied result from a conflicting file_lock.
 * If the conflict belongs to one of our own lockowners we can report
 * its owner/clientid; otherwise (or if copying the owner fails) report
 * an anonymous owner with a zero clientid.
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
						GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	/* a lock to end-of-file is reported with the special max length */
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
   6840
   6841static struct nfs4_lockowner *
   6842find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
   6843{
   6844	unsigned int strhashval = ownerstr_hashval(owner);
   6845	struct nfs4_stateowner *so;
   6846
   6847	lockdep_assert_held(&clp->cl_lock);
   6848
   6849	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
   6850			    so_strhash) {
   6851		if (so->so_is_open_owner)
   6852			continue;
   6853		if (same_owner_str(so, owner))
   6854			return lockowner(nfs4_get_stateowner(so));
   6855	}
   6856	return NULL;
   6857}
   6858
/* Locked-helper wrapper: take cl_lock around the owner-string lookup. */
static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}
   6869
/* so_ops->so_unhash callback for lockowners */
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}
   6874
   6875static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
   6876{
   6877	struct nfs4_lockowner *lo = lockowner(sop);
   6878
   6879	kmem_cache_free(lockowner_slab, lo);
   6880}
   6881
/* stateowner ops shared by all lockowners */
static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
   6886
   6887/*
   6888 * Alloc a lock owner structure.
   6889 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
   6890 * occurred. 
   6891 *
   6892 * strhashval = ownerstr_hashval
   6893 */
   6894static struct nfs4_lockowner *
   6895alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
   6896			   struct nfs4_ol_stateid *open_stp,
   6897			   struct nfsd4_lock *lock)
   6898{
   6899	struct nfs4_lockowner *lo, *ret;
   6900
   6901	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
   6902	if (!lo)
   6903		return NULL;
   6904	INIT_LIST_HEAD(&lo->lo_blocked);
   6905	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
   6906	lo->lo_owner.so_is_open_owner = 0;
   6907	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
   6908	lo->lo_owner.so_ops = &lockowner_ops;
   6909	spin_lock(&clp->cl_lock);
   6910	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
   6911	if (ret == NULL) {
   6912		list_add(&lo->lo_owner.so_strhash,
   6913			 &clp->cl_ownerstr_hashtbl[strhashval]);
   6914		ret = lo;
   6915	} else
   6916		nfs4_free_stateowner(&lo->lo_owner);
   6917
   6918	spin_unlock(&clp->cl_lock);
   6919	return ret;
   6920}
   6921
/*
 * Find an existing lock stateid for lockowner @lo hanging off open
 * stateid @ost.  Caller holds cl_lock.  On a hit, returns the stateid
 * with sc_count bumped.
 */
static struct nfs4_ol_stateid *
find_lock_stateid(const struct nfs4_lockowner *lo,
		  const struct nfs4_ol_stateid *ost)
{
	struct nfs4_ol_stateid *lst;

	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);

	/* If ost is not hashed, ost->st_locks will not be valid */
	if (!nfs4_ol_stateid_unhashed(ost))
		list_for_each_entry(lst, &ost->st_locks, st_locks) {
			if (lst->st_stateowner == &lo->lo_owner) {
				refcount_inc(&lst->st_stid.sc_count);
				return lst;
			}
		}
	return NULL;
}
   6940
/*
 * Initialize and hash a freshly allocated lock stateid @stp under open
 * stateid @open_stp, unless a concurrent thread already created one for
 * this lockowner (then return that instead, locked) or the open stateid
 * was unhashed (then return NULL).  On success the returned stateid's
 * st_mutex is held.
 */
static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *retstp;

	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
	spin_lock(&clp->cl_lock);
	if (nfs4_ol_stateid_unhashed(open_stp))
		goto out_close;
	retstp = find_lock_stateid(lo, open_stp);
	if (retstp)
		goto out_found;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&clp->cl_lock);
	return stp;
out_found:
	spin_unlock(&clp->cl_lock);
	/* if the existing stateid is being torn down, drop it and retry */
	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
		nfs4_put_stid(&retstp->st_stid);
		goto retry;
	}
	/* To keep mutex tracking happy */
	mutex_unlock(&stp->st_mutex);
	return retstp;
out_close:
	spin_unlock(&clp->cl_lock);
	mutex_unlock(&stp->st_mutex);
	return NULL;
}
   6987
/*
 * Return a locked lock stateid for (lockowner, open stateid), creating
 * one if none exists yet.  *new is set when the caller received a
 * freshly created stateid (so it can be released on later failure).
 */
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	*new = false;
	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, ost);
	spin_unlock(&clp->cl_lock);
	if (lst != NULL) {
		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
			goto out;
		/* found one that's going away; drop it and allocate anew */
		nfs4_put_stid(&lst->st_stid);
	}
	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
	if (ns == NULL)
		return NULL;

	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
	if (lst == openlockstateid(ns))
		*new = true;
	else
		/* init returned an existing (or no) stateid; free ours */
		nfs4_put_stid(ns);
out:
	return lst;
}
   7019
   7020static int
   7021check_lock_length(u64 offset, u64 length)
   7022{
   7023	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
   7024		(length > ~offset)));
   7025}
   7026
   7027static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
   7028{
   7029	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
   7030
   7031	lockdep_assert_held(&fp->fi_lock);
   7032
   7033	if (test_access(access, lock_stp))
   7034		return;
   7035	__nfs4_file_get_access(fp, access);
   7036	set_access(access, lock_stp);
   7037}
   7038
/*
 * Resolve (or create) the lockowner and lock stateid for a LOCK request
 * that carries a new lockowner.  On success, *plst holds a locked,
 * referenced lock stateid and *new says whether it was freshly created.
 */
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	status = nfs_ok;
	*plst = lst;
out:
	/* drop the lookup/creation reference on the lockowner */
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
   7080
   7081/*
   7082 *  LOCK operation 
   7083 */
   7084__be32
   7085nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
   7086	   union nfsd4_op_u *u)
   7087{
   7088	struct nfsd4_lock *lock = &u->lock;
   7089	struct nfs4_openowner *open_sop = NULL;
   7090	struct nfs4_lockowner *lock_sop = NULL;
   7091	struct nfs4_ol_stateid *lock_stp = NULL;
   7092	struct nfs4_ol_stateid *open_stp = NULL;
   7093	struct nfs4_file *fp;
   7094	struct nfsd_file *nf = NULL;
   7095	struct nfsd4_blocked_lock *nbl = NULL;
   7096	struct file_lock *file_lock = NULL;
   7097	struct file_lock *conflock = NULL;
   7098	__be32 status = 0;
   7099	int lkflg;
   7100	int err;
   7101	bool new = false;
   7102	unsigned char fl_type;
   7103	unsigned int fl_flags = FL_POSIX;
   7104	struct net *net = SVC_NET(rqstp);
   7105	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
   7106
   7107	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
   7108		(long long) lock->lk_offset,
   7109		(long long) lock->lk_length);
   7110
   7111	if (check_lock_length(lock->lk_offset, lock->lk_length))
   7112		 return nfserr_inval;
   7113
   7114	if ((status = fh_verify(rqstp, &cstate->current_fh,
   7115				S_IFREG, NFSD_MAY_LOCK))) {
   7116		dprintk("NFSD: nfsd4_lock: permission denied!\n");
   7117		return status;
   7118	}
   7119
   7120	if (lock->lk_is_new) {
   7121		if (nfsd4_has_session(cstate))
   7122			/* See rfc 5661 18.10.3: given clientid is ignored: */
   7123			memcpy(&lock->lk_new_clientid,
   7124				&cstate->clp->cl_clientid,
   7125				sizeof(clientid_t));
   7126
   7127		/* validate and update open stateid and open seqid */
   7128		status = nfs4_preprocess_confirmed_seqid_op(cstate,
   7129				        lock->lk_new_open_seqid,
   7130		                        &lock->lk_new_open_stateid,
   7131					&open_stp, nn);
   7132		if (status)
   7133			goto out;
   7134		mutex_unlock(&open_stp->st_mutex);
   7135		open_sop = openowner(open_stp->st_stateowner);
   7136		status = nfserr_bad_stateid;
   7137		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
   7138						&lock->lk_new_clientid))
   7139			goto out;
   7140		status = lookup_or_create_lock_state(cstate, open_stp, lock,
   7141							&lock_stp, &new);
   7142	} else {
   7143		status = nfs4_preprocess_seqid_op(cstate,
   7144				       lock->lk_old_lock_seqid,
   7145				       &lock->lk_old_lock_stateid,
   7146				       NFS4_LOCK_STID, &lock_stp, nn);
   7147	}
   7148	if (status)
   7149		goto out;
   7150	lock_sop = lockowner(lock_stp->st_stateowner);
   7151
   7152	lkflg = setlkflg(lock->lk_type);
   7153	status = nfs4_check_openmode(lock_stp, lkflg);
   7154	if (status)
   7155		goto out;
   7156
   7157	status = nfserr_grace;
   7158	if (locks_in_grace(net) && !lock->lk_reclaim)
   7159		goto out;
   7160	status = nfserr_no_grace;
   7161	if (!locks_in_grace(net) && lock->lk_reclaim)
   7162		goto out;
   7163
   7164	if (lock->lk_reclaim)
   7165		fl_flags |= FL_RECLAIM;
   7166
   7167	fp = lock_stp->st_stid.sc_file;
   7168	switch (lock->lk_type) {
   7169		case NFS4_READW_LT:
   7170			if (nfsd4_has_session(cstate))
   7171				fl_flags |= FL_SLEEP;
   7172			fallthrough;
   7173		case NFS4_READ_LT:
   7174			spin_lock(&fp->fi_lock);
   7175			nf = find_readable_file_locked(fp);
   7176			if (nf)
   7177				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
   7178			spin_unlock(&fp->fi_lock);
   7179			fl_type = F_RDLCK;
   7180			break;
   7181		case NFS4_WRITEW_LT:
   7182			if (nfsd4_has_session(cstate))
   7183				fl_flags |= FL_SLEEP;
   7184			fallthrough;
   7185		case NFS4_WRITE_LT:
   7186			spin_lock(&fp->fi_lock);
   7187			nf = find_writeable_file_locked(fp);
   7188			if (nf)
   7189				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
   7190			spin_unlock(&fp->fi_lock);
   7191			fl_type = F_WRLCK;
   7192			break;
   7193		default:
   7194			status = nfserr_inval;
   7195		goto out;
   7196	}
   7197
   7198	if (!nf) {
   7199		status = nfserr_openmode;
   7200		goto out;
   7201	}
   7202
   7203	/*
   7204	 * Most filesystems with their own ->lock operations will block
   7205	 * the nfsd thread waiting to acquire the lock.  That leads to
   7206	 * deadlocks (we don't want every nfsd thread tied up waiting
   7207	 * for file locks), so don't attempt blocking lock notifications
   7208	 * on those filesystems:
   7209	 */
   7210	if (nf->nf_file->f_op->lock)
   7211		fl_flags &= ~FL_SLEEP;
   7212
   7213	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
   7214	if (!nbl) {
   7215		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
   7216		status = nfserr_jukebox;
   7217		goto out;
   7218	}
   7219
   7220	file_lock = &nbl->nbl_lock;
   7221	file_lock->fl_type = fl_type;
   7222	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
   7223	file_lock->fl_pid = current->tgid;
   7224	file_lock->fl_file = nf->nf_file;
   7225	file_lock->fl_flags = fl_flags;
   7226	file_lock->fl_lmops = &nfsd_posix_mng_ops;
   7227	file_lock->fl_start = lock->lk_offset;
   7228	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
   7229	nfs4_transform_lock_offset(file_lock);
   7230
   7231	conflock = locks_alloc_lock();
   7232	if (!conflock) {
   7233		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
   7234		status = nfserr_jukebox;
   7235		goto out;
   7236	}
   7237
   7238	if (fl_flags & FL_SLEEP) {
   7239		nbl->nbl_time = ktime_get_boottime_seconds();
   7240		spin_lock(&nn->blocked_locks_lock);
   7241		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
   7242		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
   7243		kref_get(&nbl->nbl_kref);
   7244		spin_unlock(&nn->blocked_locks_lock);
   7245	}
   7246
   7247	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
   7248	switch (err) {
   7249	case 0: /* success! */
   7250		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
   7251		status = 0;
   7252		if (lock->lk_reclaim)
   7253			nn->somebody_reclaimed = true;
   7254		break;
   7255	case FILE_LOCK_DEFERRED:
   7256		kref_put(&nbl->nbl_kref, free_nbl);
   7257		nbl = NULL;
   7258		fallthrough;
   7259	case -EAGAIN:		/* conflock holds conflicting lock */
   7260		status = nfserr_denied;
   7261		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
   7262		nfs4_set_lock_denied(conflock, &lock->lk_denied);
   7263		break;
   7264	case -EDEADLK:
   7265		status = nfserr_deadlock;
   7266		break;
   7267	default:
   7268		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
   7269		status = nfserrno(err);
   7270		break;
   7271	}
   7272out:
   7273	if (nbl) {
   7274		/* dequeue it if we queued it before */
   7275		if (fl_flags & FL_SLEEP) {
   7276			spin_lock(&nn->blocked_locks_lock);
   7277			if (!list_empty(&nbl->nbl_list) &&
   7278			    !list_empty(&nbl->nbl_lru)) {
   7279				list_del_init(&nbl->nbl_list);
   7280				list_del_init(&nbl->nbl_lru);
   7281				kref_put(&nbl->nbl_kref, free_nbl);
   7282			}
   7283			/* nbl can use one of lists to be linked to reaplist */
   7284			spin_unlock(&nn->blocked_locks_lock);
   7285		}
   7286		free_blocked_lock(nbl);
   7287	}
   7288	if (nf)
   7289		nfsd_file_put(nf);
   7290	if (lock_stp) {
   7291		/* Bump seqid manually if the 4.0 replay owner is openowner */
   7292		if (cstate->replay_owner &&
   7293		    cstate->replay_owner != &lock_sop->lo_owner &&
   7294		    seqid_mutating_err(ntohl(status)))
   7295			lock_sop->lo_owner.so_seqid++;
   7296
   7297		/*
   7298		 * If this is a new, never-before-used stateid, and we are
   7299		 * returning an error, then just go ahead and release it.
   7300		 */
   7301		if (status && new)
   7302			release_lock_stateid(lock_stp);
   7303
   7304		mutex_unlock(&lock_stp->st_mutex);
   7305
   7306		nfs4_put_stid(&lock_stp->st_stid);
   7307	}
   7308	if (open_stp)
   7309		nfs4_put_stid(&open_stp->st_stid);
   7310	nfsd4_bump_seqid(cstate, status);
   7311	if (conflock)
   7312		locks_free_lock(conflock);
   7313	return status;
   7314}
   7315
   7316/*
   7317 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
   7318 * so we do a temporary open here just to get an open file to pass to
   7319 * vfs_test_lock.
   7320 */
   7321static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
   7322{
   7323	struct nfsd_file *nf;
   7324	__be32 err;
   7325
   7326	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
   7327	if (err)
   7328		return err;
   7329	fh_lock(fhp); /* to block new leases till after test_lock: */
   7330	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
   7331							NFSD_MAY_READ));
   7332	if (err)
   7333		goto out;
   7334	lock->fl_file = nf->nf_file;
   7335	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
   7336	lock->fl_file = NULL;
   7337out:
   7338	fh_unlock(fhp);
   7339	nfsd_file_put(nf);
   7340	return err;
   7341}
   7342
   7343/*
   7344 * LOCKT operation
   7345 */
   7346__be32
   7347nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
   7348	    union nfsd4_op_u *u)
   7349{
   7350	struct nfsd4_lockt *lockt = &u->lockt;
   7351	struct file_lock *file_lock = NULL;
   7352	struct nfs4_lockowner *lo = NULL;
   7353	__be32 status;
   7354	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
   7355
   7356	if (locks_in_grace(SVC_NET(rqstp)))
   7357		return nfserr_grace;
   7358
   7359	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
   7360		 return nfserr_inval;
   7361
   7362	if (!nfsd4_has_session(cstate)) {
   7363		status = set_client(&lockt->lt_clientid, cstate, nn);
   7364		if (status)
   7365			goto out;
   7366	}
   7367
   7368	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
   7369		goto out;
   7370
   7371	file_lock = locks_alloc_lock();
   7372	if (!file_lock) {
   7373		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
   7374		status = nfserr_jukebox;
   7375		goto out;
   7376	}
   7377
   7378	switch (lockt->lt_type) {
   7379		case NFS4_READ_LT:
   7380		case NFS4_READW_LT:
   7381			file_lock->fl_type = F_RDLCK;
   7382			break;
   7383		case NFS4_WRITE_LT:
   7384		case NFS4_WRITEW_LT:
   7385			file_lock->fl_type = F_WRLCK;
   7386			break;
   7387		default:
   7388			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
   7389			status = nfserr_inval;
   7390			goto out;
   7391	}
   7392
   7393	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
   7394	if (lo)
   7395		file_lock->fl_owner = (fl_owner_t)lo;
   7396	file_lock->fl_pid = current->tgid;
   7397	file_lock->fl_flags = FL_POSIX;
   7398
   7399	file_lock->fl_start = lockt->lt_offset;
   7400	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
   7401
   7402	nfs4_transform_lock_offset(file_lock);
   7403
   7404	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
   7405	if (status)
   7406		goto out;
   7407
   7408	if (file_lock->fl_type != F_UNLCK) {
   7409		status = nfserr_denied;
   7410		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
   7411	}
   7412out:
   7413	if (lo)
   7414		nfs4_put_stateowner(&lo->lo_owner);
   7415	if (file_lock)
   7416		locks_free_lock(file_lock);
   7417	return status;
   7418}
   7419
/*
 * LOCKU operation: release the byte range held under the given lock
 * stateid, then bump and return the stateid.
 */
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	/* validate seqid/stateid; on success stp is locked and referenced */
	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
	nfsd_file_put(nf);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto put_file;
}
   7489
   7490/*
   7491 * returns
   7492 * 	true:  locks held by lockowner
   7493 * 	false: no locks held by lockowner
   7494 */
   7495static bool
   7496check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
   7497{
   7498	struct file_lock *fl;
   7499	int status = false;
   7500	struct nfsd_file *nf = find_any_file(fp);
   7501	struct inode *inode;
   7502	struct file_lock_context *flctx;
   7503
   7504	if (!nf) {
   7505		/* Any valid lock stateid should have some sort of access */
   7506		WARN_ON_ONCE(1);
   7507		return status;
   7508	}
   7509
   7510	inode = locks_inode(nf->nf_file);
   7511	flctx = inode->i_flctx;
   7512
   7513	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
   7514		spin_lock(&flctx->flc_lock);
   7515		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
   7516			if (fl->fl_owner == (fl_owner_t)lowner) {
   7517				status = true;
   7518				break;
   7519			}
   7520		}
   7521		spin_unlock(&flctx->flc_lock);
   7522	}
   7523	nfsd_file_put(nf);
   7524	return status;
   7525}
   7526
   7527/**
   7528 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
   7529 * @rqstp: RPC transaction
   7530 * @cstate: NFSv4 COMPOUND state
   7531 * @u: RELEASE_LOCKOWNER arguments
   7532 *
   7533 * The lockowner's so_count is bumped when a lock record is added
   7534 * or when copying a conflicting lock. The latter case is brief,
   7535 * but can lead to fleeting false positives when looking for
   7536 * locks-in-use.
   7537 *
   7538 * Return values:
   7539 *   %nfs_ok: lockowner released or not found
   7540 *   %nfserr_locks_held: lockowner still in use
   7541 *   %nfserr_stale_clientid: clientid no longer active
   7542 *   %nfserr_expired: clientid not recognized
   7543 */
   7544__be32
   7545nfsd4_release_lockowner(struct svc_rqst *rqstp,
   7546			struct nfsd4_compound_state *cstate,
   7547			union nfsd4_op_u *u)
   7548{
   7549	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
   7550	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
   7551	clientid_t *clid = &rlockowner->rl_clientid;
   7552	struct nfs4_ol_stateid *stp;
   7553	struct nfs4_lockowner *lo;
   7554	struct nfs4_client *clp;
   7555	LIST_HEAD(reaplist);
   7556	__be32 status;
   7557
   7558	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
   7559		clid->cl_boot, clid->cl_id);
   7560
   7561	status = set_client(clid, cstate, nn);
   7562	if (status)
   7563		return status;
   7564	clp = cstate->clp;
   7565
   7566	spin_lock(&clp->cl_lock);
   7567	lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
   7568	if (!lo) {
   7569		spin_unlock(&clp->cl_lock);
   7570		return nfs_ok;
   7571	}
   7572	if (atomic_read(&lo->lo_owner.so_count) != 2) {
   7573		spin_unlock(&clp->cl_lock);
   7574		nfs4_put_stateowner(&lo->lo_owner);
   7575		return nfserr_locks_held;
   7576	}
   7577	unhash_lockowner_locked(lo);
   7578	while (!list_empty(&lo->lo_owner.so_stateids)) {
   7579		stp = list_first_entry(&lo->lo_owner.so_stateids,
   7580				       struct nfs4_ol_stateid,
   7581				       st_perstateowner);
   7582		WARN_ON(!unhash_lock_stateid(stp));
   7583		put_ol_stateid_locked(stp, &reaplist);
   7584	}
   7585	spin_unlock(&clp->cl_lock);
   7586
   7587	free_ol_stateid_reaplist(&reaplist);
   7588	remove_blocked_locks(lo);
   7589	nfs4_put_stateowner(&lo->lo_owner);
   7590	return nfs_ok;
   7591}
   7592
   7593static inline struct nfs4_client_reclaim *
   7594alloc_reclaim(void)
   7595{
   7596	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
   7597}
   7598
   7599bool
   7600nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
   7601{
   7602	struct nfs4_client_reclaim *crp;
   7603
   7604	crp = nfsd4_find_reclaim_client(name, nn);
   7605	return (crp && crp->cr_clp);
   7606}
   7607
   7608/*
   7609 * failure => all reset bets are off, nfserr_no_grace...
   7610 *
   7611 * The caller is responsible for freeing name.data if NULL is returned (it
   7612 * will be freed in nfs4_remove_reclaim_record in the normal case).
   7613 */
   7614struct nfs4_client_reclaim *
   7615nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
   7616		struct nfsd_net *nn)
   7617{
   7618	unsigned int strhashval;
   7619	struct nfs4_client_reclaim *crp;
   7620
   7621	crp = alloc_reclaim();
   7622	if (crp) {
   7623		strhashval = clientstr_hashval(name);
   7624		INIT_LIST_HEAD(&crp->cr_strhash);
   7625		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
   7626		crp->cr_name.data = name.data;
   7627		crp->cr_name.len = name.len;
   7628		crp->cr_princhash.data = princhash.data;
   7629		crp->cr_princhash.len = princhash.len;
   7630		crp->cr_clp = NULL;
   7631		nn->reclaim_str_hashtbl_size++;
   7632	}
   7633	return crp;
   7634}
   7635
   7636void
   7637nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
   7638{
   7639	list_del(&crp->cr_strhash);
   7640	kfree(crp->cr_name.data);
   7641	kfree(crp->cr_princhash.data);
   7642	kfree(crp);
   7643	nn->reclaim_str_hashtbl_size--;
   7644}
   7645
   7646void
   7647nfs4_release_reclaim(struct nfsd_net *nn)
   7648{
   7649	struct nfs4_client_reclaim *crp = NULL;
   7650	int i;
   7651
   7652	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
   7653		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
   7654			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
   7655			                struct nfs4_client_reclaim, cr_strhash);
   7656			nfs4_remove_reclaim_record(crp, nn);
   7657		}
   7658	}
   7659	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
   7660}
   7661
   7662/*
   7663 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
   7664struct nfs4_client_reclaim *
   7665nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
   7666{
   7667	unsigned int strhashval;
   7668	struct nfs4_client_reclaim *crp = NULL;
   7669
   7670	strhashval = clientstr_hashval(name);
   7671	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
   7672		if (compare_blob(&crp->cr_name, &name) == 0) {
   7673			return crp;
   7674		}
   7675	}
   7676	return NULL;
   7677}
   7678
/* May this client still perform a CLAIM_PREVIOUS open reclaim? */
__be32
nfs4_check_open_reclaim(struct nfs4_client *clp)
{
	/* client already declared its reclaim complete: too late */
	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		return nfserr_no_grace;

	/* no stable-storage record for this client: reclaim not allowed */
	if (nfsd4_client_record_check(clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
   7690
   7691/*
   7692 * Since the lifetime of a delegation isn't limited to that of an open, a
   7693 * client may quite reasonably hang on to a delegation as long as it has
   7694 * the inode cached.  This becomes an obvious problem the first time a
   7695 * client's inode cache approaches the size of the server's total memory.
   7696 *
   7697 * For now we avoid this problem by imposing a hard limit on the number
   7698 * of delegations, which varies according to the server's memory size.
   7699 */
   7700static void
   7701set_max_delegations(void)
   7702{
   7703	/*
   7704	 * Allow at most 4 delegations per megabyte of RAM.  Quick
   7705	 * estimates suggest that in the worst case (where every delegation
   7706	 * is for a different inode), a delegation could take about 1.5K,
   7707	 * giving a worst case usage of about 6% of memory.
   7708	 */
   7709	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
   7710}
   7711
/*
 * Allocate and initialize all per-network-namespace NFSv4 state:
 * client-id and session hash tables, client name trees, LRU lists,
 * locks, and the laundromat work item.  Returns 0 on success or
 * -ENOMEM, unwinding partial allocations via the goto chain below.
 */
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	/* Block opens until the grace-period manager decides otherwise. */
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	/* Hold the netns until nfs4_state_destroy_net() drops it. */
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
   7767
/*
 * Tear down per-netns NFSv4 state: destroy every confirmed client,
 * then every unconfirmed client, free the hash tables, and drop the
 * netns reference taken in nfs4_state_create_net().
 */
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	/* Client teardown should have drained any blocked lock requests. */
	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
   7796
/*
 * Per-netns state startup: create the state tables, enter the grace
 * period, and schedule the laundromat.  If reclaim tracking is active
 * and shows no clients with state to reclaim, the grace period is
 * ended immediately instead.  Returns 0 or a -errno from table setup.
 */
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	/* Tracking init populates reclaim_str_hashtbl_size, checked here. */
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
			net->ns.inum);
	/* No grace: first laundromat run is a lease period out instead. */
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}
   7823
   7824/* initialization to perform when the nfsd service is started: */
   7825
   7826int
   7827nfs4_state_start(void)
   7828{
   7829	int ret;
   7830
   7831	ret = nfsd4_create_callback_queue();
   7832	if (ret)
   7833		return ret;
   7834
   7835	set_max_delegations();
   7836	return 0;
   7837}
   7838
/*
 * Per-netns shutdown: stop the laundromat, end the grace period,
 * revoke all outstanding delegations, and release the remaining
 * per-net state.
 */
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	/*
	 * Unhash all delegations while holding state_lock, collecting
	 * them on a private reaplist; actual destruction is deferred
	 * until after the lock is dropped.
	 */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_shutdown_umount(nn);
#endif
}
   7869
/*
 * Global counterpart of nfs4_state_start(): tear down the callback
 * workqueue created there.
 */
void
nfs4_state_shutdown(void)
{
	nfsd4_destroy_callback_queue();
}
   7875
/*
 * If the operation's stateid is the special "current stateid" value
 * and a current stateid has been saved in this compound, substitute
 * the saved stateid in place.
 */
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
   7883
/*
 * Record @stateid as the compound's current stateid.  Only done for
 * minorversion >= 1 compounds; NFSv4.0 has no current-stateid feature.
 */
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}
   7892
/* Forget any saved current stateid for this compound. */
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
   7898
   7899/*
   7900 * functions to set current state id
   7901 */
/* Save the OPEN_DOWNGRADE result stateid as the current stateid. */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}
   7908
/* Save the OPEN result stateid as the current stateid. */
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}
   7915
/* Save the CLOSE result stateid as the current stateid. */
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}
   7922
/* Save the LOCK result stateid as the current stateid. */
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}
   7929
   7930/*
   7931 * functions to consume current state id
   7932 */
   7933
/* Let OPEN_DOWNGRADE consume the current stateid, if requested. */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}
   7940
/* Let DELEGRETURN consume the current stateid, if requested. */
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}
   7947
/* Let FREE_STATEID consume the current stateid, if requested. */
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}
   7954
/* Let SETATTR consume the current stateid, if requested. */
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}
   7961
/* Let CLOSE consume the current stateid, if requested. */
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}
   7968
/* Let LOCKU consume the current stateid, if requested. */
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}
   7975
/* Let READ consume the current stateid, if requested. */
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}
   7982
/* Let WRITE consume the current stateid, if requested. */
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}