cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nfs4layouts.c (19073B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PNFS

struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

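/*
 * Allocate a device-ID map for the export behind fhp. The map is
 * allocated speculatively before taking nfsd_devid_lock; under the lock
 * we either reuse an existing map with a matching fsid or hash the new
 * one under the next sequence number, and any unused allocation is freed
 * after unlocking. Failure is not reported: callers detect it by seeing
 * ex_devid_map still unset.
 */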
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

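/*
 * Look up a device-ID map by index. Lookup runs under rcu_read_lock(),
 * pairing with the RCU list insertion in nfsd4_alloc_devid_map().
 */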
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

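/*
 * Compose the deviceid handed to the client: the export's map index plus
 * the layout driver's device generation. The map is created lazily on
 * first use.
 */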
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

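/*
 * Record which layout types an export can offer. Flex-files has no
 * filesystem prerequisites; block layouts need export_operations support
 * for UUIDs and block mapping, and SCSI layouts additionally need
 * persistent-reservation and unique-ID support from the block device.
 */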
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev &&
	    sb->s_bdev->bd_disk->fops->pr_ops &&
	    sb->s_bdev->bd_disk->fops->get_unique_id)
		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}

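/*
 * sc_free callback for a layout stateid: unhook it from the per-client
 * and per-file lists, drop the layout lease unless the layout driver
 * disables recalls, and release the file and stateid-cache resources.
 */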
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
		vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls);
	nfsd_file_put(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

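/*
 * Install an FL_LAYOUT read lease on the file backing the layout so that
 * conflicting local access triggers lm_break and a layout recall. On
 * success the lease code takes ownership of the lock, as the BUG_ON
 * below asserts.
 */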
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
		return 0;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file->nf_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

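/*
 * Create a layout stateid derived from the open, lock or delegation
 * stateid presented by the client, install the recall lease, and hash
 * the new stateid on the client's and the file's layout lists.
 */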
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
					nfsd4_free_layout_stateid);
	if (!stp)
		return NULL;

	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		nfsd_file_put(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

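/*
 * Look up and validate the stateid used in a layout operation. For
 * LAYOUTGET (create == true) an open, lock or delegation stateid is
 * promoted to a fresh layout stateid; an existing layout stateid is
 * checked against the seqid and layout type instead. On success *lsp is
 * returned referenced and with ls_mutex held.
 */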
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

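/*
 * Start a CB_LAYOUTRECALL for this stateid unless one is already
 * outstanding. The extra sc_count reference pins the stateid for the
 * duration of the callback; it is dropped in nfsd4_cb_layout_release().
 */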
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);

	refcount_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}

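/*
 * Segment arithmetic helpers. layout_end() clamps offset + length to
 * NFS4_MAX_UINT64 on overflow, so a length of NFS4_MAX_UINT64 means
 * "to the end of the file".
 */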
static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

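/*
 * Recall every other layout stateid on the file. Called with fi_lock
 * held; returns nfserr_recallconflict if any conflicting stateid was
 * found, prompting the client to retry later.
 */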
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

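/*
 * Add a newly granted segment to a layout stateid. The first pass tries
 * to merge into an existing segment under fi_lock/ls_lock; if nothing
 * merges, both locks are dropped to allocate, and the conflict check and
 * merge attempt are repeated before the new segment is added.
 */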
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	refcount_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

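/*
 * Trim one segment against a returned range: a fully covered segment
 * moves to the reaplist, a head or tail overlap shrinks the segment, and
 * a return that would split the segment in the middle is ignored, since
 * splits are not supported.
 */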
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

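/*
 * Handle LAYOUTRETURN of a byte range on a single file: trim or reap all
 * overlapping segments, bump the stateid seqid if anything matched, and
 * unhash the stateid once no segments remain. lrs_present tells the
 * client whether the stateid is still valid.
 */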
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = 1;
	} else {
		trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

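/*
 * Handle LAYOUTRETURN with FSID or ALL scope: walk all of the client's
 * layout stateids of the requested type, optionally filtered by fsid,
 * and reap every segment whose iomode matches.
 */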
__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

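/*
 * Fence a client that failed to respond to a layout recall by running
 * the /sbin/nfsd-recall-failed usermode helper with the client's address
 * and the exported filesystem's superblock ID as arguments.
 */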
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"Fencing...\n", addr_str);

	argv[0] = (char *)nfsd_recall_failed;
	argv[1] = addr_str;
	argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(nfsd_recall_failed, argv, envp,
				    UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}

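/*
 * Completion handler for CB_LAYOUTRECALL. On success or NFS4ERR_DELAY
 * the client is polled until the layout comes back or two lease periods
 * expire, after which it is fenced. NFS4ERR_NOMATCHING_LAYOUT means the
 * client no longer holds the layout and the recall is complete.
 */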
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;

	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
					 (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		fallthrough;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);

		ops = nfsd4_layout_ops[ls->ls_layout_type];
		if (ops->fence_client)
			ops->fence_client(ls);
		else
			nfsd4_cb_layout_fail(ls);
		return 1;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

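/*
 * lm_break for the layout lease: clear the break timeout (the recall
 * state machine fences unresponsive clients itself) and issue a layout
 * recall; returning false keeps the lease in place.
 */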
static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}