cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xfs_inode.c (106005B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
      4 * All Rights Reserved.
      5 */
      6#include <linux/iversion.h>
      7
      8#include "xfs.h"
      9#include "xfs_fs.h"
     10#include "xfs_shared.h"
     11#include "xfs_format.h"
     12#include "xfs_log_format.h"
     13#include "xfs_trans_resv.h"
     14#include "xfs_mount.h"
     15#include "xfs_defer.h"
     16#include "xfs_inode.h"
     17#include "xfs_dir2.h"
     18#include "xfs_attr.h"
     19#include "xfs_trans_space.h"
     20#include "xfs_trans.h"
     21#include "xfs_buf_item.h"
     22#include "xfs_inode_item.h"
     23#include "xfs_ialloc.h"
     24#include "xfs_bmap.h"
     25#include "xfs_bmap_util.h"
     26#include "xfs_errortag.h"
     27#include "xfs_error.h"
     28#include "xfs_quota.h"
     29#include "xfs_filestream.h"
     30#include "xfs_trace.h"
     31#include "xfs_icache.h"
     32#include "xfs_symlink.h"
     33#include "xfs_trans_priv.h"
     34#include "xfs_log.h"
     35#include "xfs_bmap_btree.h"
     36#include "xfs_reflink.h"
     37#include "xfs_ag.h"
     38#include "xfs_log_priv.h"
     39
     40struct kmem_cache *xfs_inode_cache;
     41
     42/*
     43 * Used in xfs_itruncate_extents().  This is the maximum number of extents
     44 * freed from a file in a single transaction.
     45 */
     46#define	XFS_ITRUNC_MAX_EXTENTS	2
     47
     48STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
     49STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
     50	struct xfs_inode *);
     51
     52/*
     53 * helper function to extract extent size hint from inode
     54 */
     55xfs_extlen_t
     56xfs_get_extsz_hint(
     57	struct xfs_inode	*ip)
     58{
     59	/*
     60	 * No point in aligning allocations if we need to COW to actually
     61	 * write to them.
     62	 */
     63	if (xfs_is_always_cow_inode(ip))
     64		return 0;
     65	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
     66		return ip->i_extsize;
     67	if (XFS_IS_REALTIME_INODE(ip))
     68		return ip->i_mount->m_sb.sb_rextsize;
     69	return 0;
     70}
     71
     72/*
     73 * Helper function to extract CoW extent size hint from inode.
     74 * Between the extent size hint and the CoW extent size hint, we
     75 * return the greater of the two.  If the value is zero (automatic),
     76 * use the default size.
     77 */
     78xfs_extlen_t
     79xfs_get_cowextsz_hint(
     80	struct xfs_inode	*ip)
     81{
     82	xfs_extlen_t		a, b;
     83
     84	a = 0;
     85	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
     86		a = ip->i_cowextsize;
     87	b = xfs_get_extsz_hint(ip);
     88
     89	a = max(a, b);
     90	if (a == 0)
     91		return XFS_DEFAULT_COWEXTSZ_HINT;
     92	return a;
     93}
     94
     95/*
     96 * These two are wrapper routines around the xfs_ilock() routine used to
     97 * centralize some grungy code.  They are used in places that wish to lock the
     98 * inode solely for reading the extents.  The reason these places can't just
     99 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
    100 * bringing in of the extents from disk for a file in b-tree format.  If the
    101 * inode is in b-tree format, then we need to lock the inode exclusively until
    102 * the extents are read in.  Locking it exclusively all the time would limit
    103 * our parallelism unnecessarily, though.  What we do instead is check to see
    104 * if the extents have been read in yet, and only lock the inode exclusively
    105 * if they have not.
    106 *
    107 * The functions return a value which should be given to the corresponding
    108 * xfs_iunlock() call.
    109 */
    110uint
    111xfs_ilock_data_map_shared(
    112	struct xfs_inode	*ip)
    113{
    114	uint			lock_mode = XFS_ILOCK_SHARED;
    115
    116	if (xfs_need_iread_extents(&ip->i_df))
    117		lock_mode = XFS_ILOCK_EXCL;
    118	xfs_ilock(ip, lock_mode);
    119	return lock_mode;
    120}
    121
    122uint
    123xfs_ilock_attr_map_shared(
    124	struct xfs_inode	*ip)
    125{
    126	uint			lock_mode = XFS_ILOCK_SHARED;
    127
    128	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
    129		lock_mode = XFS_ILOCK_EXCL;
    130	xfs_ilock(ip, lock_mode);
    131	return lock_mode;
    132}
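/*
 * Editorial usage sketch (not part of the original source): as the comment
 * above says, the returned lock mode must be handed back to xfs_iunlock(),
 * since the helper may have taken the ilock shared or exclusive depending on
 * whether the extent list still needs to be read in.  "ip" stands for any
 * struct xfs_inode pointer.
 */
#if 0	/* illustrative only, not compiled */
	{
		uint	lock_mode;

		lock_mode = xfs_ilock_data_map_shared(ip);
		/* ... walk the data fork extent list ... */
		xfs_iunlock(ip, lock_mode);
	}
#endif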
    133
    134/*
    135 * You can't set both SHARED and EXCL for the same lock,
    136 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
    137 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
    138 * to set in lock_flags.
    139 */
    140static inline void
    141xfs_lock_flags_assert(
    142	uint		lock_flags)
    143{
    144	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
    145		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
    146	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
    147		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
    148	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
    149		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
    150	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
    151	ASSERT(lock_flags != 0);
    152}
    153
    154/*
    155 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
    156 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
    157 * various combinations of the locks to be obtained.
    158 *
    159 * The 3 locks should always be ordered so that the IO lock is obtained first,
    160 * the mmap lock second and the ilock last in order to prevent deadlock.
    161 *
    162 * Basic locking order:
    163 *
    164 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
    165 *
    166 * mmap_lock locking order:
    167 *
    168 * i_rwsem -> page lock -> mmap_lock
    169 * mmap_lock -> invalidate_lock -> page_lock
    170 *
    171 * The difference in mmap_lock locking order means that we cannot hold the
    172 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
    173 * can fault in pages during copy in/out (for buffered IO) or require the
    174 * mmap_lock in get_user_pages() to map the user pages into the kernel address
    175 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
    176 * fault because page faults already hold the mmap_lock.
    177 *
    178 * Hence to serialise fully against both syscall and mmap based IO, we need to
    179 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
    180 * both taken in places where we need to invalidate the page cache in a race
    181 * free manner (e.g. truncate, hole punch and other extent manipulation
    182 * functions).
    183 */
    184void
    185xfs_ilock(
    186	xfs_inode_t		*ip,
    187	uint			lock_flags)
    188{
    189	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
    190
    191	xfs_lock_flags_assert(lock_flags);
    192
    193	if (lock_flags & XFS_IOLOCK_EXCL) {
    194		down_write_nested(&VFS_I(ip)->i_rwsem,
    195				  XFS_IOLOCK_DEP(lock_flags));
    196	} else if (lock_flags & XFS_IOLOCK_SHARED) {
    197		down_read_nested(&VFS_I(ip)->i_rwsem,
    198				 XFS_IOLOCK_DEP(lock_flags));
    199	}
    200
    201	if (lock_flags & XFS_MMAPLOCK_EXCL) {
    202		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
    203				  XFS_MMAPLOCK_DEP(lock_flags));
    204	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
    205		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
    206				 XFS_MMAPLOCK_DEP(lock_flags));
    207	}
    208
    209	if (lock_flags & XFS_ILOCK_EXCL)
    210		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
    211	else if (lock_flags & XFS_ILOCK_SHARED)
    212		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
    213}
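/*
 * Editorial sketch, hedged: callers pass at most one flag per lock class
 * (flags from different classes may be combined in one call, subject to
 * xfs_lock_flags_assert() above) and must pass the same flags back to
 * xfs_iunlock().  "ip" is a hypothetical struct xfs_inode pointer.
 */
#if 0	/* illustrative only, not compiled */
	xfs_ilock(ip, XFS_ILOCK_EXCL);		/* inode metadata lock */
	/* ... modify incore inode state ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
#endif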
    214
    215/*
    216 * This is just like xfs_ilock(), except that the caller
    217 * is guaranteed not to sleep.  It returns 1 if it gets
    218 * the requested locks and 0 otherwise.  If the IO lock is
    219 * obtained but the inode lock cannot be, then the IO lock
    220 * is dropped before returning.
    221 *
    222 * ip -- the inode being locked
    223 * lock_flags -- this parameter indicates the inode's locks
    224 *       to be locked.  See the comment for xfs_ilock() for a list
    225 *	 of valid values.
    226 */
    227int
    228xfs_ilock_nowait(
    229	xfs_inode_t		*ip,
    230	uint			lock_flags)
    231{
    232	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
    233
    234	xfs_lock_flags_assert(lock_flags);
    235
    236	if (lock_flags & XFS_IOLOCK_EXCL) {
    237		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
    238			goto out;
    239	} else if (lock_flags & XFS_IOLOCK_SHARED) {
    240		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
    241			goto out;
    242	}
    243
    244	if (lock_flags & XFS_MMAPLOCK_EXCL) {
    245		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
    246			goto out_undo_iolock;
    247	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
    248		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
    249			goto out_undo_iolock;
    250	}
    251
    252	if (lock_flags & XFS_ILOCK_EXCL) {
    253		if (!mrtryupdate(&ip->i_lock))
    254			goto out_undo_mmaplock;
    255	} else if (lock_flags & XFS_ILOCK_SHARED) {
    256		if (!mrtryaccess(&ip->i_lock))
    257			goto out_undo_mmaplock;
    258	}
    259	return 1;
    260
    261out_undo_mmaplock:
    262	if (lock_flags & XFS_MMAPLOCK_EXCL)
    263		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
    264	else if (lock_flags & XFS_MMAPLOCK_SHARED)
    265		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
    266out_undo_iolock:
    267	if (lock_flags & XFS_IOLOCK_EXCL)
    268		up_write(&VFS_I(ip)->i_rwsem);
    269	else if (lock_flags & XFS_IOLOCK_SHARED)
    270		up_read(&VFS_I(ip)->i_rwsem);
    271out:
    272	return 0;
    273}
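/*
 * Editorial sketch, hedged: the non-blocking variant suits callers that must
 * not sleep on the lock, e.g. the back-off pattern used by xfs_release()
 * further down in this file.
 */
#if 0	/* illustrative only, not compiled */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return 0;	/* skip the optional work rather than sleep */
	/* ... optional work such as trimming post-EOF blocks ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
#endif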
    274
    275/*
    276 * xfs_iunlock() is used to drop the inode locks acquired with
    277 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
    278 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
    279 * that we know which locks to drop.
    280 *
    281 * ip -- the inode being unlocked
    282 * lock_flags -- this parameter indicates the inode's locks
    283 *       to be unlocked.  See the comment for xfs_ilock() for a list
    284 *	 of valid values for this parameter.
    285 *
    286 */
    287void
    288xfs_iunlock(
    289	xfs_inode_t		*ip,
    290	uint			lock_flags)
    291{
    292	xfs_lock_flags_assert(lock_flags);
    293
    294	if (lock_flags & XFS_IOLOCK_EXCL)
    295		up_write(&VFS_I(ip)->i_rwsem);
    296	else if (lock_flags & XFS_IOLOCK_SHARED)
    297		up_read(&VFS_I(ip)->i_rwsem);
    298
    299	if (lock_flags & XFS_MMAPLOCK_EXCL)
    300		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
    301	else if (lock_flags & XFS_MMAPLOCK_SHARED)
    302		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
    303
    304	if (lock_flags & XFS_ILOCK_EXCL)
    305		mrunlock_excl(&ip->i_lock);
    306	else if (lock_flags & XFS_ILOCK_SHARED)
    307		mrunlock_shared(&ip->i_lock);
    308
    309	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
    310}
    311
    312/*
    313 * give up write locks.  the i/o lock cannot be held nested
    314 * if it is being demoted.
    315 */
    316void
    317xfs_ilock_demote(
    318	xfs_inode_t		*ip,
    319	uint			lock_flags)
    320{
    321	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
    322	ASSERT((lock_flags &
    323		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
    324
    325	if (lock_flags & XFS_ILOCK_EXCL)
    326		mrdemote(&ip->i_lock);
    327	if (lock_flags & XFS_MMAPLOCK_EXCL)
    328		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
    329	if (lock_flags & XFS_IOLOCK_EXCL)
    330		downgrade_write(&VFS_I(ip)->i_rwsem);
    331
    332	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
    333}
    334
    335#if defined(DEBUG) || defined(XFS_WARN)
    336static inline bool
    337__xfs_rwsem_islocked(
    338	struct rw_semaphore	*rwsem,
    339	bool			shared)
    340{
    341	if (!debug_locks)
    342		return rwsem_is_locked(rwsem);
    343
    344	if (!shared)
    345		return lockdep_is_held_type(rwsem, 0);
    346
    347	/*
    348	 * We are checking that the lock is held at least in shared
    349	 * mode but don't care that it might be held exclusively
    350	 * (i.e. shared | excl). Hence we check if the lock is held
    351	 * in any mode rather than an explicit shared mode.
    352	 */
    353	return lockdep_is_held_type(rwsem, -1);
    354}
    355
    356bool
    357xfs_isilocked(
    358	struct xfs_inode	*ip,
    359	uint			lock_flags)
    360{
    361	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
    362		if (!(lock_flags & XFS_ILOCK_SHARED))
    363			return !!ip->i_lock.mr_writer;
    364		return rwsem_is_locked(&ip->i_lock.mr_lock);
    365	}
    366
    367	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
    368		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
    369				(lock_flags & XFS_MMAPLOCK_SHARED));
    370	}
    371
    372	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
    373		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
    374				(lock_flags & XFS_IOLOCK_SHARED));
    375	}
    376
    377	ASSERT(0);
    378	return false;
    379}
    380#endif
    381
    382/*
    383 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
    384 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
    385 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
    386 * errors and warnings.
    387 */
    388#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
    389static bool
    390xfs_lockdep_subclass_ok(
    391	int subclass)
    392{
    393	return subclass < MAX_LOCKDEP_SUBCLASSES;
    394}
    395#else
    396#define xfs_lockdep_subclass_ok(subclass)	(true)
    397#endif
    398
    399/*
    400 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
    401 * value. This can be called for any type of inode lock combination, including
    402 * parent locking. Care must be taken to ensure we don't overrun the subclass
    403 * storage fields in the class mask we build.
    404 */
    405static inline uint
    406xfs_lock_inumorder(
    407	uint	lock_mode,
    408	uint	subclass)
    409{
    410	uint	class = 0;
    411
    412	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
    413			      XFS_ILOCK_RTSUM)));
    414	ASSERT(xfs_lockdep_subclass_ok(subclass));
    415
    416	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
    417		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
    418		class += subclass << XFS_IOLOCK_SHIFT;
    419	}
    420
    421	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
    422		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
    423		class += subclass << XFS_MMAPLOCK_SHIFT;
    424	}
    425
    426	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
    427		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
    428		class += subclass << XFS_ILOCK_SHIFT;
    429	}
    430
    431	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
    432}
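/*
 * Editorial sketch, hedged: the subclass is folded into the otherwise unused
 * subclass bits of the lock mode, giving each inode of an ordered set its own
 * lockdep subclass.  This mirrors the calls in xfs_lock_inodes() below, e.g.
 * for the third inode of a set:
 */
#if 0	/* illustrative only, not compiled */
	xfs_ilock(ips[2], xfs_lock_inumorder(XFS_ILOCK_EXCL, 2));
#endif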
    433
    434/*
    435 * The following routine will lock n inodes in exclusive mode.  We assume the
    436 * caller calls us with the inodes in i_ino order.
    437 *
    438 * We need to detect deadlock where an inode that we lock is in the AIL and we
    439 * start waiting for another inode that is locked by a thread in a long running
    440 * transaction (such as truncate). This can result in deadlock since the long
    441 * running trans might need to wait for the inode we just locked in order to
    442 * push the tail and free space in the log.
    443 *
    444 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
    445 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
    446 * lock more than one at a time, lockdep will report false positives saying we
    447 * have violated locking orders.
    448 */
    449static void
    450xfs_lock_inodes(
    451	struct xfs_inode	**ips,
    452	int			inodes,
    453	uint			lock_mode)
    454{
    455	int			attempts = 0;
    456	uint			i;
    457	int			j;
    458	bool			try_lock;
    459	struct xfs_log_item	*lp;
    460
    461	/*
    462	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
    463	 * support an arbitrary depth of locking here, but absolute limits on
    464	 * inodes depend on the type of locking and the limits placed by
    465	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
    466	 * the asserts.
    467	 */
    468	ASSERT(ips && inodes >= 2 && inodes <= 5);
    469	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
    470			    XFS_ILOCK_EXCL));
    471	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
    472			      XFS_ILOCK_SHARED)));
    473	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
    474		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
    475	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
    476		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
    477
    478	if (lock_mode & XFS_IOLOCK_EXCL) {
    479		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
    480	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
    481		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
    482
    483again:
    484	try_lock = false;
    485	i = 0;
    486	for (; i < inodes; i++) {
    487		ASSERT(ips[i]);
    488
    489		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
    490			continue;
    491
    492		/*
    493		 * If try_lock is not set yet, make sure all locked inodes are
    494		 * not in the AIL.  If any are, set try_lock to be used later.
    495		 */
    496		if (!try_lock) {
    497			for (j = (i - 1); j >= 0 && !try_lock; j--) {
    498				lp = &ips[j]->i_itemp->ili_item;
    499				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
    500					try_lock = true;
    501			}
    502		}
    503
    504		/*
    505		 * If any of the previous locks we have locked is in the AIL,
    506		 * we must TRY to get the second and subsequent locks. If
    507		 * we can't get any, we must release all we have
    508		 * and try again.
    509		 */
    510		if (!try_lock) {
    511			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
    512			continue;
    513		}
    514
    515		/* try_lock means we have an inode locked that is in the AIL. */
    516		ASSERT(i != 0);
    517		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
    518			continue;
    519
    520		/*
    521		 * Unlock all previous guys and try again.  xfs_iunlock will try
    522		 * to push the tail if the inode is in the AIL.
    523		 */
    524		attempts++;
    525		for (j = i - 1; j >= 0; j--) {
    526			/*
    527			 * Check to see if we've already unlocked this one.  Not
    528			 * the first one going back, and the inode ptr is the
    529			 * same.
    530			 */
    531			if (j != (i - 1) && ips[j] == ips[j + 1])
    532				continue;
    533
    534			xfs_iunlock(ips[j], lock_mode);
    535		}
    536
    537		if ((attempts % 5) == 0) {
    538			delay(1); /* Don't just spin the CPU */
    539		}
    540		goto again;
    541	}
    542}
    543
    544/*
    545 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
    546 * mmaplock must be double-locked separately since we use i_rwsem and
    547 * invalidate_lock for that. We now support taking one lock EXCL and the
    548 * other SHARED.
    549 */
    550void
    551xfs_lock_two_inodes(
    552	struct xfs_inode	*ip0,
    553	uint			ip0_mode,
    554	struct xfs_inode	*ip1,
    555	uint			ip1_mode)
    556{
    557	int			attempts = 0;
    558	struct xfs_log_item	*lp;
    559
    560	ASSERT(hweight32(ip0_mode) == 1);
    561	ASSERT(hweight32(ip1_mode) == 1);
    562	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
    563	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
    564	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
    565	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
    566	ASSERT(ip0->i_ino != ip1->i_ino);
    567
    568	if (ip0->i_ino > ip1->i_ino) {
    569		swap(ip0, ip1);
    570		swap(ip0_mode, ip1_mode);
    571	}
    572
    573 again:
    574	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
    575
    576	/*
    577	 * If the first lock we have locked is in the AIL, we must TRY to get
    578	 * the second lock. If we can't get it, we must release the first one
    579	 * and try again.
    580	 */
    581	lp = &ip0->i_itemp->ili_item;
    582	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
    583		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
    584			xfs_iunlock(ip0, ip0_mode);
    585			if ((++attempts % 5) == 0)
    586				delay(1); /* Don't just spin the CPU */
    587			goto again;
    588		}
    589	} else {
    590		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
    591	}
    592}
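/*
 * Editorial sketch, hedged: callers simply hand over both inodes and modes;
 * the i_ino ordering and AIL back-off are handled internally, and unlocking
 * is done with ordinary xfs_iunlock() calls.  "dp" and "ip" are hypothetical
 * inode pointers.
 */
#if 0	/* illustrative only, not compiled */
	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
	/* ... operate on both inodes ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
#endif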
    593
    594uint
    595xfs_ip2xflags(
    596	struct xfs_inode	*ip)
    597{
    598	uint			flags = 0;
    599
    600	if (ip->i_diflags & XFS_DIFLAG_ANY) {
    601		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
    602			flags |= FS_XFLAG_REALTIME;
    603		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
    604			flags |= FS_XFLAG_PREALLOC;
    605		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
    606			flags |= FS_XFLAG_IMMUTABLE;
    607		if (ip->i_diflags & XFS_DIFLAG_APPEND)
    608			flags |= FS_XFLAG_APPEND;
    609		if (ip->i_diflags & XFS_DIFLAG_SYNC)
    610			flags |= FS_XFLAG_SYNC;
    611		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
    612			flags |= FS_XFLAG_NOATIME;
    613		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
    614			flags |= FS_XFLAG_NODUMP;
    615		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
    616			flags |= FS_XFLAG_RTINHERIT;
    617		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
    618			flags |= FS_XFLAG_PROJINHERIT;
    619		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
    620			flags |= FS_XFLAG_NOSYMLINKS;
    621		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
    622			flags |= FS_XFLAG_EXTSIZE;
    623		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
    624			flags |= FS_XFLAG_EXTSZINHERIT;
    625		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
    626			flags |= FS_XFLAG_NODEFRAG;
    627		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
    628			flags |= FS_XFLAG_FILESTREAM;
    629	}
    630
    631	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
    632		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
    633			flags |= FS_XFLAG_DAX;
    634		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
    635			flags |= FS_XFLAG_COWEXTSIZE;
    636	}
    637
    638	if (XFS_IFORK_Q(ip))
    639		flags |= FS_XFLAG_HASATTR;
    640	return flags;
    641}
    642
    643/*
    644 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
    645 * is allowed, otherwise it has to be an exact match. If a CI match is found,
    646 * ci_name->name will point to the actual name (caller must free) or
    647 * will be set to NULL if an exact match is found.
    648 */
    649int
    650xfs_lookup(
    651	struct xfs_inode	*dp,
    652	const struct xfs_name	*name,
    653	struct xfs_inode	**ipp,
    654	struct xfs_name		*ci_name)
    655{
    656	xfs_ino_t		inum;
    657	int			error;
    658
    659	trace_xfs_lookup(dp, name);
    660
    661	if (xfs_is_shutdown(dp->i_mount))
    662		return -EIO;
    663
    664	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
    665	if (error)
    666		goto out_unlock;
    667
    668	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
    669	if (error)
    670		goto out_free_name;
    671
    672	return 0;
    673
    674out_free_name:
    675	if (ci_name)
    676		kmem_free(ci_name->name);
    677out_unlock:
    678	*ipp = NULL;
    679	return error;
    680}
    681
    682/* Propagate di_flags from a parent inode to a child inode. */
    683static void
    684xfs_inode_inherit_flags(
    685	struct xfs_inode	*ip,
    686	const struct xfs_inode	*pip)
    687{
    688	unsigned int		di_flags = 0;
    689	xfs_failaddr_t		failaddr;
    690	umode_t			mode = VFS_I(ip)->i_mode;
    691
    692	if (S_ISDIR(mode)) {
    693		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
    694			di_flags |= XFS_DIFLAG_RTINHERIT;
    695		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
    696			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
    697			ip->i_extsize = pip->i_extsize;
    698		}
    699		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
    700			di_flags |= XFS_DIFLAG_PROJINHERIT;
    701	} else if (S_ISREG(mode)) {
    702		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
    703		    xfs_has_realtime(ip->i_mount))
    704			di_flags |= XFS_DIFLAG_REALTIME;
    705		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
    706			di_flags |= XFS_DIFLAG_EXTSIZE;
    707			ip->i_extsize = pip->i_extsize;
    708		}
    709	}
    710	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
    711	    xfs_inherit_noatime)
    712		di_flags |= XFS_DIFLAG_NOATIME;
    713	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
    714	    xfs_inherit_nodump)
    715		di_flags |= XFS_DIFLAG_NODUMP;
    716	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
    717	    xfs_inherit_sync)
    718		di_flags |= XFS_DIFLAG_SYNC;
    719	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
    720	    xfs_inherit_nosymlinks)
    721		di_flags |= XFS_DIFLAG_NOSYMLINKS;
    722	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
    723	    xfs_inherit_nodefrag)
    724		di_flags |= XFS_DIFLAG_NODEFRAG;
    725	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
    726		di_flags |= XFS_DIFLAG_FILESTREAM;
    727
    728	ip->i_diflags |= di_flags;
    729
    730	/*
    731	 * Inode verifiers on older kernels only check that the extent size
    732	 * hint is an integer multiple of the rt extent size on realtime files.
    733	 * They did not check the hint alignment on a directory with both
    734	 * rtinherit and extszinherit flags set.  If the misaligned hint is
    735	 * propagated from a directory into a new realtime file, new file
    736	 * allocations will fail due to math errors in the rt allocator and/or
    737	 * trip the verifiers.  Validate the hint settings in the new file so
    738	 * that we don't let broken hints propagate.
    739	 */
    740	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
    741			VFS_I(ip)->i_mode, ip->i_diflags);
    742	if (failaddr) {
    743		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
    744				   XFS_DIFLAG_EXTSZINHERIT);
    745		ip->i_extsize = 0;
    746	}
    747}
    748
    749/* Propagate di_flags2 from a parent inode to a child inode. */
    750static void
    751xfs_inode_inherit_flags2(
    752	struct xfs_inode	*ip,
    753	const struct xfs_inode	*pip)
    754{
    755	xfs_failaddr_t		failaddr;
    756
    757	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
    758		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
    759		ip->i_cowextsize = pip->i_cowextsize;
    760	}
    761	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
    762		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
    763
    764	/* Don't let invalid cowextsize hints propagate. */
    765	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
    766			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
    767	if (failaddr) {
    768		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
    769		ip->i_cowextsize = 0;
    770	}
    771}
    772
    773/*
    774 * Initialise a newly allocated inode and return the in-core inode to the
    775 * caller locked exclusively.
    776 */
    777int
    778xfs_init_new_inode(
    779	struct user_namespace	*mnt_userns,
    780	struct xfs_trans	*tp,
    781	struct xfs_inode	*pip,
    782	xfs_ino_t		ino,
    783	umode_t			mode,
    784	xfs_nlink_t		nlink,
    785	dev_t			rdev,
    786	prid_t			prid,
    787	bool			init_xattrs,
    788	struct xfs_inode	**ipp)
    789{
    790	struct inode		*dir = pip ? VFS_I(pip) : NULL;
    791	struct xfs_mount	*mp = tp->t_mountp;
    792	struct xfs_inode	*ip;
    793	unsigned int		flags;
    794	int			error;
    795	struct timespec64	tv;
    796	struct inode		*inode;
    797
    798	/*
    799	 * Protect against obviously corrupt allocation btree records. Later
    800	 * xfs_iget checks will catch re-allocation of other active in-memory
    801	 * and on-disk inodes. If we don't catch reallocating the parent inode
    802	 * here we will deadlock in xfs_iget() so we have to do these checks
    803	 * first.
    804	 */
    805	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
    806		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
    807		return -EFSCORRUPTED;
    808	}
    809
    810	/*
    811	 * Get the in-core inode with the lock held exclusively to prevent
    812	 * others from looking at until we're done.
    813	 */
    814	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
    815	if (error)
    816		return error;
    817
    818	ASSERT(ip != NULL);
    819	inode = VFS_I(ip);
    820	set_nlink(inode, nlink);
    821	inode->i_rdev = rdev;
    822	ip->i_projid = prid;
    823
    824	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
    825		inode_fsuid_set(inode, mnt_userns);
    826		inode->i_gid = dir->i_gid;
    827		inode->i_mode = mode;
    828	} else {
    829		inode_init_owner(mnt_userns, inode, dir, mode);
    830	}
    831
    832	/*
    833	 * If the group ID of the new file does not match the effective group
    834	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
    835	 * (and only if the irix_sgid_inherit compatibility variable is set).
    836	 */
    837	if (irix_sgid_inherit &&
    838	    (inode->i_mode & S_ISGID) &&
    839	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
    840		inode->i_mode &= ~S_ISGID;
    841
    842	ip->i_disk_size = 0;
    843	ip->i_df.if_nextents = 0;
    844	ASSERT(ip->i_nblocks == 0);
    845
    846	tv = current_time(inode);
    847	inode->i_mtime = tv;
    848	inode->i_atime = tv;
    849	inode->i_ctime = tv;
    850
    851	ip->i_extsize = 0;
    852	ip->i_diflags = 0;
    853
    854	if (xfs_has_v3inodes(mp)) {
    855		inode_set_iversion(inode, 1);
    856		ip->i_cowextsize = 0;
    857		ip->i_crtime = tv;
    858	}
    859
    860	flags = XFS_ILOG_CORE;
    861	switch (mode & S_IFMT) {
    862	case S_IFIFO:
    863	case S_IFCHR:
    864	case S_IFBLK:
    865	case S_IFSOCK:
    866		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
    867		flags |= XFS_ILOG_DEV;
    868		break;
    869	case S_IFREG:
    870	case S_IFDIR:
    871		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
    872			xfs_inode_inherit_flags(ip, pip);
    873		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
    874			xfs_inode_inherit_flags2(ip, pip);
    875		fallthrough;
    876	case S_IFLNK:
    877		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
    878		ip->i_df.if_bytes = 0;
    879		ip->i_df.if_u1.if_root = NULL;
    880		break;
    881	default:
    882		ASSERT(0);
    883	}
    884
    885	/*
    886	 * If we need to create attributes immediately after allocating the
    887	 * inode, initialise an empty attribute fork right now. We use the
    888	 * default fork offset for attributes here as we don't know exactly what
    889	 * size or how many attributes we might be adding. We can do this
    890	 * safely here because we know the data fork is completely empty and
    891	 * this saves us from needing to run a separate transaction to set the
    892	 * fork offset in the immediate future.
    893	 */
    894	if (init_xattrs && xfs_has_attr(mp)) {
    895		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
    896		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
    897	}
    898
    899	/*
    900	 * Log the new values stuffed into the inode.
    901	 */
    902	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
    903	xfs_trans_log_inode(tp, ip, flags);
    904
    905	/* now that we have an i_mode we can setup the inode structure */
    906	xfs_setup_inode(ip);
    907
    908	*ipp = ip;
    909	return 0;
    910}
    911
    912/*
    913 * Decrement the link count on an inode & log the change.  If this causes the
    914 * link count to go to zero, move the inode to AGI unlinked list so that it can
    915 * be freed when the last active reference goes away via xfs_inactive().
    916 */
    917static int			/* error */
    918xfs_droplink(
    919	xfs_trans_t *tp,
    920	xfs_inode_t *ip)
    921{
    922	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
    923
    924	drop_nlink(VFS_I(ip));
    925	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
    926
    927	if (VFS_I(ip)->i_nlink)
    928		return 0;
    929
    930	return xfs_iunlink(tp, ip);
    931}
    932
    933/*
    934 * Increment the link count on an inode & log the change.
    935 */
    936static void
    937xfs_bumplink(
    938	xfs_trans_t *tp,
    939	xfs_inode_t *ip)
    940{
    941	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
    942
    943	inc_nlink(VFS_I(ip));
    944	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
    945}
    946
    947int
    948xfs_create(
    949	struct user_namespace	*mnt_userns,
    950	xfs_inode_t		*dp,
    951	struct xfs_name		*name,
    952	umode_t			mode,
    953	dev_t			rdev,
    954	bool			init_xattrs,
    955	xfs_inode_t		**ipp)
    956{
    957	int			is_dir = S_ISDIR(mode);
    958	struct xfs_mount	*mp = dp->i_mount;
    959	struct xfs_inode	*ip = NULL;
    960	struct xfs_trans	*tp = NULL;
    961	int			error;
    962	bool                    unlock_dp_on_error = false;
    963	prid_t			prid;
    964	struct xfs_dquot	*udqp = NULL;
    965	struct xfs_dquot	*gdqp = NULL;
    966	struct xfs_dquot	*pdqp = NULL;
    967	struct xfs_trans_res	*tres;
    968	uint			resblks;
    969	xfs_ino_t		ino;
    970
    971	trace_xfs_create(dp, name);
    972
    973	if (xfs_is_shutdown(mp))
    974		return -EIO;
    975
    976	prid = xfs_get_initial_prid(dp);
    977
    978	/*
    979	 * Make sure that we have allocated dquot(s) on disk.
    980	 */
    981	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
    982			mapped_fsgid(mnt_userns, &init_user_ns), prid,
    983			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
    984			&udqp, &gdqp, &pdqp);
    985	if (error)
    986		return error;
    987
    988	if (is_dir) {
    989		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
    990		tres = &M_RES(mp)->tr_mkdir;
    991	} else {
    992		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
    993		tres = &M_RES(mp)->tr_create;
    994	}
    995
    996	/*
    997	 * Initially assume that the file does not exist and
    998	 * reserve the resources for that case.  If that is not
    999	 * the case we'll drop the one we have and get a more
   1000	 * appropriate transaction later.
   1001	 */
   1002	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
   1003			&tp);
   1004	if (error == -ENOSPC) {
   1005		/* flush outstanding delalloc blocks and retry */
   1006		xfs_flush_inodes(mp);
   1007		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
   1008				resblks, &tp);
   1009	}
   1010	if (error)
   1011		goto out_release_dquots;
   1012
   1013	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
   1014	unlock_dp_on_error = true;
   1015
   1016	/*
   1017	 * A newly created regular or special file just has one directory
   1018	 * entry pointing to it, but a directory also has the "." entry
   1019	 * pointing to itself.
   1020	 */
   1021	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
   1022	if (!error)
   1023		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
   1024				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
   1025	if (error)
   1026		goto out_trans_cancel;
   1027
   1028	/*
   1029	 * Now we join the directory inode to the transaction.  We do not do it
   1030	 * earlier because xfs_dialloc might commit the previous transaction
   1031	 * (and release all the locks).  An error from here on will result in
   1032	 * the transaction cancel unlocking dp so don't do it explicitly in the
   1033	 * error path.
   1034	 */
   1035	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
   1036	unlock_dp_on_error = false;
   1037
   1038	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
   1039					resblks - XFS_IALLOC_SPACE_RES(mp));
   1040	if (error) {
   1041		ASSERT(error != -ENOSPC);
   1042		goto out_trans_cancel;
   1043	}
   1044	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   1045	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
   1046
   1047	if (is_dir) {
   1048		error = xfs_dir_init(tp, ip, dp);
   1049		if (error)
   1050			goto out_trans_cancel;
   1051
   1052		xfs_bumplink(tp, dp);
   1053	}
   1054
   1055	/*
   1056	 * If this is a synchronous mount, make sure that the
   1057	 * create transaction goes to disk before returning to
   1058	 * the user.
   1059	 */
   1060	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
   1061		xfs_trans_set_sync(tp);
   1062
   1063	/*
   1064	 * Attach the dquot(s) to the inodes and modify them incore.
   1065	 * The ids of the inode couldn't have changed since the new
   1066	 * inode has been locked ever since it was created.
   1067	 */
   1068	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
   1069
   1070	error = xfs_trans_commit(tp);
   1071	if (error)
   1072		goto out_release_inode;
   1073
   1074	xfs_qm_dqrele(udqp);
   1075	xfs_qm_dqrele(gdqp);
   1076	xfs_qm_dqrele(pdqp);
   1077
   1078	*ipp = ip;
   1079	return 0;
   1080
   1081 out_trans_cancel:
   1082	xfs_trans_cancel(tp);
   1083 out_release_inode:
   1084	/*
   1085	 * Wait until after the current transaction is aborted to finish the
   1086	 * setup of the inode and release the inode.  This prevents recursive
   1087	 * transactions and deadlocks from xfs_inactive.
   1088	 */
   1089	if (ip) {
   1090		xfs_finish_inode_setup(ip);
   1091		xfs_irele(ip);
   1092	}
   1093 out_release_dquots:
   1094	xfs_qm_dqrele(udqp);
   1095	xfs_qm_dqrele(gdqp);
   1096	xfs_qm_dqrele(pdqp);
   1097
   1098	if (unlock_dp_on_error)
   1099		xfs_iunlock(dp, XFS_ILOCK_EXCL);
   1100	return error;
   1101}
   1102
   1103int
   1104xfs_create_tmpfile(
   1105	struct user_namespace	*mnt_userns,
   1106	struct xfs_inode	*dp,
   1107	umode_t			mode,
   1108	struct xfs_inode	**ipp)
   1109{
   1110	struct xfs_mount	*mp = dp->i_mount;
   1111	struct xfs_inode	*ip = NULL;
   1112	struct xfs_trans	*tp = NULL;
   1113	int			error;
   1114	prid_t                  prid;
   1115	struct xfs_dquot	*udqp = NULL;
   1116	struct xfs_dquot	*gdqp = NULL;
   1117	struct xfs_dquot	*pdqp = NULL;
   1118	struct xfs_trans_res	*tres;
   1119	uint			resblks;
   1120	xfs_ino_t		ino;
   1121
   1122	if (xfs_is_shutdown(mp))
   1123		return -EIO;
   1124
   1125	prid = xfs_get_initial_prid(dp);
   1126
   1127	/*
   1128	 * Make sure that we have allocated dquot(s) on disk.
   1129	 */
   1130	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
   1131			mapped_fsgid(mnt_userns, &init_user_ns), prid,
   1132			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
   1133			&udqp, &gdqp, &pdqp);
   1134	if (error)
   1135		return error;
   1136
   1137	resblks = XFS_IALLOC_SPACE_RES(mp);
   1138	tres = &M_RES(mp)->tr_create_tmpfile;
   1139
   1140	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
   1141			&tp);
   1142	if (error)
   1143		goto out_release_dquots;
   1144
   1145	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
   1146	if (!error)
   1147		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
   1148				0, 0, prid, false, &ip);
   1149	if (error)
   1150		goto out_trans_cancel;
   1151
   1152	if (xfs_has_wsync(mp))
   1153		xfs_trans_set_sync(tp);
   1154
   1155	/*
   1156	 * Attach the dquot(s) to the inodes and modify them incore.
   1157	 * The ids of the inode couldn't have changed since the new
   1158	 * inode has been locked ever since it was created.
   1159	 */
   1160	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
   1161
   1162	error = xfs_iunlink(tp, ip);
   1163	if (error)
   1164		goto out_trans_cancel;
   1165
   1166	error = xfs_trans_commit(tp);
   1167	if (error)
   1168		goto out_release_inode;
   1169
   1170	xfs_qm_dqrele(udqp);
   1171	xfs_qm_dqrele(gdqp);
   1172	xfs_qm_dqrele(pdqp);
   1173
   1174	*ipp = ip;
   1175	return 0;
   1176
   1177 out_trans_cancel:
   1178	xfs_trans_cancel(tp);
   1179 out_release_inode:
   1180	/*
   1181	 * Wait until after the current transaction is aborted to finish the
   1182	 * setup of the inode and release the inode.  This prevents recursive
   1183	 * transactions and deadlocks from xfs_inactive.
   1184	 */
   1185	if (ip) {
   1186		xfs_finish_inode_setup(ip);
   1187		xfs_irele(ip);
   1188	}
   1189 out_release_dquots:
   1190	xfs_qm_dqrele(udqp);
   1191	xfs_qm_dqrele(gdqp);
   1192	xfs_qm_dqrele(pdqp);
   1193
   1194	return error;
   1195}
   1196
   1197int
   1198xfs_link(
   1199	xfs_inode_t		*tdp,
   1200	xfs_inode_t		*sip,
   1201	struct xfs_name		*target_name)
   1202{
   1203	xfs_mount_t		*mp = tdp->i_mount;
   1204	xfs_trans_t		*tp;
   1205	int			error, nospace_error = 0;
   1206	int			resblks;
   1207
   1208	trace_xfs_link(tdp, target_name);
   1209
   1210	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
   1211
   1212	if (xfs_is_shutdown(mp))
   1213		return -EIO;
   1214
   1215	error = xfs_qm_dqattach(sip);
   1216	if (error)
   1217		goto std_return;
   1218
   1219	error = xfs_qm_dqattach(tdp);
   1220	if (error)
   1221		goto std_return;
   1222
   1223	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
   1224	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
   1225			&tp, &nospace_error);
   1226	if (error)
   1227		goto std_return;
   1228
   1229	/*
   1230	 * If we are using project inheritance, we only allow hard link
   1231	 * creation in our tree when the project IDs are the same; else
   1232	 * the tree quota mechanism could be circumvented.
   1233	 */
   1234	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
   1235		     tdp->i_projid != sip->i_projid)) {
   1236		error = -EXDEV;
   1237		goto error_return;
   1238	}
   1239
   1240	if (!resblks) {
   1241		error = xfs_dir_canenter(tp, tdp, target_name);
   1242		if (error)
   1243			goto error_return;
   1244	}
   1245
   1246	/*
   1247	 * Handle initial link state of O_TMPFILE inode
   1248	 */
   1249	if (VFS_I(sip)->i_nlink == 0) {
   1250		struct xfs_perag	*pag;
   1251
   1252		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
   1253		error = xfs_iunlink_remove(tp, pag, sip);
   1254		xfs_perag_put(pag);
   1255		if (error)
   1256			goto error_return;
   1257	}
   1258
   1259	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
   1260				   resblks);
   1261	if (error)
   1262		goto error_return;
   1263	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   1264	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
   1265
   1266	xfs_bumplink(tp, sip);
   1267
   1268	/*
   1269	 * If this is a synchronous mount, make sure that the
   1270	 * link transaction goes to disk before returning to
   1271	 * the user.
   1272	 */
   1273	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
   1274		xfs_trans_set_sync(tp);
   1275
   1276	return xfs_trans_commit(tp);
   1277
   1278 error_return:
   1279	xfs_trans_cancel(tp);
   1280 std_return:
   1281	if (error == -ENOSPC && nospace_error)
   1282		error = nospace_error;
   1283	return error;
   1284}
   1285
   1286/* Clear the reflink flag and the cowblocks tag if possible. */
   1287static void
   1288xfs_itruncate_clear_reflink_flags(
   1289	struct xfs_inode	*ip)
   1290{
   1291	struct xfs_ifork	*dfork;
   1292	struct xfs_ifork	*cfork;
   1293
   1294	if (!xfs_is_reflink_inode(ip))
   1295		return;
   1296	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
   1297	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
   1298	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
   1299		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
   1300	if (cfork->if_bytes == 0)
   1301		xfs_inode_clear_cowblocks_tag(ip);
   1302}
   1303
   1304/*
   1305 * Free up the underlying blocks past new_size.  The new size must be smaller
   1306 * than the current size.  This routine can be used both for the attribute and
   1307 * data fork, and does not modify the inode size, which is left to the caller.
   1308 *
   1309 * The transaction passed to this routine must have made a permanent log
   1310 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
   1311 * given transaction and start new ones, so make sure everything involved in
   1312 * the transaction is tidy before calling here.  Some transaction will be
   1313 * returned to the caller to be committed.  The incoming transaction must
   1314 * already include the inode, and both inode locks must be held exclusively.
   1315 * The inode must also be "held" within the transaction.  On return the inode
   1316 * will be "held" within the returned transaction.  This routine does NOT
   1317 * require any disk space to be reserved for it within the transaction.
   1318 *
   1319 * If we get an error, we must return with the inode locked and linked into the
   1320 * current transaction. This keeps things simple for the higher level code,
   1321 * because it always knows that the inode is locked and held in the transaction
   1322 * that returns to it whether errors occur or not.  We don't mark the inode
   1323 * dirty on error so that transactions can be easily aborted if possible.
   1324 */
   1325int
   1326xfs_itruncate_extents_flags(
   1327	struct xfs_trans	**tpp,
   1328	struct xfs_inode	*ip,
   1329	int			whichfork,
   1330	xfs_fsize_t		new_size,
   1331	int			flags)
   1332{
   1333	struct xfs_mount	*mp = ip->i_mount;
   1334	struct xfs_trans	*tp = *tpp;
   1335	xfs_fileoff_t		first_unmap_block;
   1336	xfs_filblks_t		unmap_len;
   1337	int			error = 0;
   1338
   1339	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
   1340	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
   1341	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
   1342	ASSERT(new_size <= XFS_ISIZE(ip));
   1343	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
   1344	ASSERT(ip->i_itemp != NULL);
   1345	ASSERT(ip->i_itemp->ili_lock_flags == 0);
   1346	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
   1347
   1348	trace_xfs_itruncate_extents_start(ip, new_size);
   1349
   1350	flags |= xfs_bmapi_aflag(whichfork);
   1351
   1352	/*
   1353	 * Since it is possible for space to become allocated beyond
   1354	 * the end of the file (in a crash where the space is allocated
   1355	 * but the inode size is not yet updated), simply remove any
   1356	 * blocks which show up between the new EOF and the maximum
   1357	 * possible file size.
   1358	 *
   1359	 * We have to free all the blocks to the bmbt maximum offset, even if
   1360	 * the page cache can't scale that far.
   1361	 */
   1362	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
   1363	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
   1364		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
   1365		return 0;
   1366	}
   1367
   1368	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
   1369	while (unmap_len > 0) {
   1370		ASSERT(tp->t_firstblock == NULLFSBLOCK);
   1371		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
   1372				flags, XFS_ITRUNC_MAX_EXTENTS);
   1373		if (error)
   1374			goto out;
   1375
   1376		/* free the just unmapped extents */
   1377		error = xfs_defer_finish(&tp);
   1378		if (error)
   1379			goto out;
   1380	}
   1381
   1382	if (whichfork == XFS_DATA_FORK) {
   1383		/* Remove all pending CoW reservations. */
   1384		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
   1385				first_unmap_block, XFS_MAX_FILEOFF, true);
   1386		if (error)
   1387			goto out;
   1388
   1389		xfs_itruncate_clear_reflink_flags(ip);
   1390	}
   1391
   1392	/*
   1393	 * Always re-log the inode so that our permanent transaction can keep
   1394	 * on rolling it forward in the log.
   1395	 */
   1396	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
   1397
   1398	trace_xfs_itruncate_extents_end(ip, new_size);
   1399
   1400out:
   1401	*tpp = tp;
   1402	return error;
   1403}
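/*
 * Editorial sketch, hedged: the calling contract described above -- permanent
 * log reservation, inode joined to the transaction, commit or cancel left to
 * the caller -- is the pattern used by xfs_inactive_truncate() below.
 */
#if 0	/* illustrative only, not compiled */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);		/* inode stays joined and held */
	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	/* commit tp on success, cancel it on error; unlock ip afterwards */
#endif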
   1404
   1405int
   1406xfs_release(
   1407	xfs_inode_t	*ip)
   1408{
   1409	xfs_mount_t	*mp = ip->i_mount;
   1410	int		error = 0;
   1411
   1412	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
   1413		return 0;
   1414
   1415	/* If this is a read-only mount, don't do this (would generate I/O) */
   1416	if (xfs_is_readonly(mp))
   1417		return 0;
   1418
   1419	if (!xfs_is_shutdown(mp)) {
   1420		int truncated;
   1421
   1422		/*
   1423		 * If we previously truncated this file and removed old data
   1424		 * in the process, we want to initiate "early" writeout on
   1425		 * the last close.  This is an attempt to combat the notorious
   1426		 * NULL files problem which is particularly noticeable from a
   1427		 * truncate down, buffered (re-)write (delalloc), followed by
   1428		 * a crash.  What we are effectively doing here is
   1429		 * significantly reducing the time window where we'd otherwise
   1430		 * be exposed to that problem.
   1431		 */
   1432		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
   1433		if (truncated) {
   1434			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
   1435			if (ip->i_delayed_blks > 0) {
   1436				error = filemap_flush(VFS_I(ip)->i_mapping);
   1437				if (error)
   1438					return error;
   1439			}
   1440		}
   1441	}
   1442
   1443	if (VFS_I(ip)->i_nlink == 0)
   1444		return 0;
   1445
   1446	/*
   1447	 * If we can't get the iolock just skip truncating the blocks past EOF
   1448	 * because we could deadlock with the mmap_lock otherwise. We'll get
   1449	 * another chance to drop them once the last reference to the inode is
   1450	 * dropped, so we'll never leak blocks permanently.
   1451	 */
   1452	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
   1453		return 0;
   1454
   1455	if (xfs_can_free_eofblocks(ip, false)) {
   1456		/*
   1457		 * If the inode is being opened, written and closed
   1458		 * frequently and we have delayed allocation blocks outstanding
   1459		 * (e.g. streaming writes from the NFS server), truncating the
   1460		 * blocks past EOF will cause fragmentation to occur.
   1461		 *
   1462		 * In this case don't do the truncation, but we have to be
   1463		 * careful how we detect this case. Blocks beyond EOF show up as
   1464		 * i_delayed_blks even when the inode is clean, so we need to
   1465		 * truncate them away first before checking for a dirty release.
   1466		 * Hence on the first dirty close we will still remove the
   1467		 * speculative allocation, but after that we will leave it in
   1468		 * place.
   1469		 */
   1470		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
   1471			goto out_unlock;
   1472
   1473		error = xfs_free_eofblocks(ip);
   1474		if (error)
   1475			goto out_unlock;
   1476
   1477		/* delalloc blocks after truncation means it really is dirty */
   1478		if (ip->i_delayed_blks)
   1479			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
   1480	}
   1481
   1482out_unlock:
   1483	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
   1484	return error;
   1485}
   1486
   1487/*
   1488 * xfs_inactive_truncate
   1489 *
   1490 * Called to perform a truncate when an inode becomes unlinked.
   1491 */
   1492STATIC int
   1493xfs_inactive_truncate(
   1494	struct xfs_inode *ip)
   1495{
   1496	struct xfs_mount	*mp = ip->i_mount;
   1497	struct xfs_trans	*tp;
   1498	int			error;
   1499
   1500	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
   1501	if (error) {
   1502		ASSERT(xfs_is_shutdown(mp));
   1503		return error;
   1504	}
   1505	xfs_ilock(ip, XFS_ILOCK_EXCL);
   1506	xfs_trans_ijoin(tp, ip, 0);
   1507
   1508	/*
   1509	 * Log the inode size first to prevent stale data exposure in the event
   1510	 * of a system crash before the truncate completes. See the related
   1511	 * comment in xfs_vn_setattr_size() for details.
   1512	 */
   1513	ip->i_disk_size = 0;
   1514	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
   1515
   1516	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
   1517	if (error)
   1518		goto error_trans_cancel;
   1519
   1520	ASSERT(ip->i_df.if_nextents == 0);
   1521
   1522	error = xfs_trans_commit(tp);
   1523	if (error)
   1524		goto error_unlock;
   1525
   1526	xfs_iunlock(ip, XFS_ILOCK_EXCL);
   1527	return 0;
   1528
   1529error_trans_cancel:
   1530	xfs_trans_cancel(tp);
   1531error_unlock:
   1532	xfs_iunlock(ip, XFS_ILOCK_EXCL);
   1533	return error;
   1534}
   1535
   1536/*
   1537 * xfs_inactive_ifree()
   1538 *
   1539 * Perform the inode free when an inode is unlinked.
   1540 */
   1541STATIC int
   1542xfs_inactive_ifree(
   1543	struct xfs_inode *ip)
   1544{
   1545	struct xfs_mount	*mp = ip->i_mount;
   1546	struct xfs_trans	*tp;
   1547	int			error;
   1548
   1549	/*
   1550	 * We try to use a per-AG reservation for any block needed by the finobt
   1551	 * tree, but as the finobt feature predates the per-AG reservation
   1552	 * support a degraded file system might not have enough space for the
   1553	 * reservation at mount time.  In that case try to dip into the reserved
   1554	 * pool and pray.
   1555	 *
   1556	 * Send a warning if the reservation does happen to fail, as the inode
   1557	 * now remains allocated and sits on the unlinked list until the fs is
   1558	 * repaired.
   1559	 */
   1560	if (unlikely(mp->m_finobt_nores)) {
   1561		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
   1562				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
   1563				&tp);
   1564	} else {
   1565		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
   1566	}
   1567	if (error) {
   1568		if (error == -ENOSPC) {
   1569			xfs_warn_ratelimited(mp,
   1570			"Failed to remove inode(s) from unlinked list. "
   1571			"Please free space, unmount and run xfs_repair.");
   1572		} else {
   1573			ASSERT(xfs_is_shutdown(mp));
   1574		}
   1575		return error;
   1576	}
   1577
   1578	/*
   1579	 * We do not hold the inode locked across the entire rolling transaction
   1580	 * here. We only need to hold it for the first transaction that
   1581	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
   1582	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
   1583	 * here breaks the relationship between cluster buffer invalidation and
   1584	 * stale inode invalidation on cluster buffer item journal commit
   1585	 * completion, and can result in leaving dirty stale inodes hanging
   1586	 * around in memory.
   1587	 *
   1588	 * We have no need for serialising this inode operation against other
   1589	 * operations - we freed the inode and hence reallocation is required
   1590	 * and that will serialise on reallocating the space the deferops need
   1591	 * to free. Hence we can unlock the inode on the first commit of
   1592	 * the transaction rather than roll it right through the deferops. This
   1593	 * avoids relogging the XFS_ISTALE inode.
   1594	 *
   1595	 * We check that xfs_ifree() hasn't grown an internal transaction roll
   1596	 * by asserting that the inode is still locked when it returns.
   1597	 */
   1598	xfs_ilock(ip, XFS_ILOCK_EXCL);
   1599	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
   1600
   1601	error = xfs_ifree(tp, ip);
   1602	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
   1603	if (error) {
   1604		/*
   1605		 * If we fail to free the inode, shut down.  The cancel
   1606		 * might do that, we need to make sure.  Otherwise the
   1607		 * inode might be lost for a long time or forever.
   1608		 */
   1609		if (!xfs_is_shutdown(mp)) {
   1610			xfs_notice(mp, "%s: xfs_ifree returned error %d",
   1611				__func__, error);
   1612			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
   1613		}
   1614		xfs_trans_cancel(tp);
   1615		return error;
   1616	}
   1617
   1618	/*
   1619	 * Credit the quota account(s). The inode is gone.
   1620	 */
   1621	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
   1622
   1623	/*
   1624	 * Just ignore errors at this point.  There is nothing we can do except
   1625	 * to try to keep going. Make sure it's not a silent error.
   1626	 */
   1627	error = xfs_trans_commit(tp);
   1628	if (error)
   1629		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
   1630			__func__, error);
   1631
   1632	return 0;
   1633}
   1634
   1635/*
   1636 * Returns true if we need to update the on-disk metadata before we can free
   1637 * the memory used by this inode.  Updates include freeing post-eof
   1638 * preallocations; freeing COW staging extents; and marking the inode free in
   1639 * the inobt if it is on the unlinked list.
   1640 */
   1641bool
   1642xfs_inode_needs_inactive(
   1643	struct xfs_inode	*ip)
   1644{
   1645	struct xfs_mount	*mp = ip->i_mount;
   1646	struct xfs_ifork	*cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
   1647
   1648	/*
   1649	 * If the inode is already free, then there can be nothing
   1650	 * to clean up here.
   1651	 */
   1652	if (VFS_I(ip)->i_mode == 0)
   1653		return false;
   1654
   1655	/* If this is a read-only mount, don't do this (would generate I/O) */
   1656	if (xfs_is_readonly(mp))
   1657		return false;
   1658
   1659	/* If the log isn't running, push inodes straight to reclaim. */
   1660	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
   1661		return false;
   1662
   1663	/* Metadata inodes require explicit resource cleanup. */
   1664	if (xfs_is_metadata_inode(ip))
   1665		return false;
   1666
   1667	/* Want to clean out the cow blocks if there are any. */
   1668	if (cow_ifp && cow_ifp->if_bytes > 0)
   1669		return true;
   1670
   1671	/* Unlinked files must be freed. */
   1672	if (VFS_I(ip)->i_nlink == 0)
   1673		return true;
   1674
   1675	/*
   1676	 * This file isn't being freed, so check if there are post-eof blocks
   1677	 * to free.  @force is true because we are evicting an inode from the
   1678	 * cache.  Post-eof blocks must be freed, lest we end up with broken
   1679	 * free space accounting.
   1680	 *
   1681	 * Note: don't bother with iolock here since lockdep complains about
   1682	 * acquiring it in reclaim context. We have the only reference to the
   1683	 * inode at this point anyways.
   1684	 */
   1685	return xfs_can_free_eofblocks(ip, true);
   1686}
   1687
   1688/*
   1689 * xfs_inactive
   1690 *
   1691 * This is called when the reference count for the vnode
   1692 * goes to zero.  If the file has been unlinked, then it must
   1693 * now be truncated.  Also, we clear all of the read-ahead state
   1694 * kept for the inode here since the file is now closed.
   1695 */
   1696void
   1697xfs_inactive(
   1698	xfs_inode_t	*ip)
   1699{
   1700	struct xfs_mount	*mp;
   1701	int			error;
   1702	int			truncate = 0;
   1703
   1704	/*
   1705	 * If the inode is already free, then there can be nothing
   1706	 * to clean up here.
   1707	 */
   1708	if (VFS_I(ip)->i_mode == 0) {
   1709		ASSERT(ip->i_df.if_broot_bytes == 0);
   1710		goto out;
   1711	}
   1712
   1713	mp = ip->i_mount;
   1714	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
   1715
   1716	/* If this is a read-only mount, don't do this (would generate I/O) */
   1717	if (xfs_is_readonly(mp))
   1718		goto out;
   1719
   1720	/* Metadata inodes require explicit resource cleanup. */
   1721	if (xfs_is_metadata_inode(ip))
   1722		goto out;
   1723
   1724	/* Try to clean out the cow blocks if there are any. */
   1725	if (xfs_inode_has_cow_data(ip))
   1726		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
   1727
   1728	if (VFS_I(ip)->i_nlink != 0) {
   1729		/*
   1730		 * force is true because we are evicting an inode from the
   1731		 * cache. Post-eof blocks must be freed, lest we end up with
   1732		 * broken free space accounting.
   1733		 *
   1734		 * Note: don't bother with iolock here since lockdep complains
   1735		 * about acquiring it in reclaim context. We have the only
   1736		 * reference to the inode at this point anyways.
   1737		 */
   1738		if (xfs_can_free_eofblocks(ip, true))
   1739			xfs_free_eofblocks(ip);
   1740
   1741		goto out;
   1742	}
   1743
   1744	if (S_ISREG(VFS_I(ip)->i_mode) &&
   1745	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
   1746	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
   1747		truncate = 1;
   1748
   1749	error = xfs_qm_dqattach(ip);
   1750	if (error)
   1751		goto out;
   1752
   1753	if (S_ISLNK(VFS_I(ip)->i_mode))
   1754		error = xfs_inactive_symlink(ip);
   1755	else if (truncate)
   1756		error = xfs_inactive_truncate(ip);
   1757	if (error)
   1758		goto out;
   1759
   1760	/*
   1761	 * If there are attributes associated with the file then blow them away
   1762	 * now.  The code calls a routine that recursively deconstructs the
   1763	 * attribute fork. It also blows away the in-core attribute fork.
   1764	 */
   1765	if (XFS_IFORK_Q(ip)) {
   1766		error = xfs_attr_inactive(ip);
   1767		if (error)
   1768			goto out;
   1769	}
   1770
   1771	ASSERT(!ip->i_afp);
   1772	ASSERT(ip->i_forkoff == 0);
   1773
   1774	/*
   1775	 * Free the inode.
   1776	 */
   1777	xfs_inactive_ifree(ip);
   1778
   1779out:
   1780	/*
   1781	 * We're done making metadata updates for this inode, so we can release
   1782	 * the attached dquots.
   1783	 */
   1784	xfs_qm_dqdetach(ip);
   1785}
   1786
   1787/*
   1788 * In-Core Unlinked List Lookups
   1789 * =============================
   1790 *
   1791 * Every inode is supposed to be reachable from some other piece of metadata
   1792 * with the exception of the root directory.  Inodes with a connection to a
   1793 * file descriptor but not linked from anywhere in the on-disk directory tree
   1794 * are collectively known as unlinked inodes, though the filesystem itself
   1795 * maintains links to these inodes so that on-disk metadata are consistent.
   1796 *
   1797 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
   1798 * header contains a number of buckets that point to an inode, and each inode
   1799 * record has a pointer to the next inode in the hash chain.  This
   1800 * singly-linked list causes scaling problems in the iunlink remove function
   1801 * because we must walk that list to find the inode that points to the inode
   1802 * being removed from the unlinked hash bucket list.
   1803 *
   1804 * What if we modelled the unlinked list as a collection of records capturing
   1805 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
   1806 * have a fast way to look up unlinked list predecessors, which avoids the
   1807 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
   1808 * rhashtable.
   1809 *
   1810 * Because this is a backref cache, we ignore operational failures since the
   1811 * iunlink code can fall back to the slow bucket walk.  The only errors that
   1812 * should bubble out are for obviously incorrect situations.
   1813 *
   1814 * All users of the backref cache MUST hold the AGI buffer lock to serialize
   1815 * access or have otherwise provided for concurrency control.
   1816 */
   1817
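       /*
        * Illustrative sketch with hypothetical inode numbers: suppose AGI
        * bucket 3 currently heads the chain
        *
        *	bucket[3] -> 131 -> 67 -> 3 -> NULLAGINO
        *
        * i.e. inode 131's di_next_unlinked is 67, 67's is 3, and 3 terminates
        * the list.  The in-core cache then holds the backref records
        *
        *	{ .iu_next_unlinked = 67, .iu_agino = 131 }
        *	{ .iu_next_unlinked = 3,  .iu_agino = 67 }
        *
        * so finding the predecessor of, say, inode 3 is a single lookup keyed
        * on 3 (returning 67) instead of a walk from the bucket head.
        */
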
   1818/* Capture a "X.next_unlinked = Y" relationship. */
   1819struct xfs_iunlink {
   1820	struct rhash_head	iu_rhash_head;
   1821	xfs_agino_t		iu_agino;		/* X */
   1822	xfs_agino_t		iu_next_unlinked;	/* Y */
   1823};
   1824
   1825/* Unlinked list predecessor lookup hashtable construction */
   1826static int
   1827xfs_iunlink_obj_cmpfn(
   1828	struct rhashtable_compare_arg	*arg,
   1829	const void			*obj)
   1830{
   1831	const xfs_agino_t		*key = arg->key;
   1832	const struct xfs_iunlink	*iu = obj;
   1833
   1834	if (iu->iu_next_unlinked != *key)
   1835		return 1;
   1836	return 0;
   1837}
   1838
   1839static const struct rhashtable_params xfs_iunlink_hash_params = {
   1840	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
   1841	.key_len		= sizeof(xfs_agino_t),
   1842	.key_offset		= offsetof(struct xfs_iunlink,
   1843					   iu_next_unlinked),
   1844	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
   1845	.automatic_shrinking	= true,
   1846	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
   1847};
   1848
   1849/*
   1850 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
   1851 * relation is found.
   1852 */
   1853static xfs_agino_t
   1854xfs_iunlink_lookup_backref(
   1855	struct xfs_perag	*pag,
   1856	xfs_agino_t		agino)
   1857{
   1858	struct xfs_iunlink	*iu;
   1859
   1860	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
   1861			xfs_iunlink_hash_params);
   1862	return iu ? iu->iu_agino : NULLAGINO;
   1863}
   1864
   1865/*
   1866 * Take ownership of an iunlink cache entry and insert it into the hash table.
   1867 * If successful, the entry will be owned by the cache; if not, it is freed.
   1868 * Either way, the caller does not own @iu after this call.
   1869 */
   1870static int
   1871xfs_iunlink_insert_backref(
   1872	struct xfs_perag	*pag,
   1873	struct xfs_iunlink	*iu)
   1874{
   1875	int			error;
   1876
   1877	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
   1878			&iu->iu_rhash_head, xfs_iunlink_hash_params);
   1879	/*
   1880	 * Fail loudly if there already was an entry because that's a sign of
   1881	 * corruption of in-memory data.  Also fail loudly if we see an error
   1882	 * code we didn't anticipate from the rhashtable code.  Currently we
   1883	 * only anticipate ENOMEM.
   1884	 */
   1885	if (error) {
   1886		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
   1887		kmem_free(iu);
   1888	}
   1889	/*
   1890	 * Absorb any runtime errors that aren't a result of corruption because
   1891	 * this is a cache and we can always fall back to bucket list scanning.
   1892	 */
   1893	if (error != 0 && error != -EEXIST)
   1894		error = 0;
   1895	return error;
   1896}
   1897
   1898/* Remember that @prev_agino.next_unlinked = @this_agino. */
   1899static int
   1900xfs_iunlink_add_backref(
   1901	struct xfs_perag	*pag,
   1902	xfs_agino_t		prev_agino,
   1903	xfs_agino_t		this_agino)
   1904{
   1905	struct xfs_iunlink	*iu;
   1906
   1907	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
   1908		return 0;
   1909
   1910	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
   1911	iu->iu_agino = prev_agino;
   1912	iu->iu_next_unlinked = this_agino;
   1913
   1914	return xfs_iunlink_insert_backref(pag, iu);
   1915}
   1916
   1917/*
   1918 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
   1919 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
   1920 * wasn't any such entry then we don't bother.
   1921 */
   1922static int
   1923xfs_iunlink_change_backref(
   1924	struct xfs_perag	*pag,
   1925	xfs_agino_t		agino,
   1926	xfs_agino_t		next_unlinked)
   1927{
   1928	struct xfs_iunlink	*iu;
   1929	int			error;
   1930
   1931	/* Look up the old entry; if there wasn't one then exit. */
   1932	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
   1933			xfs_iunlink_hash_params);
   1934	if (!iu)
   1935		return 0;
   1936
   1937	/*
   1938	 * Remove the entry.  This shouldn't ever return an error, but if we
   1939	 * couldn't remove the old entry we don't want to add it again to the
   1940	 * hash table, and if the entry disappeared on us then someone's
   1941	 * violated the locking rules and we need to fail loudly.  Either way
   1942	 * we cannot remove the inode because internal state is or would have
   1943	 * been corrupt.
   1944	 */
   1945	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
   1946			&iu->iu_rhash_head, xfs_iunlink_hash_params);
   1947	if (error)
   1948		return error;
   1949
   1950	/* If there is no new next entry just free our item and return. */
   1951	if (next_unlinked == NULLAGINO) {
   1952		kmem_free(iu);
   1953		return 0;
   1954	}
   1955
   1956	/* Update the entry and re-add it to the hash table. */
   1957	iu->iu_next_unlinked = next_unlinked;
   1958	return xfs_iunlink_insert_backref(pag, iu);
   1959}
   1960
   1961/* Set up the in-core predecessor structures. */
   1962int
   1963xfs_iunlink_init(
   1964	struct xfs_perag	*pag)
   1965{
   1966	return rhashtable_init(&pag->pagi_unlinked_hash,
   1967			&xfs_iunlink_hash_params);
   1968}
   1969
   1970/* Free the in-core predecessor structures. */
   1971static void
   1972xfs_iunlink_free_item(
   1973	void			*ptr,
   1974	void			*arg)
   1975{
   1976	struct xfs_iunlink	*iu = ptr;
   1977	bool			*freed_anything = arg;
   1978
   1979	*freed_anything = true;
   1980	kmem_free(iu);
   1981}
   1982
   1983void
   1984xfs_iunlink_destroy(
   1985	struct xfs_perag	*pag)
   1986{
   1987	bool			freed_anything = false;
   1988
   1989	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
   1990			xfs_iunlink_free_item, &freed_anything);
   1991
   1992	ASSERT(freed_anything == false || xfs_is_shutdown(pag->pag_mount));
   1993}
   1994
   1995/*
   1996 * Point the AGI unlinked bucket at an inode and log the results.  The caller
   1997 * is responsible for validating the old value.
   1998 */
   1999STATIC int
   2000xfs_iunlink_update_bucket(
   2001	struct xfs_trans	*tp,
   2002	struct xfs_perag	*pag,
   2003	struct xfs_buf		*agibp,
   2004	unsigned int		bucket_index,
   2005	xfs_agino_t		new_agino)
   2006{
   2007	struct xfs_agi		*agi = agibp->b_addr;
   2008	xfs_agino_t		old_value;
   2009	int			offset;
   2010
   2011	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
   2012
   2013	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
   2014	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
   2015			old_value, new_agino);
   2016
   2017	/*
   2018	 * We should never find the head of the list already set to the value
   2019	 * passed in because either we're adding or removing ourselves from the
   2020	 * head of the list.
   2021	 */
   2022	if (old_value == new_agino) {
   2023		xfs_buf_mark_corrupt(agibp);
   2024		return -EFSCORRUPTED;
   2025	}
   2026
   2027	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
   2028	offset = offsetof(struct xfs_agi, agi_unlinked) +
   2029			(sizeof(xfs_agino_t) * bucket_index);
   2030	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
   2031	return 0;
   2032}
   2033
   2034/* Set an on-disk inode's next_unlinked pointer. */
   2035STATIC void
   2036xfs_iunlink_update_dinode(
   2037	struct xfs_trans	*tp,
   2038	struct xfs_perag	*pag,
   2039	xfs_agino_t		agino,
   2040	struct xfs_buf		*ibp,
   2041	struct xfs_dinode	*dip,
   2042	struct xfs_imap		*imap,
   2043	xfs_agino_t		next_agino)
   2044{
   2045	struct xfs_mount	*mp = tp->t_mountp;
   2046	int			offset;
   2047
   2048	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
   2049
   2050	trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
   2051			be32_to_cpu(dip->di_next_unlinked), next_agino);
   2052
   2053	dip->di_next_unlinked = cpu_to_be32(next_agino);
   2054	offset = imap->im_boffset +
   2055			offsetof(struct xfs_dinode, di_next_unlinked);
   2056
   2057	/* need to recalc the inode CRC if appropriate */
   2058	xfs_dinode_calc_crc(mp, dip);
   2059	xfs_trans_inode_buf(tp, ibp);
   2060	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
   2061}
   2062
   2063/* Set an in-core inode's unlinked pointer and return the old value. */
   2064STATIC int
   2065xfs_iunlink_update_inode(
   2066	struct xfs_trans	*tp,
   2067	struct xfs_inode	*ip,
   2068	struct xfs_perag	*pag,
   2069	xfs_agino_t		next_agino,
   2070	xfs_agino_t		*old_next_agino)
   2071{
   2072	struct xfs_mount	*mp = tp->t_mountp;
   2073	struct xfs_dinode	*dip;
   2074	struct xfs_buf		*ibp;
   2075	xfs_agino_t		old_value;
   2076	int			error;
   2077
   2078	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
   2079
   2080	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
   2081	if (error)
   2082		return error;
   2083	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
   2084
   2085	/* Make sure the old pointer isn't garbage. */
   2086	old_value = be32_to_cpu(dip->di_next_unlinked);
   2087	if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
   2088		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
   2089				sizeof(*dip), __this_address);
   2090		error = -EFSCORRUPTED;
   2091		goto out;
   2092	}
   2093
   2094	/*
   2095	 * Since we're updating a linked list, we should never find that the
   2096	 * current pointer is the same as the new value, unless we're
   2097	 * terminating the list.
   2098	 */
   2099	*old_next_agino = old_value;
   2100	if (old_value == next_agino) {
   2101		if (next_agino != NULLAGINO) {
   2102			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
   2103					dip, sizeof(*dip), __this_address);
   2104			error = -EFSCORRUPTED;
   2105		}
   2106		goto out;
   2107	}
   2108
   2109	/* Ok, update the new pointer. */
   2110	xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
   2111			ibp, dip, &ip->i_imap, next_agino);
   2112	return 0;
   2113out:
   2114	xfs_trans_brelse(tp, ibp);
   2115	return error;
   2116}
   2117
   2118/*
   2119 * This is called when the inode's link count has gone to 0 or we are creating
   2120 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
   2121 *
   2122 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
   2123 * list when the inode is freed.
   2124 */
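       /*
        * Worked example with the same hypothetical inode numbers as the
        * backref sketch above: if bucket 3 currently reads
        * bucket[3] -> 67 -> 3, then iunlinking inode 131 first sets 131's
        * di_next_unlinked to 67, then records the backref
        * { .iu_next_unlinked = 67, .iu_agino = 131 } so that 131 can later be
        * found as 67's predecessor, and finally points bucket[3] at 131.
        */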
   2125STATIC int
   2126xfs_iunlink(
   2127	struct xfs_trans	*tp,
   2128	struct xfs_inode	*ip)
   2129{
   2130	struct xfs_mount	*mp = tp->t_mountp;
   2131	struct xfs_perag	*pag;
   2132	struct xfs_agi		*agi;
   2133	struct xfs_buf		*agibp;
   2134	xfs_agino_t		next_agino;
   2135	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
   2136	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
   2137	int			error;
   2138
   2139	ASSERT(VFS_I(ip)->i_nlink == 0);
   2140	ASSERT(VFS_I(ip)->i_mode != 0);
   2141	trace_xfs_iunlink(ip);
   2142
   2143	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
   2144
   2145	/* Get the agi buffer first.  It ensures lock ordering on the list. */
   2146	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
   2147	if (error)
   2148		goto out;
   2149	agi = agibp->b_addr;
   2150
   2151	/*
   2152	 * Get the index into the agi hash table for the list this inode will
   2153	 * go on.  Make sure the pointer isn't garbage and that this inode
   2154	 * isn't already on the list.
   2155	 */
   2156	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
   2157	if (next_agino == agino ||
   2158	    !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
   2159		xfs_buf_mark_corrupt(agibp);
   2160		error = -EFSCORRUPTED;
   2161		goto out;
   2162	}
   2163
   2164	if (next_agino != NULLAGINO) {
   2165		xfs_agino_t		old_agino;
   2166
   2167		/*
   2168		 * There is already another inode in the bucket, so point this
   2169		 * inode to the current head of the list.
   2170		 */
   2171		error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
   2172				&old_agino);
   2173		if (error)
   2174			goto out;
   2175		ASSERT(old_agino == NULLAGINO);
   2176
   2177		/*
   2178		 * agino has been unlinked, add a backref from the next inode
   2179		 * back to agino.
   2180		 */
   2181		error = xfs_iunlink_add_backref(pag, agino, next_agino);
   2182		if (error)
   2183			goto out;
   2184	}
   2185
   2186	/* Point the head of the list to point to this inode. */
   2187	error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
   2188out:
   2189	xfs_perag_put(pag);
   2190	return error;
   2191}
   2192
   2193/* Return the imap, dinode pointer, and buffer for an inode. */
   2194STATIC int
   2195xfs_iunlink_map_ino(
   2196	struct xfs_trans	*tp,
   2197	xfs_agnumber_t		agno,
   2198	xfs_agino_t		agino,
   2199	struct xfs_imap		*imap,
   2200	struct xfs_dinode	**dipp,
   2201	struct xfs_buf		**bpp)
   2202{
   2203	struct xfs_mount	*mp = tp->t_mountp;
   2204	int			error;
   2205
   2206	imap->im_blkno = 0;
   2207	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
   2208	if (error) {
   2209		xfs_warn(mp, "%s: xfs_imap returned error %d.",
   2210				__func__, error);
   2211		return error;
   2212	}
   2213
   2214	error = xfs_imap_to_bp(mp, tp, imap, bpp);
   2215	if (error) {
   2216		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
   2217				__func__, error);
   2218		return error;
   2219	}
   2220
   2221	*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
   2222	return 0;
   2223}
   2224
   2225/*
   2226 * Walk the unlinked chain from @head_agino until we find the inode that
   2227 * points to @target_agino.  Return the inode number, map, dinode pointer,
   2228 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
   2229 *
   2230 * @tp, @pag, @head_agino, and @target_agino are input parameters.
   2231 * @agino, @imap, @dipp, and @bpp are all output parameters.
   2232 *
   2233 * Do not call this function if @target_agino is the head of the list.
   2234 */
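       /*
        * Continuing the hypothetical chain from the backref sketch above: with
        * bucket[3] -> 131 -> 67 -> 3, a call with @head_agino = 131 and
        * @target_agino = 3 returns @agino = 67, ideally straight from the
        * backref cache, otherwise by walking 131 -> 67 until a
        * di_next_unlinked of 3 is found.
        */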
   2235STATIC int
   2236xfs_iunlink_map_prev(
   2237	struct xfs_trans	*tp,
   2238	struct xfs_perag	*pag,
   2239	xfs_agino_t		head_agino,
   2240	xfs_agino_t		target_agino,
   2241	xfs_agino_t		*agino,
   2242	struct xfs_imap		*imap,
   2243	struct xfs_dinode	**dipp,
   2244	struct xfs_buf		**bpp)
   2245{
   2246	struct xfs_mount	*mp = tp->t_mountp;
   2247	xfs_agino_t		next_agino;
   2248	int			error;
   2249
   2250	ASSERT(head_agino != target_agino);
   2251	*bpp = NULL;
   2252
   2253	/* See if our backref cache can find it faster. */
   2254	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
   2255	if (*agino != NULLAGINO) {
   2256		error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
   2257				dipp, bpp);
   2258		if (error)
   2259			return error;
   2260
   2261		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
   2262			return 0;
   2263
   2264		/*
   2265		 * If we get here the cache contents were corrupt, so drop the
   2266		 * buffer and fall back to walking the bucket list.
   2267		 */
   2268		xfs_trans_brelse(tp, *bpp);
   2269		*bpp = NULL;
   2270		WARN_ON_ONCE(1);
   2271	}
   2272
   2273	trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
   2274
   2275	/* Otherwise, walk the entire bucket until we find it. */
   2276	next_agino = head_agino;
   2277	while (next_agino != target_agino) {
   2278		xfs_agino_t	unlinked_agino;
   2279
   2280		if (*bpp)
   2281			xfs_trans_brelse(tp, *bpp);
   2282
   2283		*agino = next_agino;
   2284		error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
   2285				dipp, bpp);
   2286		if (error)
   2287			return error;
   2288
   2289		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
   2290		/*
   2291		 * Make sure this pointer is valid and isn't an obvious
   2292		 * infinite loop.
   2293		 */
   2294		if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
   2295		    next_agino == unlinked_agino) {
   2296			XFS_CORRUPTION_ERROR(__func__,
   2297					XFS_ERRLEVEL_LOW, mp,
   2298					*dipp, sizeof(**dipp));
   2299			error = -EFSCORRUPTED;
   2300			return error;
   2301		}
   2302		next_agino = unlinked_agino;
   2303	}
   2304
   2305	return 0;
   2306}
   2307
   2308/*
   2309 * Pull the on-disk inode from the AGI unlinked list.
   2310 */
   2311STATIC int
   2312xfs_iunlink_remove(
   2313	struct xfs_trans	*tp,
   2314	struct xfs_perag	*pag,
   2315	struct xfs_inode	*ip)
   2316{
   2317	struct xfs_mount	*mp = tp->t_mountp;
   2318	struct xfs_agi		*agi;
   2319	struct xfs_buf		*agibp;
   2320	struct xfs_buf		*last_ibp;
   2321	struct xfs_dinode	*last_dip = NULL;
   2322	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
   2323	xfs_agino_t		next_agino;
   2324	xfs_agino_t		head_agino;
   2325	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
   2326	int			error;
   2327
   2328	trace_xfs_iunlink_remove(ip);
   2329
   2330	/* Get the agi buffer first.  It ensures lock ordering on the list. */
   2331	error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
   2332	if (error)
   2333		return error;
   2334	agi = agibp->b_addr;
   2335
   2336	/*
   2337	 * Get the index into the agi hash table for the list this inode is
   2338	 * on.  Make sure the head pointer isn't garbage.
   2339	 */
   2340	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
   2341	if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
   2342		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
   2343				agi, sizeof(*agi));
   2344		return -EFSCORRUPTED;
   2345	}
   2346
   2347	/*
   2348	 * Set our inode's next_unlinked pointer to NULL and then return
   2349	 * the old pointer value so that we can update whatever was previous
   2350	 * to us in the list to point to whatever was next in the list.
   2351	 */
   2352	error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
   2353	if (error)
   2354		return error;
   2355
   2356	/*
   2357	 * If there was a backref pointing from the next inode back to this
   2358	 * one, remove it because we've removed this inode from the list.
   2359	 *
   2360	 * Later, if this inode was in the middle of the list we'll update
   2361	 * this inode's backref to point from the next inode.
   2362	 */
   2363	if (next_agino != NULLAGINO) {
   2364		error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
   2365		if (error)
   2366			return error;
   2367	}
   2368
   2369	if (head_agino != agino) {
   2370		struct xfs_imap	imap;
   2371		xfs_agino_t	prev_agino;
   2372
   2373		/* We need to search the list for the inode being freed. */
   2374		error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
   2375				&prev_agino, &imap, &last_dip, &last_ibp);
   2376		if (error)
   2377			return error;
   2378
   2379		/* Point the previous inode on the list to the next inode. */
   2380		xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
   2381				last_dip, &imap, next_agino);
   2382
   2383		/*
   2384		 * Now we deal with the backref for this inode.  If this inode
   2385		 * pointed at a real inode, change the backref that pointed to
   2386		 * us to point to our old next.  If this inode was the end of
   2387		 * the list, delete the backref that pointed to us.  Note that
   2388		 * change_backref takes care of deleting the backref if
   2389		 * next_agino is NULLAGINO.
   2390		 */
   2391		return xfs_iunlink_change_backref(agibp->b_pag, agino,
   2392				next_agino);
   2393	}
   2394
   2395	/* Point the head of the list to the next unlinked inode. */
   2396	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
   2397			next_agino);
   2398}
   2399
   2400/*
   2401 * Look up the inode number specified and if it is not already marked XFS_ISTALE
   2402 * mark it stale. We should only find clean inodes in this lookup that aren't
   2403 * already stale.
   2404 */
   2405static void
   2406xfs_ifree_mark_inode_stale(
   2407	struct xfs_perag	*pag,
   2408	struct xfs_inode	*free_ip,
   2409	xfs_ino_t		inum)
   2410{
   2411	struct xfs_mount	*mp = pag->pag_mount;
   2412	struct xfs_inode_log_item *iip;
   2413	struct xfs_inode	*ip;
   2414
   2415retry:
   2416	rcu_read_lock();
   2417	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
   2418
   2419	/* Inode not in memory, nothing to do */
   2420	if (!ip) {
   2421		rcu_read_unlock();
   2422		return;
   2423	}
   2424
   2425	/*
   2426	 * because this is an RCU protected lookup, we could find a recently
   2427	 * freed or even reallocated inode during the lookup. We need to check
   2428	 * under the i_flags_lock for a valid inode here. Skip it if it is not
   2429	 * valid, the wrong inode or stale.
   2430	 */
   2431	spin_lock(&ip->i_flags_lock);
   2432	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
   2433		goto out_iflags_unlock;
   2434
   2435	/*
   2436	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
   2437	 * other inodes that we did not find in the list attached to the buffer
   2438	 * and are not already marked stale. If we can't lock it, back off and
   2439	 * retry.
   2440	 */
   2441	if (ip != free_ip) {
   2442		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
   2443			spin_unlock(&ip->i_flags_lock);
   2444			rcu_read_unlock();
   2445			delay(1);
   2446			goto retry;
   2447		}
   2448	}
   2449	ip->i_flags |= XFS_ISTALE;
   2450
   2451	/*
   2452	 * If the inode is flushing, it is already attached to the buffer.  All
   2453	 * we need to do here is mark the inode stale so buffer IO completion
   2454	 * will remove it from the AIL.
   2455	 */
   2456	iip = ip->i_itemp;
   2457	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
   2458		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
   2459		ASSERT(iip->ili_last_fields);
   2460		goto out_iunlock;
   2461	}
   2462
   2463	/*
   2464	 * Inodes not attached to the buffer can be released immediately.
   2465	 * Everything else has to go through xfs_iflush_abort() on journal
   2466	 * commit as the flock synchronises removal of the inode from the
   2467	 * cluster buffer against inode reclaim.
   2468	 */
   2469	if (!iip || list_empty(&iip->ili_item.li_bio_list))
   2470		goto out_iunlock;
   2471
   2472	__xfs_iflags_set(ip, XFS_IFLUSHING);
   2473	spin_unlock(&ip->i_flags_lock);
   2474	rcu_read_unlock();
   2475
   2476	/* we have a dirty inode in memory that has not yet been flushed. */
   2477	spin_lock(&iip->ili_lock);
   2478	iip->ili_last_fields = iip->ili_fields;
   2479	iip->ili_fields = 0;
   2480	iip->ili_fsync_fields = 0;
   2481	spin_unlock(&iip->ili_lock);
   2482	ASSERT(iip->ili_last_fields);
   2483
   2484	if (ip != free_ip)
   2485		xfs_iunlock(ip, XFS_ILOCK_EXCL);
   2486	return;
   2487
   2488out_iunlock:
   2489	if (ip != free_ip)
   2490		xfs_iunlock(ip, XFS_ILOCK_EXCL);
   2491out_iflags_unlock:
   2492	spin_unlock(&ip->i_flags_lock);
   2493	rcu_read_unlock();
   2494}
   2495
   2496/*
   2497 * A big issue when freeing the inode cluster is that we _cannot_ skip any
   2498 * inodes that are in memory - they all must be marked stale and attached to
   2499 * the cluster buffer.
   2500 */
   2501static int
   2502xfs_ifree_cluster(
   2503	struct xfs_trans	*tp,
   2504	struct xfs_perag	*pag,
   2505	struct xfs_inode	*free_ip,
   2506	struct xfs_icluster	*xic)
   2507{
   2508	struct xfs_mount	*mp = free_ip->i_mount;
   2509	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
   2510	struct xfs_buf		*bp;
   2511	xfs_daddr_t		blkno;
   2512	xfs_ino_t		inum = xic->first_ino;
   2513	int			nbufs;
   2514	int			i, j;
   2515	int			ioffset;
   2516	int			error;
   2517
   2518	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
   2519
   2520	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
   2521		/*
   2522		 * The allocation bitmap tells us which inodes of the chunk were
   2523		 * physically allocated. Skip the cluster if an inode falls into
   2524		 * a sparse region.
   2525		 */
   2526		ioffset = inum - xic->first_ino;
   2527		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
   2528			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
   2529			continue;
   2530		}
   2531
   2532		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
   2533					 XFS_INO_TO_AGBNO(mp, inum));
   2534
   2535		/*
   2536		 * We obtain and lock the backing buffer first in the process
   2537		 * here to ensure dirty inodes attached to the buffer remain in
   2538		 * the flushing state while we mark them stale.
   2539		 *
   2540		 * If we scan the in-memory inodes first, then buffer IO can
   2541		 * complete before we get a lock on it, and hence we may fail
   2542		 * to mark all the active inodes on the buffer stale.
   2543		 */
   2544		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
   2545				mp->m_bsize * igeo->blocks_per_cluster,
   2546				XBF_UNMAPPED, &bp);
   2547		if (error)
   2548			return error;
   2549
   2550		/*
   2551		 * This buffer may not have been correctly initialised as we
   2552		 * didn't read it from disk. That's not important because we are
   2553		 * only using it to mark the buffer as stale in the log, and to
   2554		 * attach stale cached inodes on it. That means it will never be
   2555		 * dispatched for IO. If it is, we want to know about it, and we
   2556		 * want it to fail. We can achieve this by adding a write
   2557		 * verifier to the buffer.
   2558		 */
   2559		bp->b_ops = &xfs_inode_buf_ops;
   2560
   2561		/*
   2562		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
   2563		 * too. This requires lookups, and will skip inodes that we've
   2564		 * already marked XFS_ISTALE.
   2565		 */
   2566		for (i = 0; i < igeo->inodes_per_cluster; i++)
   2567			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
   2568
   2569		xfs_trans_stale_inode_buf(tp, bp);
   2570		xfs_trans_binval(tp, bp);
   2571	}
   2572	return 0;
   2573}
   2574
   2575/*
   2576 * This is called to return an inode to the inode free list.  The inode should
   2577 * already be truncated to 0 length and have no pages associated with it.  This
   2578 * routine also assumes that the inode is already a part of the transaction.
   2579 *
   2580 * The on-disk copy of the inode will have been added to the list of unlinked
   2581 * inodes in the AGI. We need to remove the inode from that list atomically with
   2582 * respect to freeing it here.
   2583 */
   2584int
   2585xfs_ifree(
   2586	struct xfs_trans	*tp,
   2587	struct xfs_inode	*ip)
   2588{
   2589	struct xfs_mount	*mp = ip->i_mount;
   2590	struct xfs_perag	*pag;
   2591	struct xfs_icluster	xic = { 0 };
   2592	struct xfs_inode_log_item *iip = ip->i_itemp;
   2593	int			error;
   2594
   2595	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
   2596	ASSERT(VFS_I(ip)->i_nlink == 0);
   2597	ASSERT(ip->i_df.if_nextents == 0);
   2598	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
   2599	ASSERT(ip->i_nblocks == 0);
   2600
   2601	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
   2602
   2603	/*
   2604	 * Free the inode first so that we guarantee that the AGI lock is going
   2605	 * to be taken before we remove the inode from the unlinked list. This
   2606	 * makes the AGI lock -> unlinked list modification order the same as
   2607	 * used in O_TMPFILE creation.
   2608	 */
   2609	error = xfs_difree(tp, pag, ip->i_ino, &xic);
   2610	if (error)
   2611		goto out;
   2612
   2613	error = xfs_iunlink_remove(tp, pag, ip);
   2614	if (error)
   2615		goto out;
   2616
   2617	/*
   2618	 * Free any local-format data sitting around before we reset the
   2619	 * data fork to extents format.  Note that the attr fork data has
   2620	 * already been freed by xfs_attr_inactive.
   2621	 */
   2622	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
   2623		kmem_free(ip->i_df.if_u1.if_data);
   2624		ip->i_df.if_u1.if_data = NULL;
   2625		ip->i_df.if_bytes = 0;
   2626	}
   2627
   2628	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
   2629	ip->i_diflags = 0;
   2630	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
   2631	ip->i_forkoff = 0;		/* mark the attr fork not in use */
   2632	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
   2633	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
   2634		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
   2635
   2636	/* Don't attempt to replay owner changes for a deleted inode */
   2637	spin_lock(&iip->ili_lock);
   2638	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
   2639	spin_unlock(&iip->ili_lock);
   2640
   2641	/*
   2642	 * Bump the generation count so no one will be confused
   2643	 * by reincarnations of this inode.
   2644	 */
   2645	VFS_I(ip)->i_generation++;
   2646	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
   2647
   2648	if (xic.deleted)
   2649		error = xfs_ifree_cluster(tp, pag, ip, &xic);
   2650out:
   2651	xfs_perag_put(pag);
   2652	return error;
   2653}
   2654
   2655/*
   2656 * This is called to unpin an inode.  The caller must have the inode locked
   2657 * in at least shared mode so that the buffer cannot be subsequently pinned
   2658 * once someone is waiting for it to be unpinned.
   2659 */
   2660static void
   2661xfs_iunpin(
   2662	struct xfs_inode	*ip)
   2663{
   2664	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
   2665
   2666	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
   2667
   2668	/* Give the log a push to start the unpinning I/O */
   2669	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
   2670
   2671}
   2672
   2673static void
   2674__xfs_iunpin_wait(
   2675	struct xfs_inode	*ip)
   2676{
   2677	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
   2678	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
   2679
   2680	xfs_iunpin(ip);
   2681
   2682	do {
   2683		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
   2684		if (xfs_ipincount(ip))
   2685			io_schedule();
   2686	} while (xfs_ipincount(ip));
   2687	finish_wait(wq, &wait.wq_entry);
   2688}
   2689
   2690void
   2691xfs_iunpin_wait(
   2692	struct xfs_inode	*ip)
   2693{
   2694	if (xfs_ipincount(ip))
   2695		__xfs_iunpin_wait(ip);
   2696}
   2697
   2698/*
   2699 * Removing an inode from the namespace involves removing the directory entry
   2700 * and dropping the link count on the inode. Removing the directory entry can
   2701 * result in locking an AGF (directory blocks were freed) and removing a link
   2702 * count can result in placing the inode on an unlinked list which results in
   2703 * locking an AGI.
   2704 *
   2705 * The big problem here is that we have an ordering constraint on AGF and AGI
   2706 * locking - inode allocation locks the AGI, then can allocate a new extent for
   2707 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
   2708 * removes the inode from the unlinked list, requiring that we lock the AGI
   2709 * first, and then freeing the inode can result in an inode chunk being freed
   2710 * and hence freeing disk space requiring that we lock an AGF.
   2711 *
   2712 * Hence the ordering that is imposed by other parts of the code is AGI before
   2713 * AGF. This means we cannot remove the directory entry before we drop the inode
   2714 * reference count and put it on the unlinked list as this results in a lock
   2715 * order of AGF then AGI, and this can deadlock against inode allocation and
   2716 * freeing. Therefore we must drop the link counts before we remove the
   2717 * directory entry.
   2718 *
   2719 * This is still safe from a transactional point of view - it is not until we
   2720 * get to xfs_defer_finish() that we have the possibility of multiple
   2721 * transactions in this operation. Hence as long as we remove the directory
   2722 * entry and drop the link count in the first transaction of the remove
   2723 * operation, there are no transactional constraints on the ordering here.
   2724 */
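       /*
        * A concrete, hypothetical illustration of the ordering above: the
        * final unlink of a file drops its link count and iunlinks it, which
        * locks the AGI of the inode's AG; removing the directory entry may
        * then free a directory block, which locks an AGF.  Inode allocation
        * likewise takes the AGI before it ever takes an AGF, so doing the
        * iunlink before the dirent removal keeps every path locking
        * AGI -> AGF and avoids an ABBA deadlock.
        */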
   2725int
   2726xfs_remove(
   2727	xfs_inode_t             *dp,
   2728	struct xfs_name		*name,
   2729	xfs_inode_t		*ip)
   2730{
   2731	xfs_mount_t		*mp = dp->i_mount;
   2732	xfs_trans_t             *tp = NULL;
   2733	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
   2734	int			dontcare;
   2735	int                     error = 0;
   2736	uint			resblks;
   2737
   2738	trace_xfs_remove(dp, name);
   2739
   2740	if (xfs_is_shutdown(mp))
   2741		return -EIO;
   2742
   2743	error = xfs_qm_dqattach(dp);
   2744	if (error)
   2745		goto std_return;
   2746
   2747	error = xfs_qm_dqattach(ip);
   2748	if (error)
   2749		goto std_return;
   2750
   2751	/*
   2752	 * We try to get the real space reservation first, allowing for
   2753	 * directory btree deletion(s) implying possible bmap insert(s).  If we
   2754	 * can't get the space reservation then we use 0 instead, and avoid the
   2755	 * bmap btree insert(s) in the directory code: if a bmap insert would
   2756	 * otherwise be needed, we instead trim the LAST block from the directory.
   2757	 *
   2758	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
   2759	 * the directory code can handle a reservationless update and we don't
   2760	 * want to prevent a user from trying to free space by deleting things.
   2761	 */
   2762	resblks = XFS_REMOVE_SPACE_RES(mp);
   2763	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
   2764			&tp, &dontcare);
   2765	if (error) {
   2766		ASSERT(error != -ENOSPC);
   2767		goto std_return;
   2768	}
   2769
   2770	/*
   2771	 * If we're removing a directory perform some additional validation.
   2772	 */
   2773	if (is_dir) {
   2774		ASSERT(VFS_I(ip)->i_nlink >= 2);
   2775		if (VFS_I(ip)->i_nlink != 2) {
   2776			error = -ENOTEMPTY;
   2777			goto out_trans_cancel;
   2778		}
   2779		if (!xfs_dir_isempty(ip)) {
   2780			error = -ENOTEMPTY;
   2781			goto out_trans_cancel;
   2782		}
   2783
   2784		/* Drop the link from ip's "..".  */
   2785		error = xfs_droplink(tp, dp);
   2786		if (error)
   2787			goto out_trans_cancel;
   2788
   2789		/* Drop the "." link from ip to self.  */
   2790		error = xfs_droplink(tp, ip);
   2791		if (error)
   2792			goto out_trans_cancel;
   2793
   2794		/*
   2795		 * Point the unlinked child directory's ".." entry to the root
   2796		 * directory to eliminate back-references to inodes that may
   2797		 * get freed before the child directory is closed.  If the fs
   2798		 * gets shrunk, this can lead to dirent inode validation errors.
   2799		 */
   2800		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
   2801			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
   2802					tp->t_mountp->m_sb.sb_rootino, 0);
   2803			if (error)
   2804				return error;
   2805		}
   2806	} else {
   2807		/*
   2808		 * When removing a non-directory we need to log the parent
   2809		 * inode here.  For a directory this is done implicitly
   2810		 * by the xfs_droplink call for the ".." entry.
   2811		 */
   2812		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
   2813	}
   2814	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   2815
   2816	/* Drop the link from dp to ip. */
   2817	error = xfs_droplink(tp, ip);
   2818	if (error)
   2819		goto out_trans_cancel;
   2820
   2821	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
   2822	if (error) {
   2823		ASSERT(error != -ENOENT);
   2824		goto out_trans_cancel;
   2825	}
   2826
   2827	/*
   2828	 * If this is a synchronous mount, make sure that the
   2829	 * remove transaction goes to disk before returning to
   2830	 * the user.
   2831	 */
   2832	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
   2833		xfs_trans_set_sync(tp);
   2834
   2835	error = xfs_trans_commit(tp);
   2836	if (error)
   2837		goto std_return;
   2838
   2839	if (is_dir && xfs_inode_is_filestream(ip))
   2840		xfs_filestream_deassociate(ip);
   2841
   2842	return 0;
   2843
   2844 out_trans_cancel:
   2845	xfs_trans_cancel(tp);
   2846 std_return:
   2847	return error;
   2848}
   2849
   2850/*
   2851 * Enter all inodes for a rename transaction into a sorted array.
   2852 */
   2853#define __XFS_SORT_INODES	5
   2854STATIC void
   2855xfs_sort_for_rename(
   2856	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
   2857	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
   2858	struct xfs_inode	*ip1,	/* in: inode of old entry */
   2859	struct xfs_inode	*ip2,	/* in: inode of new entry */
   2860	struct xfs_inode	*wip,	/* in: whiteout inode */
   2861	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
   2862	int			*num_inodes)  /* in/out: inodes in array */
   2863{
   2864	int			i, j;
   2865
   2866	ASSERT(*num_inodes == __XFS_SORT_INODES);
   2867	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
   2868
   2869	/*
   2870	 * i_tab contains a list of pointers to inodes.  We initialize
   2871	 * the table here & we'll sort it.  We will then use it to
   2872	 * order the acquisition of the inode locks.
   2873	 *
   2874	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
   2875	 */
   2876	i = 0;
   2877	i_tab[i++] = dp1;
   2878	i_tab[i++] = dp2;
   2879	i_tab[i++] = ip1;
   2880	if (ip2)
   2881		i_tab[i++] = ip2;
   2882	if (wip)
   2883		i_tab[i++] = wip;
   2884	*num_inodes = i;
   2885
   2886	/*
   2887	 * Sort the elements via bubble sort.  (Remember, there are at
   2888	 * most 5 elements to sort, so this is adequate.)
   2889	 */
   2890	for (i = 0; i < *num_inodes; i++) {
   2891		for (j = 1; j < *num_inodes; j++) {
   2892			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
   2893				struct xfs_inode *temp = i_tab[j];
   2894				i_tab[j] = i_tab[j-1];
   2895				i_tab[j-1] = temp;
   2896			}
   2897		}
   2898	}
   2899}
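
       /*
        * Worked example with hypothetical inode numbers: for a rename with
        * dp1->i_ino = 421, dp2->i_ino = 17, ip1->i_ino = 98 and neither a
        * target inode nor a whiteout, the code above fills i_tab with
        * {421, 17, 98}, sets *num_inodes = 3, and the bubble sort leaves
        * i_tab = {17, 98, 421} so the caller can acquire the inode locks in
        * ascending inode number order.
        */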
   2900
   2901static int
   2902xfs_finish_rename(
   2903	struct xfs_trans	*tp)
   2904{
   2905	/*
   2906	 * If this is a synchronous mount, make sure that the rename transaction
   2907	 * goes to disk before returning to the user.
   2908	 */
   2909	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
   2910		xfs_trans_set_sync(tp);
   2911
   2912	return xfs_trans_commit(tp);
   2913}
   2914
   2915/*
   2916 * xfs_cross_rename()
   2917 *
   2918 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
   2919 */
   2920STATIC int
   2921xfs_cross_rename(
   2922	struct xfs_trans	*tp,
   2923	struct xfs_inode	*dp1,
   2924	struct xfs_name		*name1,
   2925	struct xfs_inode	*ip1,
   2926	struct xfs_inode	*dp2,
   2927	struct xfs_name		*name2,
   2928	struct xfs_inode	*ip2,
   2929	int			spaceres)
   2930{
   2931	int		error = 0;
   2932	int		ip1_flags = 0;
   2933	int		ip2_flags = 0;
   2934	int		dp2_flags = 0;
   2935
   2936	/* Swap inode number for dirent in first parent */
   2937	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
   2938	if (error)
   2939		goto out_trans_abort;
   2940
   2941	/* Swap inode number for dirent in second parent */
   2942	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
   2943	if (error)
   2944		goto out_trans_abort;
   2945
   2946	/*
   2947	 * If we're renaming one or more directories across different parents,
   2948	 * update the respective ".." entries (and link counts) to match the new
   2949	 * parents.
   2950	 */
   2951	if (dp1 != dp2) {
   2952		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
   2953
   2954		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
   2955			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
   2956						dp1->i_ino, spaceres);
   2957			if (error)
   2958				goto out_trans_abort;
   2959
   2960			/* transfer ip2 ".." reference to dp1 */
   2961			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
   2962				error = xfs_droplink(tp, dp2);
   2963				if (error)
   2964					goto out_trans_abort;
   2965				xfs_bumplink(tp, dp1);
   2966			}
   2967
   2968			/*
   2969			 * Although ip1 isn't changed here, userspace needs
   2970			 * to be warned about the change, so that applications
   2971			 * relying on it (like backup ones) will properly
   2972			 * be notified of the change.
   2973			 */
   2974			ip1_flags |= XFS_ICHGTIME_CHG;
   2975			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
   2976		}
   2977
   2978		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
   2979			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
   2980						dp2->i_ino, spaceres);
   2981			if (error)
   2982				goto out_trans_abort;
   2983
   2984			/* transfer ip1 ".." reference to dp2 */
   2985			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
   2986				error = xfs_droplink(tp, dp1);
   2987				if (error)
   2988					goto out_trans_abort;
   2989				xfs_bumplink(tp, dp2);
   2990			}
   2991
   2992			/*
   2993			 * Although ip2 isn't changed here, userspace needs
   2994			 * to be warned about the change, so that applications
   2995			 * relying on it (like backup ones) will properly
   2996			 * be notified of the change.
   2997			 */
   2998			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
   2999			ip2_flags |= XFS_ICHGTIME_CHG;
   3000		}
   3001	}
   3002
   3003	if (ip1_flags) {
   3004		xfs_trans_ichgtime(tp, ip1, ip1_flags);
   3005		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
   3006	}
   3007	if (ip2_flags) {
   3008		xfs_trans_ichgtime(tp, ip2, ip2_flags);
   3009		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
   3010	}
   3011	if (dp2_flags) {
   3012		xfs_trans_ichgtime(tp, dp2, dp2_flags);
   3013		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
   3014	}
   3015	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   3016	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
   3017	return xfs_finish_rename(tp);
   3018
   3019out_trans_abort:
   3020	xfs_trans_cancel(tp);
   3021	return error;
   3022}
   3023
   3024/*
   3025 * xfs_rename_alloc_whiteout()
   3026 *
   3027 * Return a referenced, unlinked, unlocked inode that can be used as a
   3028 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
   3029 * crash between allocating the inode and linking it into the rename transaction,
   3030 * recovery will free the inode and we won't leak it.
   3031 */
   3032static int
   3033xfs_rename_alloc_whiteout(
   3034	struct user_namespace	*mnt_userns,
   3035	struct xfs_inode	*dp,
   3036	struct xfs_inode	**wip)
   3037{
   3038	struct xfs_inode	*tmpfile;
   3039	int			error;
   3040
   3041	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
   3042				   &tmpfile);
   3043	if (error)
   3044		return error;
   3045
   3046	/*
   3047	 * Prepare the tmpfile inode as if it were created through the VFS.
   3048	 * Complete the inode setup and flag it as linkable.  nlink is already
   3049	 * zero, so we can skip the drop_nlink.
   3050	 */
   3051	xfs_setup_iops(tmpfile);
   3052	xfs_finish_inode_setup(tmpfile);
   3053	VFS_I(tmpfile)->i_state |= I_LINKABLE;
   3054
   3055	*wip = tmpfile;
   3056	return 0;
   3057}
   3058
   3059/*
   3060 * xfs_rename
   3061 */
   3062int
   3063xfs_rename(
   3064	struct user_namespace	*mnt_userns,
   3065	struct xfs_inode	*src_dp,
   3066	struct xfs_name		*src_name,
   3067	struct xfs_inode	*src_ip,
   3068	struct xfs_inode	*target_dp,
   3069	struct xfs_name		*target_name,
   3070	struct xfs_inode	*target_ip,
   3071	unsigned int		flags)
   3072{
   3073	struct xfs_mount	*mp = src_dp->i_mount;
   3074	struct xfs_trans	*tp;
   3075	struct xfs_inode	*wip = NULL;		/* whiteout inode */
   3076	struct xfs_inode	*inodes[__XFS_SORT_INODES];
   3077	int			i;
   3078	int			num_inodes = __XFS_SORT_INODES;
   3079	bool			new_parent = (src_dp != target_dp);
   3080	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
   3081	int			spaceres;
   3082	bool			retried = false;
   3083	int			error, nospace_error = 0;
   3084
   3085	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
   3086
   3087	if ((flags & RENAME_EXCHANGE) && !target_ip)
   3088		return -EINVAL;
   3089
   3090	/*
   3091	 * If we are doing a whiteout operation, allocate the whiteout inode
   3092	 * we will be placing at the target and ensure the type is set
   3093	 * appropriately.
   3094	 */
   3095	if (flags & RENAME_WHITEOUT) {
   3096		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
   3097		if (error)
   3098			return error;
   3099
   3100		/* setup target dirent info as whiteout */
   3101		src_name->type = XFS_DIR3_FT_CHRDEV;
   3102	}
   3103
   3104	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
   3105				inodes, &num_inodes);
   3106
   3107retry:
   3108	nospace_error = 0;
   3109	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
   3110	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
   3111	if (error == -ENOSPC) {
   3112		nospace_error = error;
   3113		spaceres = 0;
   3114		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
   3115				&tp);
   3116	}
   3117	if (error)
   3118		goto out_release_wip;
   3119
   3120	/*
   3121	 * Attach the dquots to the inodes
   3122	 */
   3123	error = xfs_qm_vop_rename_dqattach(inodes);
   3124	if (error)
   3125		goto out_trans_cancel;
   3126
   3127	/*
   3128	 * Lock all the participating inodes. Depending upon whether
   3129	 * the target_name exists in the target directory, whether the target
   3130	 * directory is the same as the source directory, and whether there is
   3131	 * a whiteout inode, we can lock from 2 to 5 inodes.
   3132	 */
   3133	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
   3134
   3135	/*
   3136	 * Join all the inodes to the transaction. From this point on,
   3137	 * we can rely on either trans_commit or trans_cancel to unlock
   3138	 * them.
   3139	 */
   3140	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
   3141	if (new_parent)
   3142		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
   3143	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
   3144	if (target_ip)
   3145		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
   3146	if (wip)
   3147		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
   3148
   3149	/*
   3150	 * If we are using project inheritance, we only allow renames
   3151	 * into our tree when the project IDs are the same; else the
   3152	 * tree quota mechanism would be circumvented.
   3153	 */
   3154	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
   3155		     target_dp->i_projid != src_ip->i_projid)) {
   3156		error = -EXDEV;
   3157		goto out_trans_cancel;
   3158	}
   3159
   3160	/* RENAME_EXCHANGE is unique from here on. */
   3161	if (flags & RENAME_EXCHANGE)
   3162		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
   3163					target_dp, target_name, target_ip,
   3164					spaceres);
   3165
   3166	/*
   3167	 * Try to reserve quota to handle an expansion of the target directory.
   3168	 * We'll allow the rename to continue in reservationless mode if we hit
   3169	 * a space usage constraint.  If we trigger reservationless mode, save
   3170	 * the errno if there isn't any free space in the target directory.
   3171	 */
   3172	if (spaceres != 0) {
   3173		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
   3174				0, false);
   3175		if (error == -EDQUOT || error == -ENOSPC) {
   3176			if (!retried) {
   3177				xfs_trans_cancel(tp);
   3178				xfs_blockgc_free_quota(target_dp, 0);
   3179				retried = true;
   3180				goto retry;
   3181			}
   3182
   3183			nospace_error = error;
   3184			spaceres = 0;
   3185			error = 0;
   3186		}
   3187		if (error)
   3188			goto out_trans_cancel;
   3189	}
   3190
   3191	/*
   3192	 * Check for expected errors before we dirty the transaction
   3193	 * so we can return an error without a transaction abort.
   3194	 */
   3195	if (target_ip == NULL) {
   3196		/*
   3197		 * If there's no space reservation, check that the entry will
   3198		 * fit before actually inserting it.
   3199		 */
   3200		if (!spaceres) {
   3201			error = xfs_dir_canenter(tp, target_dp, target_name);
   3202			if (error)
   3203				goto out_trans_cancel;
   3204		}
   3205	} else {
   3206		/*
   3207		 * If target exists and it's a directory, check whether
   3208		 * it can be destroyed.
   3209		 */
   3210		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
   3211		    (!xfs_dir_isempty(target_ip) ||
   3212		     (VFS_I(target_ip)->i_nlink > 2))) {
   3213			error = -EEXIST;
   3214			goto out_trans_cancel;
   3215		}
   3216	}
   3217
   3218	/*
   3219	 * Lock the AGI buffers we need to handle bumping the nlink of the
   3220	 * whiteout inode off the unlinked list and to handle dropping the
   3221	 * nlink of the target inode.  Per locking order rules, do this in
   3222	 * increasing AG order and before directory block allocation tries to
   3223	 * grab AGFs because we grab AGIs before AGFs.
   3224	 *
   3225	 * The (vfs) caller must ensure that if src is a directory then
   3226	 * target_ip is either null or an empty directory.
   3227	 */
   3228	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
   3229		if (inodes[i] == wip ||
   3230		    (inodes[i] == target_ip &&
   3231		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
   3232			struct xfs_buf	*bp;
   3233			xfs_agnumber_t	agno;
   3234
   3235			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
   3236			error = xfs_read_agi(mp, tp, agno, &bp);
   3237			if (error)
   3238				goto out_trans_cancel;
   3239		}
   3240	}
   3241
   3242	/*
   3243	 * Directory entry creation below may acquire the AGF. Remove
   3244	 * the whiteout from the unlinked list first to preserve correct
   3245	 * AGI/AGF locking order. This dirties the transaction so failures
   3246	 * after this point will abort and log recovery will clean up the
   3247	 * mess.
   3248	 *
   3249	 * For whiteouts, we need to bump the link count on the whiteout
   3250	 * inode. After this point we have a real link, so clear the tmpfile
   3251	 * state flag from the inode so it doesn't accidentally get misused
   3252	 * in the future.
   3253	 */
   3254	if (wip) {
   3255		struct xfs_perag	*pag;
   3256
   3257		ASSERT(VFS_I(wip)->i_nlink == 0);
   3258
   3259		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
   3260		error = xfs_iunlink_remove(tp, pag, wip);
   3261		xfs_perag_put(pag);
   3262		if (error)
   3263			goto out_trans_cancel;
   3264
   3265		xfs_bumplink(tp, wip);
   3266		VFS_I(wip)->i_state &= ~I_LINKABLE;
   3267	}
   3268
   3269	/*
   3270	 * Set up the target.
   3271	 */
   3272	if (target_ip == NULL) {
   3273		/*
   3274		 * If target does not exist and the rename crosses
   3275		 * directories, adjust the target directory link count
   3276		 * to account for the ".." reference from the new entry.
   3277		 */
   3278		error = xfs_dir_createname(tp, target_dp, target_name,
   3279					   src_ip->i_ino, spaceres);
   3280		if (error)
   3281			goto out_trans_cancel;
   3282
   3283		xfs_trans_ichgtime(tp, target_dp,
   3284					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   3285
   3286		if (new_parent && src_is_directory) {
   3287			xfs_bumplink(tp, target_dp);
   3288		}
   3289	} else { /* target_ip != NULL */
   3290		/*
   3291		 * Link the source inode under the target name.
   3292		 * If the source inode is a directory and we are moving
   3293		 * it across directories, its ".." entry will be
   3294		 * inconsistent until we replace that down below.
   3295		 *
   3296		 * In case there is already an entry with the same
   3297		 * name at the destination directory, remove it first.
   3298		 */
   3299		error = xfs_dir_replace(tp, target_dp, target_name,
   3300					src_ip->i_ino, spaceres);
   3301		if (error)
   3302			goto out_trans_cancel;
   3303
   3304		xfs_trans_ichgtime(tp, target_dp,
   3305					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   3306
   3307		/*
   3308		 * Decrement the link count on the target since the target
   3309		 * dir no longer points to it.
   3310		 */
   3311		error = xfs_droplink(tp, target_ip);
   3312		if (error)
   3313			goto out_trans_cancel;
   3314
   3315		if (src_is_directory) {
   3316			/*
   3317			 * Drop the link from the old "." entry.
   3318			 */
   3319			error = xfs_droplink(tp, target_ip);
   3320			if (error)
   3321				goto out_trans_cancel;
   3322		}
   3323	} /* target_ip != NULL */
   3324
   3325	/*
   3326	 * Remove the source.
   3327	 */
   3328	if (new_parent && src_is_directory) {
   3329		/*
   3330		 * Rewrite the ".." entry to point to the new
   3331		 * directory.
   3332		 */
   3333		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
   3334					target_dp->i_ino, spaceres);
   3335		ASSERT(error != -EEXIST);
   3336		if (error)
   3337			goto out_trans_cancel;
   3338	}
   3339
   3340	/*
   3341	 * We always want to hit the ctime on the source inode.
   3342	 *
   3343	 * This isn't strictly required by the standards since the source
   3344	 * inode isn't really being changed, but old unix file systems did
   3345	 * it and some incremental backup programs won't work without it.
   3346	 */
   3347	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
   3348	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
   3349
   3350	/*
   3351	 * Adjust the link count on src_dp.  This is necessary when
   3352	 * renaming a directory, either within one parent when
   3353	 * the target existed, or across two parent directories.
   3354	 */
   3355	if (src_is_directory && (new_parent || target_ip != NULL)) {
   3356
   3357		/*
   3358		 * Decrement link count on src_directory since the
   3359		 * entry that's moved no longer points to it.
   3360		 */
   3361		error = xfs_droplink(tp, src_dp);
   3362		if (error)
   3363			goto out_trans_cancel;
   3364	}
   3365
   3366	/*
   3367	 * For whiteouts, we only need to update the source dirent with the
   3368	 * inode number of the whiteout inode rather than removing it
   3369	 * altogether.
   3370	 */
   3371	if (wip)
   3372		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
   3373					spaceres);
   3374	else
   3375		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
   3376					   spaceres);
   3377
   3378	if (error)
   3379		goto out_trans_cancel;
   3380
   3381	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
   3382	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
   3383	if (new_parent)
   3384		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
   3385
   3386	error = xfs_finish_rename(tp);
   3387	if (wip)
   3388		xfs_irele(wip);
   3389	return error;
   3390
   3391out_trans_cancel:
   3392	xfs_trans_cancel(tp);
   3393out_release_wip:
   3394	if (wip)
   3395		xfs_irele(wip);
   3396	if (error == -ENOSPC && nospace_error)
   3397		error = nospace_error;
   3398	return error;
   3399}
   3400
   3401static int
   3402xfs_iflush(
   3403	struct xfs_inode	*ip,
   3404	struct xfs_buf		*bp)
   3405{
   3406	struct xfs_inode_log_item *iip = ip->i_itemp;
   3407	struct xfs_dinode	*dip;
   3408	struct xfs_mount	*mp = ip->i_mount;
   3409	int			error;
   3410
   3411	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
   3412	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
   3413	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
   3414	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
   3415	ASSERT(iip->ili_item.li_buf == bp);
   3416
   3417	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
   3418
   3419	/*
   3420	 * We don't flush the inode if any of the following checks fail, but we
   3421	 * do still update the log item and attach to the backing buffer as if
   3422	 * the flush happened. This is a formality to facilitate predictable
   3423	 * error handling as the caller will shutdown and fail the buffer.
   3424	 */
   3425	error = -EFSCORRUPTED;
   3426	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
   3427			       mp, XFS_ERRTAG_IFLUSH_1)) {
   3428		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
   3429			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
   3430			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
   3431		goto flush_out;
   3432	}
   3433	if (S_ISREG(VFS_I(ip)->i_mode)) {
   3434		if (XFS_TEST_ERROR(
   3435		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
   3436		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
   3437		    mp, XFS_ERRTAG_IFLUSH_3)) {
   3438			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
   3439				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
   3440				__func__, ip->i_ino, ip);
   3441			goto flush_out;
   3442		}
   3443	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
   3444		if (XFS_TEST_ERROR(
   3445		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
   3446		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
   3447		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
   3448		    mp, XFS_ERRTAG_IFLUSH_4)) {
   3449			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
   3450				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
   3451				__func__, ip->i_ino, ip);
   3452			goto flush_out;
   3453		}
   3454	}
   3455	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
   3456				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
   3457		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
   3458			"%s: detected corrupt incore inode %llu, "
   3459			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
   3460			__func__, ip->i_ino,
   3461			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
   3462			ip->i_nblocks, ip);
   3463		goto flush_out;
   3464	}
   3465	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
   3466				mp, XFS_ERRTAG_IFLUSH_6)) {
   3467		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
   3468			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
   3469			__func__, ip->i_ino, ip->i_forkoff, ip);
   3470		goto flush_out;
   3471	}
   3472
   3473	/*
    3474	 * Inode item log recovery for v2 inodes is dependent on the flushiter
   3475	 * count for correct sequencing.  We bump the flush iteration count so
   3476	 * we can detect flushes which postdate a log record during recovery.
   3477	 * This is redundant as we now log every change and hence this can't
    3478	 * happen, but we still need to do it to ensure backwards compatibility
   3479	 * with old kernels that predate logging all inode changes.
   3480	 */
   3481	if (!xfs_has_v3inodes(mp))
   3482		ip->i_flushiter++;
   3483
   3484	/*
   3485	 * If there are inline format data / attr forks attached to this inode,
   3486	 * make sure they are not corrupt.
   3487	 */
   3488	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
   3489	    xfs_ifork_verify_local_data(ip))
   3490		goto flush_out;
   3491	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
   3492	    xfs_ifork_verify_local_attr(ip))
   3493		goto flush_out;
   3494
   3495	/*
   3496	 * Copy the dirty parts of the inode into the on-disk inode.  We always
   3497	 * copy out the core of the inode, because if the inode is dirty at all
   3498	 * the core must be.
   3499	 */
   3500	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
   3501
   3502	/* Wrap, we never let the log put out DI_MAX_FLUSH */
   3503	if (!xfs_has_v3inodes(mp)) {
   3504		if (ip->i_flushiter == DI_MAX_FLUSH)
   3505			ip->i_flushiter = 0;
   3506	}
   3507
   3508	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
   3509	if (XFS_IFORK_Q(ip))
   3510		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
   3511
   3512	/*
   3513	 * We've recorded everything logged in the inode, so we'd like to clear
   3514	 * the ili_fields bits so we don't log and flush things unnecessarily.
   3515	 * However, we can't stop logging all this information until the data
   3516	 * we've copied into the disk buffer is written to disk.  If we did we
   3517	 * might overwrite the copy of the inode in the log with all the data
   3518	 * after re-logging only part of it, and in the face of a crash we
   3519	 * wouldn't have all the data we need to recover.
   3520	 *
   3521	 * What we do is move the bits to the ili_last_fields field.  When
   3522	 * logging the inode, these bits are moved back to the ili_fields field.
   3523	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
   3524	 * we know that the information those bits represent is permanently on
   3525	 * disk.  As long as the flush completes before the inode is logged
   3526	 * again, then both ili_fields and ili_last_fields will be cleared.
   3527	 */
   3528	error = 0;
   3529flush_out:
   3530	spin_lock(&iip->ili_lock);
   3531	iip->ili_last_fields = iip->ili_fields;
   3532	iip->ili_fields = 0;
   3533	iip->ili_fsync_fields = 0;
   3534	spin_unlock(&iip->ili_lock);
   3535
   3536	/*
   3537	 * Store the current LSN of the inode so that we can tell whether the
   3538	 * item has moved in the AIL from xfs_buf_inode_iodone().
   3539	 */
   3540	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
   3541				&iip->ili_item.li_lsn);
   3542
   3543	/* generate the checksum. */
   3544	xfs_dinode_calc_crc(mp, dip);
   3545	return error;
   3546}
   3547
   3548/*
   3549 * Non-blocking flush of dirty inode metadata into the backing buffer.
   3550 *
   3551 * The caller must have a reference to the inode and hold the cluster buffer
   3552 * locked. The function will walk across all the inodes on the cluster buffer it
    3553 * locked. The function will walk all the inodes attached to the cluster buffer
    3554 * that it can find and lock without blocking, and flush them to the cluster buffer.
   3555 * On successful flushing of at least one inode, the caller must write out the
   3556 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
   3557 * the caller needs to release the buffer. On failure, the filesystem will be
   3558 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
   3559 * will be returned.
   3560 */
   3561int
   3562xfs_iflush_cluster(
   3563	struct xfs_buf		*bp)
   3564{
   3565	struct xfs_mount	*mp = bp->b_mount;
   3566	struct xfs_log_item	*lip, *n;
   3567	struct xfs_inode	*ip;
   3568	struct xfs_inode_log_item *iip;
   3569	int			clcount = 0;
   3570	int			error = 0;
   3571
   3572	/*
   3573	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
   3574	 * will remove itself from the list.
   3575	 */
   3576	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
   3577		iip = (struct xfs_inode_log_item *)lip;
   3578		ip = iip->ili_inode;
   3579
   3580		/*
   3581		 * Quick and dirty check to avoid locks if possible.
   3582		 */
   3583		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
   3584			continue;
   3585		if (xfs_ipincount(ip))
   3586			continue;
   3587
   3588		/*
   3589		 * The inode is still attached to the buffer, which means it is
   3590		 * dirty but reclaim might try to grab it. Check carefully for
   3591		 * that, and grab the ilock while still holding the i_flags_lock
   3592		 * to guarantee reclaim will not be able to reclaim this inode
   3593		 * once we drop the i_flags_lock.
   3594		 */
   3595		spin_lock(&ip->i_flags_lock);
   3596		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
   3597		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
   3598			spin_unlock(&ip->i_flags_lock);
   3599			continue;
   3600		}
   3601
   3602		/*
   3603		 * ILOCK will pin the inode against reclaim and prevent
   3604		 * concurrent transactions modifying the inode while we are
   3605		 * flushing the inode. If we get the lock, set the flushing
   3606		 * state before we drop the i_flags_lock.
   3607		 */
   3608		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
   3609			spin_unlock(&ip->i_flags_lock);
   3610			continue;
   3611		}
   3612		__xfs_iflags_set(ip, XFS_IFLUSHING);
   3613		spin_unlock(&ip->i_flags_lock);
   3614
   3615		/*
   3616		 * Abort flushing this inode if we are shut down because the
   3617		 * inode may not currently be in the AIL. This can occur when
   3618		 * log I/O failure unpins the inode without inserting into the
   3619		 * AIL, leaving a dirty/unpinned inode attached to the buffer
   3620		 * that otherwise looks like it should be flushed.
   3621		 */
   3622		if (xlog_is_shutdown(mp->m_log)) {
   3623			xfs_iunpin_wait(ip);
   3624			xfs_iflush_abort(ip);
   3625			xfs_iunlock(ip, XFS_ILOCK_SHARED);
   3626			error = -EIO;
   3627			continue;
   3628		}
   3629
   3630		/* don't block waiting on a log force to unpin dirty inodes */
   3631		if (xfs_ipincount(ip)) {
   3632			xfs_iflags_clear(ip, XFS_IFLUSHING);
   3633			xfs_iunlock(ip, XFS_ILOCK_SHARED);
   3634			continue;
   3635		}
   3636
   3637		if (!xfs_inode_clean(ip))
   3638			error = xfs_iflush(ip, bp);
   3639		else
   3640			xfs_iflags_clear(ip, XFS_IFLUSHING);
   3641		xfs_iunlock(ip, XFS_ILOCK_SHARED);
   3642		if (error)
   3643			break;
   3644		clcount++;
   3645	}
   3646
   3647	if (error) {
   3648		/*
   3649		 * Shutdown first so we kill the log before we release this
   3650		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
   3651		 * of the log, failing it before the _log_ is shut down can
   3652		 * result in the log tail being moved forward in the journal
   3653		 * on disk because log writes can still be taking place. Hence
   3654		 * unpinning the tail will allow the ICREATE intent to be
    3655		 * removed from the log and recovery will fail with uninitialised
   3656		 * inode cluster buffers.
   3657		 */
   3658		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
   3659		bp->b_flags |= XBF_ASYNC;
   3660		xfs_buf_ioend_fail(bp);
   3661		return error;
   3662	}
   3663
   3664	if (!clcount)
   3665		return -EAGAIN;
   3666
   3667	XFS_STATS_INC(mp, xs_icluster_flushcnt);
   3668	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
   3669	return 0;
   3670
   3671}
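
/*
 * A minimal caller-side sketch (the helper name and delwri list below are
 * illustrative, not part of this file): a caller owning the locked cluster
 * buffer is expected to queue the buffer for write-out when at least one
 * inode was flushed, release the buffer on -EAGAIN, and return any other
 * error as-is since xfs_iflush_cluster() has already shut the filesystem
 * down and released the buffer:
 *
 *	static int example_push_cluster(struct xfs_buf *bp,
 *			struct list_head *buffer_list)
 *	{
 *		int	error;
 *
 *		error = xfs_iflush_cluster(bp);
 *		if (error == -EAGAIN) {
 *			xfs_buf_relse(bp);
 *			return 0;
 *		}
 *		if (error)
 *			return error;
 *
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *		return 0;
 *	}
 */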
   3672
   3673/* Release an inode. */
   3674void
   3675xfs_irele(
   3676	struct xfs_inode	*ip)
   3677{
   3678	trace_xfs_irele(ip, _RET_IP_);
   3679	iput(VFS_I(ip));
   3680}
   3681
   3682/*
    3683 * Ensure all committed transactions touching the inode are written to the log.
   3684 */
   3685int
   3686xfs_log_force_inode(
   3687	struct xfs_inode	*ip)
   3688{
   3689	xfs_csn_t		seq = 0;
   3690
   3691	xfs_ilock(ip, XFS_ILOCK_SHARED);
   3692	if (xfs_ipincount(ip))
   3693		seq = ip->i_itemp->ili_commit_seq;
   3694	xfs_iunlock(ip, XFS_ILOCK_SHARED);
   3695
   3696	if (!seq)
   3697		return 0;
   3698	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
   3699}
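
/*
 * A minimal usage sketch (hypothetical helper name): a metadata-only
 * fsync-style path can make an inode's committed changes stable by forcing
 * the log up to the inode's last commit sequence. If the inode is not
 * pinned, nothing is outstanding and the call returns 0 without issuing a
 * log force:
 *
 *	static int example_fsync_metadata(struct xfs_inode *ip)
 *	{
 *		return xfs_log_force_inode(ip);
 *	}
 */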
   3700
   3701/*
   3702 * Grab the exclusive iolock for a data copy from src to dest, making sure to
    3703 * abide by vfs locking order (lowest pointer value goes first) and breaking the
   3704 * layout leases before proceeding.  The loop is needed because we cannot call
   3705 * the blocking break_layout() with the iolocks held, and therefore have to
   3706 * back out both locks.
   3707 */
   3708static int
   3709xfs_iolock_two_inodes_and_break_layout(
   3710	struct inode		*src,
   3711	struct inode		*dest)
   3712{
   3713	int			error;
   3714
   3715	if (src > dest)
   3716		swap(src, dest);
   3717
   3718retry:
   3719	/* Wait to break both inodes' layouts before we start locking. */
   3720	error = break_layout(src, true);
   3721	if (error)
   3722		return error;
   3723	if (src != dest) {
   3724		error = break_layout(dest, true);
   3725		if (error)
   3726			return error;
   3727	}
   3728
   3729	/* Lock one inode and make sure nobody got in and leased it. */
   3730	inode_lock(src);
   3731	error = break_layout(src, false);
   3732	if (error) {
   3733		inode_unlock(src);
   3734		if (error == -EWOULDBLOCK)
   3735			goto retry;
   3736		return error;
   3737	}
   3738
   3739	if (src == dest)
   3740		return 0;
   3741
   3742	/* Lock the other inode and make sure nobody got in and leased it. */
   3743	inode_lock_nested(dest, I_MUTEX_NONDIR2);
   3744	error = break_layout(dest, false);
   3745	if (error) {
   3746		inode_unlock(src);
   3747		inode_unlock(dest);
   3748		if (error == -EWOULDBLOCK)
   3749			goto retry;
   3750		return error;
   3751	}
   3752
   3753	return 0;
   3754}
   3755
   3756/*
   3757 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
   3758 * mmap activity.
   3759 */
   3760int
   3761xfs_ilock2_io_mmap(
   3762	struct xfs_inode	*ip1,
   3763	struct xfs_inode	*ip2)
   3764{
   3765	int			ret;
   3766
   3767	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
   3768	if (ret)
   3769		return ret;
   3770	filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
   3771				    VFS_I(ip2)->i_mapping);
   3772	return 0;
   3773}
   3774
   3775/* Unlock both inodes to allow IO and mmap activity. */
   3776void
   3777xfs_iunlock2_io_mmap(
   3778	struct xfs_inode	*ip1,
   3779	struct xfs_inode	*ip2)
   3780{
   3781	filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
   3782				      VFS_I(ip2)->i_mapping);
   3783	inode_unlock(VFS_I(ip2));
   3784	if (ip1 != ip2)
   3785		inode_unlock(VFS_I(ip1));
   3786}
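
/*
 * A minimal pairing sketch (hypothetical helper name): operations that span
 * two inodes take the iolocks and invalidation locks up front with
 * xfs_ilock2_io_mmap() and drop them again with xfs_iunlock2_io_mmap() once
 * the work is done, so no file I/O or mmap fault can run concurrently:
 *
 *	static int example_two_inode_op(struct xfs_inode *ip1,
 *			struct xfs_inode *ip2)
 *	{
 *		int	error;
 *
 *		error = xfs_ilock2_io_mmap(ip1, ip2);
 *		if (error)
 *			return error;
 *
 *		... do the two-inode work while I/O and faults are blocked ...
 *
 *		xfs_iunlock2_io_mmap(ip1, ip2);
 *		return 0;
 *	}
 */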