cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xfs_trans_buf.c (20691B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
      4 * All Rights Reserved.
      5 */
      6#include "xfs.h"
      7#include "xfs_fs.h"
      8#include "xfs_shared.h"
      9#include "xfs_format.h"
     10#include "xfs_log_format.h"
     11#include "xfs_trans_resv.h"
     12#include "xfs_mount.h"
     13#include "xfs_trans.h"
     14#include "xfs_buf_item.h"
     15#include "xfs_trans_priv.h"
     16#include "xfs_trace.h"
     17
     18/*
     19 * Check to see if a buffer matching the given parameters is already
     20 * a part of the given transaction.
     21 */
     22STATIC struct xfs_buf *
     23xfs_trans_buf_item_match(
     24	struct xfs_trans	*tp,
     25	struct xfs_buftarg	*target,
     26	struct xfs_buf_map	*map,
     27	int			nmaps)
     28{
     29	struct xfs_log_item	*lip;
     30	struct xfs_buf_log_item	*blip;
     31	int			len = 0;
     32	int			i;
     33
     34	for (i = 0; i < nmaps; i++)
     35		len += map[i].bm_len;
     36
     37	list_for_each_entry(lip, &tp->t_items, li_trans) {
     38		blip = (struct xfs_buf_log_item *)lip;
     39		if (blip->bli_item.li_type == XFS_LI_BUF &&
     40		    blip->bli_buf->b_target == target &&
     41		    xfs_buf_daddr(blip->bli_buf) == map[0].bm_bn &&
     42		    blip->bli_buf->b_length == len) {
     43			ASSERT(blip->bli_buf->b_map_count == nmaps);
     44			return blip->bli_buf;
     45		}
     46	}
     47
     48	return NULL;
     49}
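
/*
 * Illustrative sketch (not upstream code): how a caller might describe a
 * discontiguous buffer that the matcher above can find. Only map[0].bm_bn
 * is compared while bm_len is summed over all vectors, so a compound
 * buffer matches on its head block plus total length. The function name
 * and block numbers below are made up.
 */
static int
example_get_compound_buf(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf		**bpp)
{
	struct xfs_buf_map	map[2] = {
		{ .bm_bn = 64,  .bm_len = 8 },	/* hypothetical head extent */
		{ .bm_bn = 256, .bm_len = 8 },	/* hypothetical tail extent */
	};

	return xfs_trans_get_buf_map(tp, target, map, 2, 0, bpp);
}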
     50
     51/*
     52 * Add the locked buffer to the transaction.
     53 *
     54 * The buffer must be locked, and it cannot be associated with any
     55 * transaction.
     56 *
     57 * If the buffer does not yet have a buf log item associated with it,
     58 * then allocate one for it.  Then add the buf item to the transaction.
     59 */
     60STATIC void
     61_xfs_trans_bjoin(
     62	struct xfs_trans	*tp,
     63	struct xfs_buf		*bp,
     64	int			reset_recur)
     65{
     66	struct xfs_buf_log_item	*bip;
     67
     68	ASSERT(bp->b_transp == NULL);
     69
     70	/*
     71	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
     72	 * it doesn't have one yet, then allocate one and initialize it.
     73	 * The checks to see if one is there are in xfs_buf_item_init().
     74	 */
     75	xfs_buf_item_init(bp, tp->t_mountp);
     76	bip = bp->b_log_item;
     77	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
     78	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
     79	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
     80	if (reset_recur)
     81		bip->bli_recur = 0;
     82
     83	/*
     84	 * Take a reference for this transaction on the buf item.
     85	 */
     86	atomic_inc(&bip->bli_refcount);
     87
     88	/*
     89	 * Attach the item to the transaction so we can find it in
     90	 * xfs_trans_get_buf() and friends.
     91	 */
     92	xfs_trans_add_item(tp, &bip->bli_item);
     93	bp->b_transp = tp;
     94
     95}
     96
     97void
     98xfs_trans_bjoin(
     99	struct xfs_trans	*tp,
    100	struct xfs_buf		*bp)
    101{
    102	_xfs_trans_bjoin(tp, bp, 0);
    103	trace_xfs_trans_bjoin(bp->b_log_item);
    104}
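
/*
 * Illustrative sketch (not upstream code): joining a buffer that was
 * locked outside the transaction, e.g. via xfs_buf_get(), so that the
 * transaction owns the lock and a buf log item reference from here on.
 * The function name is hypothetical.
 */
static int
example_join_raw_buf(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get(target, blkno, numblks, &bp);
	if (error)
		return error;

	/* bp is returned locked and not attached to any transaction */
	xfs_trans_bjoin(tp, bp);
	return 0;
}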
    105
    106/*
    107 * Get and lock the buffer for the caller if it is not already
    108 * locked within the given transaction.  If it is already locked
    109 * within the transaction, just increment its lock recursion count
    110 * and return a pointer to it.
    111 *
    112 * If the transaction pointer is NULL, make this just a normal
    113 * get_buf() call.
    114 */
    115int
    116xfs_trans_get_buf_map(
    117	struct xfs_trans	*tp,
    118	struct xfs_buftarg	*target,
    119	struct xfs_buf_map	*map,
    120	int			nmaps,
    121	xfs_buf_flags_t		flags,
    122	struct xfs_buf		**bpp)
    123{
    124	struct xfs_buf		*bp;
    125	struct xfs_buf_log_item	*bip;
    126	int			error;
    127
    128	*bpp = NULL;
    129	if (!tp)
    130		return xfs_buf_get_map(target, map, nmaps, flags, bpp);
    131
    132	/*
    133	 * If we find the buffer in the cache with this transaction
    134	 * pointer in its b_fsprivate2 field, then we know we already
    135	 * have it locked.  In this case we just increment the lock
    136	 * recursion count and return the buffer to the caller.
    137	 */
    138	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
    139	if (bp != NULL) {
    140		ASSERT(xfs_buf_islocked(bp));
    141		if (xfs_is_shutdown(tp->t_mountp)) {
    142			xfs_buf_stale(bp);
    143			bp->b_flags |= XBF_DONE;
    144		}
    145
    146		ASSERT(bp->b_transp == tp);
    147		bip = bp->b_log_item;
    148		ASSERT(bip != NULL);
    149		ASSERT(atomic_read(&bip->bli_refcount) > 0);
    150		bip->bli_recur++;
    151		trace_xfs_trans_get_buf_recur(bip);
    152		*bpp = bp;
    153		return 0;
    154	}
    155
    156	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
    157	if (error)
    158		return error;
    159
    160	ASSERT(!bp->b_error);
    161
    162	_xfs_trans_bjoin(tp, bp, 1);
    163	trace_xfs_trans_get_buf(bp->b_log_item);
    164	*bpp = bp;
    165	return 0;
    166}
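
/*
 * Illustrative sketch (not upstream code): typical callers go through the
 * single-map wrapper xfs_trans_get_buf() from xfs_trans.h, which builds a
 * one-entry map and calls xfs_trans_get_buf_map(). Since get_buf does not
 * read from disk, this pattern suits newly allocated blocks whose
 * contents are about to be reinitialized. Names are made up.
 */
static int
example_init_new_block(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_get_buf(tp, target, blkno, numblks, 0, &bp);
	if (error)
		return error;

	/* fresh contents: zero the buffer and log the whole byte range */
	memset(bp->b_addr, 0, BBTOB(bp->b_length));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	return 0;
}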
    167
    168/*
    169 * Get and lock the superblock buffer for the given transaction.
    170 */
    171struct xfs_buf *
    172xfs_trans_getsb(
    173	struct xfs_trans	*tp)
    174{
    175	struct xfs_buf		*bp = tp->t_mountp->m_sb_bp;
    176
    177	/*
    178	 * Just increment the lock recursion count if the buffer is already
    179	 * attached to this transaction.
    180	 */
    181	if (bp->b_transp == tp) {
    182		struct xfs_buf_log_item	*bip = bp->b_log_item;
    183
    184		ASSERT(bip != NULL);
    185		ASSERT(atomic_read(&bip->bli_refcount) > 0);
    186		bip->bli_recur++;
    187
    188		trace_xfs_trans_getsb_recur(bip);
    189	} else {
    190		xfs_buf_lock(bp);
    191		xfs_buf_hold(bp);
    192		_xfs_trans_bjoin(tp, bp, 1);
    193
    194		trace_xfs_trans_getsb(bp->b_log_item);
    195	}
    196
    197	return bp;
    198}
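
/*
 * Illustrative sketch (not upstream code): modifying the superblock
 * through this interface, in the style of xfs_trans_apply_sb_deltas().
 * The choice of sb_icount as the field to bump is arbitrary; the point is
 * that the logged byte range is computed from on-disk structure offsets.
 */
static void
example_bump_sb_icount(
	struct xfs_trans	*tp)
{
	struct xfs_buf		*bp = xfs_trans_getsb(tp);
	struct xfs_dsb		*sbp = bp->b_addr;

	be64_add_cpu(&sbp->sb_icount, 1);
	xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
			offsetof(struct xfs_dsb, sb_icount) +
			sizeof(sbp->sb_icount) - 1);
}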
    199
    200/*
    201 * Get and lock the buffer for the caller if it is not already
    202 * locked within the given transaction.  If it has not yet been
    203 * read in, read it from disk. If it is already locked
    204 * within the transaction and already read in, just increment its
    205 * lock recursion count and return a pointer to it.
    206 *
    207 * If the transaction pointer is NULL, make this just a normal
    208 * read_buf() call.
    209 */
    210int
    211xfs_trans_read_buf_map(
    212	struct xfs_mount	*mp,
    213	struct xfs_trans	*tp,
    214	struct xfs_buftarg	*target,
    215	struct xfs_buf_map	*map,
    216	int			nmaps,
    217	xfs_buf_flags_t		flags,
    218	struct xfs_buf		**bpp,
    219	const struct xfs_buf_ops *ops)
    220{
    221	struct xfs_buf		*bp = NULL;
    222	struct xfs_buf_log_item	*bip;
    223	int			error;
    224
    225	*bpp = NULL;
    226	/*
    227	 * If we find the buffer in the cache with this transaction
    228	 * pointer in its b_fsprivate2 field, then we know we already
    229	 * have it locked.  If it is already read in we just increment
    230	 * the lock recursion count and return the buffer to the caller.
    231	 * If the buffer is not yet read in, then we read it in, increment
    232	 * the lock recursion count, and return it to the caller.
    233	 */
    234	if (tp)
    235		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
    236	if (bp) {
    237		ASSERT(xfs_buf_islocked(bp));
    238		ASSERT(bp->b_transp == tp);
    239		ASSERT(bp->b_log_item != NULL);
    240		ASSERT(!bp->b_error);
    241		ASSERT(bp->b_flags & XBF_DONE);
    242
    243		/*
    244		 * We never locked this buf ourselves, so we shouldn't
    245		 * brelse it either. Just get out.
    246		 */
    247		if (xfs_is_shutdown(mp)) {
    248			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
    249			return -EIO;
    250		}
    251
    252		/*
    253		 * Check if the caller is trying to read a buffer that is
    254		 * already attached to the transaction yet has no buffer ops
    255		 * assigned.  Ops are usually attached when the buffer is
     256		 * attached to the transaction, or by the read caller in
    257		 * special circumstances.  That didn't happen, which is not
    258		 * how this is supposed to go.
    259		 *
    260		 * If the buffer passes verification we'll let this go, but if
    261		 * not we have to shut down.  Let the transaction cleanup code
     262		 * release this buffer when it kills the transaction.
    263		 */
    264		ASSERT(bp->b_ops != NULL);
    265		error = xfs_buf_reverify(bp, ops);
    266		if (error) {
    267			xfs_buf_ioerror_alert(bp, __return_address);
    268
    269			if (tp->t_flags & XFS_TRANS_DIRTY)
    270				xfs_force_shutdown(tp->t_mountp,
    271						SHUTDOWN_META_IO_ERROR);
    272
    273			/* bad CRC means corrupted metadata */
    274			if (error == -EFSBADCRC)
    275				error = -EFSCORRUPTED;
    276			return error;
    277		}
    278
    279		bip = bp->b_log_item;
    280		bip->bli_recur++;
    281
    282		ASSERT(atomic_read(&bip->bli_refcount) > 0);
    283		trace_xfs_trans_read_buf_recur(bip);
    284		ASSERT(bp->b_ops != NULL || ops == NULL);
    285		*bpp = bp;
    286		return 0;
    287	}
    288
    289	error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
    290			__return_address);
    291	switch (error) {
    292	case 0:
    293		break;
    294	default:
    295		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
    296			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
    297		fallthrough;
    298	case -ENOMEM:
    299	case -EAGAIN:
    300		return error;
    301	}
    302
    303	if (xfs_is_shutdown(mp)) {
    304		xfs_buf_relse(bp);
    305		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
    306		return -EIO;
    307	}
    308
    309	if (tp) {
    310		_xfs_trans_bjoin(tp, bp, 1);
    311		trace_xfs_trans_read_buf(bp->b_log_item);
    312	}
    313	ASSERT(bp->b_ops != NULL || ops == NULL);
    314	*bpp = bp;
    315	return 0;
    316
    317}
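
/*
 * Illustrative sketch (not upstream code): a typical metadata read goes
 * through the single-map wrapper xfs_trans_read_buf() and passes a
 * verifier (e.g. one of the xfs_buf_ops tables such as xfs_agf_buf_ops).
 * Names are illustrative.
 */
static int
example_try_read_metadata(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_daddr_t		blkno,
	const struct xfs_buf_ops *ops,
	struct xfs_buf		**bpp)
{
	/* -EAGAIN here means the trylock failed and the read may be retried */
	return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, 1,
			XBF_TRYLOCK, bpp, ops);
}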
    318
    319/* Has this buffer been dirtied by anyone? */
    320bool
    321xfs_trans_buf_is_dirty(
    322	struct xfs_buf		*bp)
    323{
    324	struct xfs_buf_log_item	*bip = bp->b_log_item;
    325
    326	if (!bip)
    327		return false;
    328	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
    329	return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
    330}
    331
    332/*
    333 * Release a buffer previously joined to the transaction. If the buffer is
    334 * modified within this transaction, decrement the recursion count but do not
    335 * release the buffer even if the count goes to 0. If the buffer is not modified
    336 * within the transaction, decrement the recursion count and release the buffer
    337 * if the recursion count goes to 0.
    338 *
    339 * If the buffer is to be released and it was not already dirty before this
    340 * transaction began, then also free the buf_log_item associated with it.
    341 *
    342 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
    343 */
    344void
    345xfs_trans_brelse(
    346	struct xfs_trans	*tp,
    347	struct xfs_buf		*bp)
    348{
    349	struct xfs_buf_log_item	*bip = bp->b_log_item;
    350
    351	ASSERT(bp->b_transp == tp);
    352
    353	if (!tp) {
    354		xfs_buf_relse(bp);
    355		return;
    356	}
    357
    358	trace_xfs_trans_brelse(bip);
    359	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
    360	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    361
    362	/*
    363	 * If the release is for a recursive lookup, then decrement the count
    364	 * and return.
    365	 */
    366	if (bip->bli_recur > 0) {
    367		bip->bli_recur--;
    368		return;
    369	}
    370
    371	/*
    372	 * If the buffer is invalidated or dirty in this transaction, we can't
    373	 * release it until we commit.
    374	 */
    375	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
    376		return;
    377	if (bip->bli_flags & XFS_BLI_STALE)
    378		return;
    379
    380	/*
    381	 * Unlink the log item from the transaction and clear the hold flag, if
    382	 * set. We wouldn't want the next user of the buffer to get confused.
    383	 */
    384	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
    385	xfs_trans_del_item(&bip->bli_item);
    386	bip->bli_flags &= ~XFS_BLI_HOLD;
    387
    388	/* drop the reference to the bli */
    389	xfs_buf_item_put(bip);
    390
    391	bp->b_transp = NULL;
    392	xfs_buf_relse(bp);
    393}
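
/*
 * Illustrative sketch (not upstream code): read-only examination of a
 * block inside a transaction. Because the buffer is never dirtied,
 * xfs_trans_brelse() above drops it immediately, shortening the time the
 * buffer lock is held. Names are illustrative.
 */
static int
example_peek_block(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_daddr_t		blkno,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, 1, 0,
			&bp, ops);
	if (error)
		return error;

	/* ... inspect bp->b_addr without modifying it ... */

	xfs_trans_brelse(tp, bp);	/* clean, so released right away */
	return 0;
}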
    394
    395/*
    396 * Mark the buffer as not needing to be unlocked when the buf item's
    397 * iop_committing() routine is called.  The buffer must already be locked
    398 * and associated with the given transaction.
    399 */
    400/* ARGSUSED */
    401void
    402xfs_trans_bhold(
    403	xfs_trans_t		*tp,
    404	struct xfs_buf		*bp)
    405{
    406	struct xfs_buf_log_item	*bip = bp->b_log_item;
    407
    408	ASSERT(bp->b_transp == tp);
    409	ASSERT(bip != NULL);
    410	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
    411	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
    412	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    413
    414	bip->bli_flags |= XFS_BLI_HOLD;
    415	trace_xfs_trans_bhold(bip);
    416}
    417
    418/*
    419 * Cancel the previous buffer hold request made on this buffer
    420 * for this transaction.
    421 */
    422void
    423xfs_trans_bhold_release(
    424	xfs_trans_t		*tp,
    425	struct xfs_buf		*bp)
    426{
    427	struct xfs_buf_log_item	*bip = bp->b_log_item;
    428
    429	ASSERT(bp->b_transp == tp);
    430	ASSERT(bip != NULL);
    431	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
    432	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
    433	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    434	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
    435
    436	bip->bli_flags &= ~XFS_BLI_HOLD;
    437	trace_xfs_trans_bhold_release(bip);
    438}
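
/*
 * Illustrative sketch (not upstream code): the hold/rejoin idiom used
 * with rolling transactions. Holding the buffer keeps it locked while
 * xfs_trans_roll() commits the old transaction and allocates a new one;
 * it must then be rejoined, since commit cleared its transaction linkage.
 * The function name is hypothetical.
 */
static int
example_roll_with_buf(
	struct xfs_trans	**tpp,
	struct xfs_buf		*bp)
{
	int			error;

	xfs_trans_bhold(*tpp, bp);	/* stay locked across the commit */
	error = xfs_trans_roll(tpp);
	if (error)
		return error;
	xfs_trans_bjoin(*tpp, bp);	/* attach to the new transaction */
	return 0;
}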
    439
    440/*
    441 * Mark a buffer dirty in the transaction.
    442 */
    443void
    444xfs_trans_dirty_buf(
    445	struct xfs_trans	*tp,
    446	struct xfs_buf		*bp)
    447{
    448	struct xfs_buf_log_item	*bip = bp->b_log_item;
    449
    450	ASSERT(bp->b_transp == tp);
    451	ASSERT(bip != NULL);
    452
    453	/*
    454	 * Mark the buffer as needing to be written out eventually,
    455	 * and set its iodone function to remove the buffer's buf log
    456	 * item from the AIL and free it when the buffer is flushed
    457	 * to disk.
    458	 */
    459	bp->b_flags |= XBF_DONE;
    460
    461	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    462
    463	/*
    464	 * If we invalidated the buffer within this transaction, then
    465	 * cancel the invalidation now that we're dirtying the buffer
    466	 * again.  There are no races with the code in xfs_buf_item_unpin(),
    467	 * because we have a reference to the buffer this entire time.
    468	 */
    469	if (bip->bli_flags & XFS_BLI_STALE) {
    470		bip->bli_flags &= ~XFS_BLI_STALE;
    471		ASSERT(bp->b_flags & XBF_STALE);
    472		bp->b_flags &= ~XBF_STALE;
    473		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
    474	}
    475	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
    476
    477	tp->t_flags |= XFS_TRANS_DIRTY;
    478	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
    479}
    480
    481/*
    482 * This is called to mark bytes first through last inclusive of the given
    483 * buffer as needing to be logged when the transaction is committed.
    484 * The buffer must already be associated with the given transaction.
    485 *
    486 * First and last are numbers relative to the beginning of this buffer,
    487 * so the first byte in the buffer is numbered 0 regardless of the
    488 * value of b_blkno.
    489 */
    490void
    491xfs_trans_log_buf(
    492	struct xfs_trans	*tp,
    493	struct xfs_buf		*bp,
    494	uint			first,
    495	uint			last)
    496{
    497	struct xfs_buf_log_item	*bip = bp->b_log_item;
    498
    499	ASSERT(first <= last && last < BBTOB(bp->b_length));
    500	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
    501
    502	xfs_trans_dirty_buf(tp, bp);
    503
    504	trace_xfs_trans_log_buf(bip);
    505	xfs_buf_item_log(bip, first, last);
    506}
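
/*
 * Illustrative sketch (not upstream code): first/last are byte offsets,
 * so to relog only a single modified record within a buffer the caller
 * computes its byte range. The 128-byte record size is made up.
 */
static void
example_log_one_record(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	unsigned int		idx)
{
	uint			first = idx * 128;

	xfs_trans_log_buf(tp, bp, first, first + 128 - 1);
}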
    507
    508
    509/*
    510 * Invalidate a buffer that is being used within a transaction.
    511 *
    512 * Typically this is because the blocks in the buffer are being freed, so we
    513 * need to prevent it from being written out when we're done.  Allowing it
    514 * to be written again might overwrite data in the free blocks if they are
    515 * reallocated to a file.
    516 *
    517 * We prevent the buffer from being written out by marking it stale.  We can't
    518 * get rid of the buf log item at this point because the buffer may still be
    519 * pinned by another transaction.  If that is the case, then we'll wait until
    520 * the buffer is committed to disk for the last time (we can tell by the ref
    521 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
    522 * keep the buffer locked so that the buffer and buf log item are not reused.
    523 *
    524 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
    525 * the buf item.  This will be used at recovery time to determine that copies
    526 * of the buffer in the log before this should not be replayed.
    527 *
    528 * We mark the item descriptor and the transaction dirty so that we'll hold
    529 * the buffer until after the commit.
    530 *
    531 * Since we're invalidating the buffer, we also clear the state about which
    532 * parts of the buffer have been logged.  We also clear the flag indicating
    533 * that this is an inode buffer since the data in the buffer will no longer
    534 * be valid.
    535 *
    536 * We set the stale bit in the buffer as well since we're getting rid of it.
    537 */
    538void
    539xfs_trans_binval(
    540	xfs_trans_t		*tp,
    541	struct xfs_buf		*bp)
    542{
    543	struct xfs_buf_log_item	*bip = bp->b_log_item;
    544	int			i;
    545
    546	ASSERT(bp->b_transp == tp);
    547	ASSERT(bip != NULL);
    548	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    549
    550	trace_xfs_trans_binval(bip);
    551
    552	if (bip->bli_flags & XFS_BLI_STALE) {
    553		/*
    554		 * If the buffer is already invalidated, then
    555		 * just return.
    556		 */
    557		ASSERT(bp->b_flags & XBF_STALE);
    558		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
    559		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
    560		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
    561		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
    562		ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
    563		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
    564		return;
    565	}
    566
    567	xfs_buf_stale(bp);
    568
    569	bip->bli_flags |= XFS_BLI_STALE;
    570	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
    571	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
    572	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
    573	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
    574	for (i = 0; i < bip->bli_format_count; i++) {
    575		memset(bip->bli_formats[i].blf_data_map, 0,
    576		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
    577	}
    578	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
    579	tp->t_flags |= XFS_TRANS_DIRTY;
    580}
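
/*
 * Illustrative sketch (not upstream code): invalidating the buffer over a
 * just-freed extent. xfs_trans_get_buf() is used rather than a read
 * because the contents are dead anyway; xfs_trans_binval() then ensures
 * they are never written back over space that may be reallocated. Names
 * are illustrative.
 */
static int
example_invalidate_freed_extent(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_get_buf(tp, target, blkno, numblks, 0, &bp);
	if (error)
		return error;

	xfs_trans_binval(tp, bp);
	return 0;
}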
    581
    582/*
    583 * This call is used to indicate that the buffer contains on-disk inodes which
    584 * must be handled specially during recovery.  They require special handling
    585 * because only the di_next_unlinked from the inodes in the buffer should be
    586 * recovered.  The rest of the data in the buffer is logged via the inodes
    587 * themselves.
    588 *
     589 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
    590 * transferred to the buffer's log format structure so that we'll know what to
    591 * do at recovery time.
    592 */
    593void
    594xfs_trans_inode_buf(
    595	xfs_trans_t		*tp,
    596	struct xfs_buf		*bp)
    597{
    598	struct xfs_buf_log_item	*bip = bp->b_log_item;
    599
    600	ASSERT(bp->b_transp == tp);
    601	ASSERT(bip != NULL);
    602	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    603
    604	bip->bli_flags |= XFS_BLI_INODE_BUF;
    605	bp->b_flags |= _XBF_INODES;
    606	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
    607}
    608
    609/*
    610 * This call is used to indicate that the buffer is going to
     611 * be staled and was an inode buffer. This means it gets
     612 * special processing during unpin: any inodes
     613 * associated with the buffer should be removed from the AIL.
     614 * There is also special processing during recovery:
     615 * any replay of the inodes in the buffer needs to be
     616 * prevented, as the buffer may have been reused.
    617 */
    618void
    619xfs_trans_stale_inode_buf(
    620	xfs_trans_t		*tp,
    621	struct xfs_buf		*bp)
    622{
    623	struct xfs_buf_log_item	*bip = bp->b_log_item;
    624
    625	ASSERT(bp->b_transp == tp);
    626	ASSERT(bip != NULL);
    627	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    628
    629	bip->bli_flags |= XFS_BLI_STALE_INODE;
    630	bp->b_flags |= _XBF_INODES;
    631	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
    632}
    633
    634/*
    635 * Mark the buffer as being one which contains newly allocated
    636 * inodes.  We need to make sure that even if this buffer is
    637 * relogged as an 'inode buf' we still recover all of the inode
    638 * images in the face of a crash.  This works in coordination with
    639 * xfs_buf_item_committed() to ensure that the buffer remains in the
    640 * AIL at its original location even after it has been relogged.
    641 */
    642/* ARGSUSED */
    643void
    644xfs_trans_inode_alloc_buf(
    645	xfs_trans_t		*tp,
    646	struct xfs_buf		*bp)
    647{
    648	struct xfs_buf_log_item	*bip = bp->b_log_item;
    649
    650	ASSERT(bp->b_transp == tp);
    651	ASSERT(bip != NULL);
    652	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    653
    654	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
    655	bp->b_flags |= _XBF_INODES;
    656	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
    657}
    658
    659/*
     660 * Mark the buffer as ordered for this transaction. This means that the contents
     661 * of the buffer are not recorded in the transaction, but the buffer is tracked
     662 * in the AIL as though it was. This allows us to record logical changes in
     663 * transactions rather than the physical changes we make to the buffer, without
     664 * changing the writeback ordering constraints of metadata buffers.
    665 */
    666bool
    667xfs_trans_ordered_buf(
    668	struct xfs_trans	*tp,
    669	struct xfs_buf		*bp)
    670{
    671	struct xfs_buf_log_item	*bip = bp->b_log_item;
    672
    673	ASSERT(bp->b_transp == tp);
    674	ASSERT(bip != NULL);
    675	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    676
    677	if (xfs_buf_item_dirty_format(bip))
    678		return false;
    679
    680	bip->bli_flags |= XFS_BLI_ORDERED;
    681	trace_xfs_buf_item_ordered(bip);
    682
    683	/*
    684	 * We don't log a dirty range of an ordered buffer but it still needs
     685	 * to be marked dirty and marked as logged.
    686	 */
    687	xfs_trans_dirty_buf(tp, bp);
    688	return true;
    689}
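
/*
 * Illustrative sketch (not upstream code): ordering fails if the buffer
 * already has a dirty logged range, in which case a caller falls back to
 * physically logging the whole buffer. The function name is hypothetical.
 */
static void
example_order_or_log(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	if (!xfs_trans_ordered_buf(tp, bp))
		xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
}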
    690
    691/*
     692 * Set the type of the buffer for log recovery so that recovery can correctly
     693 * identify the buffer and hence attach the correct buffer ops after replay.
    694 */
    695void
    696xfs_trans_buf_set_type(
    697	struct xfs_trans	*tp,
    698	struct xfs_buf		*bp,
    699	enum xfs_blft		type)
    700{
    701	struct xfs_buf_log_item	*bip = bp->b_log_item;
    702
    703	if (!tp)
    704		return;
    705
    706	ASSERT(bp->b_transp == tp);
    707	ASSERT(bip != NULL);
    708	ASSERT(atomic_read(&bip->bli_refcount) > 0);
    709
    710	xfs_blft_to_flags(&bip->__bli_format, type);
    711}
    712
    713void
    714xfs_trans_buf_copy_type(
    715	struct xfs_buf		*dst_bp,
    716	struct xfs_buf		*src_bp)
    717{
    718	struct xfs_buf_log_item	*sbip = src_bp->b_log_item;
    719	struct xfs_buf_log_item	*dbip = dst_bp->b_log_item;
    720	enum xfs_blft		type;
    721
    722	type = xfs_blft_from_flags(&sbip->__bli_format);
    723	xfs_blft_to_flags(&dbip->__bli_format, type);
    724}
    725
    726/*
    727 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
    728 * dquots. However, unlike in inode buffer recovery, dquot buffers get
    729 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
    730 * The only thing that makes dquot buffers different from regular
    731 * buffers is that we must not replay dquot bufs when recovering
    732 * if a _corresponding_ quotaoff has happened. We also have to distinguish
    733 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
    734 * can be turned off independently.
    735 */
    736/* ARGSUSED */
    737void
    738xfs_trans_dquot_buf(
    739	xfs_trans_t		*tp,
    740	struct xfs_buf		*bp,
    741	uint			type)
    742{
    743	struct xfs_buf_log_item	*bip = bp->b_log_item;
    744
    745	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
    746	       type == XFS_BLF_PDQUOT_BUF ||
    747	       type == XFS_BLF_GDQUOT_BUF);
    748
    749	bip->__bli_format.blf_flags |= type;
    750
    751	switch (type) {
    752	case XFS_BLF_UDQUOT_BUF:
    753		type = XFS_BLFT_UDQUOT_BUF;
    754		break;
    755	case XFS_BLF_PDQUOT_BUF:
    756		type = XFS_BLFT_PDQUOT_BUF;
    757		break;
    758	case XFS_BLF_GDQUOT_BUF:
    759		type = XFS_BLFT_GDQUOT_BUF;
    760		break;
    761	default:
    762		type = XFS_BLFT_UNKNOWN_BUF;
    763		break;
    764	}
    765
    766	bp->b_flags |= _XBF_DQUOTS;
    767	xfs_trans_buf_set_type(tp, bp, type);
    768}
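
/*
 * Illustrative sketch (not upstream code): tagging a user-quota dquot
 * cluster buffer, in the style of xfs_qm_init_dquot_blk(). The class flag
 * must match the quota type being modified so recovery can honour a
 * corresponding quotaoff; the whole-buffer logging here is illustrative.
 */
static void
example_tag_udquot_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
}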