cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xfs_log_priv.h (25746B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
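
/*
 * Illustrative sketch, not part of the upstream header: assuming, as the
 * comment above says, that oh_clientid is the first byte of the packed
 * four-byte chunk, be32_to_cpu() leaves it in the top byte and the shift
 * by 24 recovers it. The helper below is hypothetical.
 */
static inline bool xlog_client_id_roundtrips(u8 clientid)
{
	/* pack the client id into the most significant byte, as on disk */
	__be32 chunk = cpu_to_be32((u32)clientid << 24);

	return xlog_get_client_id(chunk) == clientid;
}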

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }


/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity after an allocation
 * transaction and the system then crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 *  IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 *  NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 *  DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 *  NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *		dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *		dummy transaction, we move to IDLE.
 *
 *
 * Writing only one dummy transaction is not enough: that single dummy
 * can end up appended to the same log write as a file space allocation,
 * in which case the log recovery code still replays the space allocation
 * and a file could be truncated. This is why we have the NEED2 and DONE2
 * states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
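
/*
 * Illustrative sketch, not part of the upstream header: the transitions
 * described in case 2.) above, restated as code. The real logic lives in
 * the log state machine in xfs_log.c; this helper is hypothetical. A
 * non-dummy write always returns us to NEED; a dummy write advances
 * DONE -> NEED2 and DONE2 -> IDLE.
 */
static inline int xlog_next_cover_state(int cur_state, bool wrote_dummy)
{
	if (!wrote_dummy)
		return XLOG_STATE_COVER_NEED;

	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:
		return XLOG_STATE_COVER_NEED2;
	case XLOG_STATE_COVER_DONE2:
		return XLOG_STATE_COVER_IDLE;
	default:
		/* a dummy write can only complete from DONE or DONE2 */
		return XLOG_STATE_COVER_NEED;
	}
}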

typedef struct xlog_ticket {
	struct list_head   t_queue;	 /* reserve/write queue */
	struct task_struct *t_task;	 /* task that owns this ticket */
	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
	int		   t_curr_res;	 /* current reservation in bytes : 4  */
	int		   t_unit_res;	 /* unit reservation in bytes    : 4  */
	char		   t_ocnt;	 /* original count		 : 1  */
	char		   t_cnt;	 /* current count		 : 1  */
	uint8_t		   t_flags;	 /* properties of reservation	 : 1  */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *	disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	void			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;
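
/*
 * Illustrative sketch, not part of the upstream header: the cacheline
 * separation described above can be checked at compile time, assuming
 * ____cacheline_aligned_in_smp aligns its field to SMP_CACHE_BYTES.
 */
static_assert(offsetof(struct xlog_in_core, ic_refcnt) % SMP_CACHE_BYTES == 0,
	      "ic_refcnt must start a new cacheline");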

/*
 * The CIL context is used to aggregate per-transaction details as well as
 * be passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
	struct work_struct	push_work;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */
} ____cacheline_aligned_in_smp;

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers.  The number of headers will vary based on the number
 * of io vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to track
 * and limit space consumed in the log rather than by the number of objects
 * being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that.  Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the CIL
 * to grow to a substantial fraction of the log, then we may be pinning hundreds
 * of megabytes of metadata in memory until the CIL flushes. This can cause
 * issues when we are running low on memory - pinned memory cannot be reclaimed,
 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
 * size limit for the CIL that limits the maximum amount of memory pinned by the
 * CIL but does not limit performance by reducing relogging efficiency
 * significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows CIL to be pushed and progress to be
 *   made without excessive blocking of incoming transaction commits. This is
 *   defined to be 12.5% of the log space - half the 25% push threshold of the
 *   AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block and
 * yield the CPU, giving the CIL push work a chance to be scheduled and start
 * work. This prevents a process running lots of transactions from overfilling
 * the CIL because it is not yielding the CPU. We set the blocking limit at
 * twice the background push space threshold so we keep in line with the AIL
 * push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the transaction
 * is inserted into the CIL and the push has been triggered. It is largely a
 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
 * limit will be difficult to implement without introducing global serialisation
 * in the CIL commit fast path, and it's not at all clear that we actually need
 * such hard limits given the ~7 years we've run without a hard limit before
 * finding the first situation where a checkpoint size overflow actually
 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
 * we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
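
/*
 * Worked example, not part of the upstream header, assuming a v2 log with
 * the maximum of 8 iclogs of 256KB each (a 2MB iclog window, so the 16x
 * window is 32MB):
 *
 *	128MB log: min(128MB / 8, 32MB) = 16MB limit, 32MB blocking limit
 *	  2GB log: min(  2GB / 8, 32MB) = 32MB limit, 64MB blocking limit
 *
 * i.e. small logs are bounded by the 12.5% rule and large logs by the
 * 32MB memory-pinning cap described above.
 */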

/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;	        /* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;        /* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	long			l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;  /* size of iclog header */
	int			l_iclog_heads;  /* # of iclog header sectors */
	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of iclog buffers */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;   /* start block of log */
	int			l_logsize;      /* size of log in bytes */
	int			l_logBBsize;    /* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;       /* head log queue	*/
	spinlock_t		l_icloglock;    /* grab to change iclog state */
	int			l_curr_cycle;   /* Cycle number of log writes */
	int			l_prev_cycle;   /* Cycle number before last
						 * block increment */
	int			l_curr_block;   /* current logical log block */
	int			l_prev_block;   /* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff;/* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error and is
					   being shut down */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/*
 * Wait until xlog_force_shutdown() has marked the log as shut down
 * so xlog_is_shutdown() will always return true.
 */
static inline void
xlog_shutdown_wait(
	struct xlog	*log)
{
	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
}

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
		int count, bool permanent);

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct xfs_log_vec *log_vector, struct xlog_ticket *tic,
		uint32_t len);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
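
/*
 * Illustrative sketch, not part of the upstream header: the grant head
 * packs the cycle into the upper 32 bits and the byte count into the
 * lower 32 bits of a single atomic64_t, so assign followed by crack must
 * round-trip. This self-check helper is hypothetical.
 */
static inline bool xlog_grant_head_roundtrips(int cycle, int space)
{
	atomic64_t head;
	int c, s;

	xlog_assign_grant_head(&head, cycle, space);
	xlog_crack_grant_head(&head, &c, &s);
	return c == cycle && s == space;
}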

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
			xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
			struct xlog_in_core *iclog);


/*
 * CIL force routines
 */
void xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
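
/*
 * Illustrative sketch, not part of the upstream header: the intended
 * calling pattern. The condition is checked under the lock that also
 * serialises the wakeup, and xlog_wait() only drops that lock after
 * queueing, so a wakeup cannot be lost between the check and the sleep.
 * A hypothetical caller:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state != XLOG_STATE_ACTIVE)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */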

int xlog_wait_on_iclog(struct xlog_in_core *iclog);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

/*
 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
 * to fall back to vmalloc, so we can't actually do anything useful with gfp
 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
 * will do direct reclaim and compaction in the slow path, both of which are
 * horrendously expensive. We just want kmalloc to fail fast and fall back to
 * vmalloc if it can't get something straight away from the free lists or
 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
 *
 * This assumes that the caller uses memalloc_nofs_save task context here, so
 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
 * allocations, so let's just all pretend this is a GFP_KERNEL context
 * operation....
 */
static inline void *
xlog_kvmalloc(
	size_t		buf_size)
{
	gfp_t		flags = GFP_KERNEL;
	void		*p;

	flags &= ~__GFP_DIRECT_RECLAIM;
	flags |= __GFP_NOWARN | __GFP_NORETRY;
	do {
		p = kmalloc(buf_size, flags);
		if (!p)
			p = vmalloc(buf_size);
	} while (!p);

	return p;
}
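
/*
 * Illustrative sketch, not part of the upstream header: the caller-side
 * pattern the comment above assumes. Entering a memalloc_nofs_save()
 * scope makes both the kmalloc() fast path and the vmalloc() fallback
 * behave as GFP_NOFS allocations. The wrapper below is hypothetical.
 */
static inline void *
xlog_kvmalloc_nofs(size_t buf_size)
{
	unsigned int nofs_flags = memalloc_nofs_save();
	void *p = xlog_kvmalloc(buf_size);

	memalloc_nofs_restore(nofs_flags);
	return p;
}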

#endif	/* __XFS_LOG_PRIV_H__ */