cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xfs_log.c (112303B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
      4 * All Rights Reserved.
      5 */
      6#include "xfs.h"
      7#include "xfs_fs.h"
      8#include "xfs_shared.h"
      9#include "xfs_format.h"
     10#include "xfs_log_format.h"
     11#include "xfs_trans_resv.h"
     12#include "xfs_mount.h"
     13#include "xfs_errortag.h"
     14#include "xfs_error.h"
     15#include "xfs_trans.h"
     16#include "xfs_trans_priv.h"
     17#include "xfs_log.h"
     18#include "xfs_log_priv.h"
     19#include "xfs_trace.h"
     20#include "xfs_sysfs.h"
     21#include "xfs_sb.h"
     22#include "xfs_health.h"
     23
     24struct kmem_cache	*xfs_log_ticket_cache;
     25
     26/* Local miscellaneous function prototypes */
     27STATIC struct xlog *
     28xlog_alloc_log(
     29	struct xfs_mount	*mp,
     30	struct xfs_buftarg	*log_target,
     31	xfs_daddr_t		blk_offset,
     32	int			num_bblks);
     33STATIC int
     34xlog_space_left(
     35	struct xlog		*log,
     36	atomic64_t		*head);
     37STATIC void
     38xlog_dealloc_log(
     39	struct xlog		*log);
     40
     41/* local state machine functions */
     42STATIC void xlog_state_done_syncing(
     43	struct xlog_in_core	*iclog);
     44STATIC void xlog_state_do_callback(
     45	struct xlog		*log);
     46STATIC int
     47xlog_state_get_iclog_space(
     48	struct xlog		*log,
     49	int			len,
     50	struct xlog_in_core	**iclog,
     51	struct xlog_ticket	*ticket,
     52	int			*logoffsetp);
     53STATIC void
     54xlog_grant_push_ail(
     55	struct xlog		*log,
     56	int			need_bytes);
     57STATIC void
     58xlog_sync(
     59	struct xlog		*log,
     60	struct xlog_in_core	*iclog);
     61#if defined(DEBUG)
     62STATIC void
     63xlog_verify_grant_tail(
     64	struct xlog *log);
     65STATIC void
     66xlog_verify_iclog(
     67	struct xlog		*log,
     68	struct xlog_in_core	*iclog,
     69	int			count);
     70STATIC void
     71xlog_verify_tail_lsn(
     72	struct xlog		*log,
     73	struct xlog_in_core	*iclog);
     74#else
     75#define xlog_verify_grant_tail(a)
     76#define xlog_verify_iclog(a,b,c)
     77#define xlog_verify_tail_lsn(a,b)
     78#endif
     79
     80STATIC int
     81xlog_iclogs_empty(
     82	struct xlog		*log);
     83
     84static int
     85xfs_log_cover(struct xfs_mount *);
     86
     87/*
     88 * We need to make sure the buffer pointer returned is naturally aligned for the
     89 * biggest basic data type we put into it. We have already accounted for this
     90 * padding when sizing the buffer.
     91 *
     92 * However, this padding does not get written into the log, and hence we have to
     93 * track the space used by the log vectors separately to prevent log space hangs
     94 * due to inaccurate accounting (i.e. a leak) of the used log space through the
     95 * CIL context ticket.
     96 *
     97 * We also add space for the xlog_op_header that describes this region in the
     98 * log. This prepends the data region we return to the caller to copy their data
     99 * into, so do all the static initialisation of the ophdr now. Because the ophdr
    100 * is not 8 byte aligned, we have to be careful to ensure that we align the
    101 * start of the buffer such that the region we return to the caller is 8 byte
    102 * aligned and packed against the tail of the ophdr.
    103 */
    104void *
    105xlog_prepare_iovec(
    106	struct xfs_log_vec	*lv,
    107	struct xfs_log_iovec	**vecp,
    108	uint			type)
    109{
    110	struct xfs_log_iovec	*vec = *vecp;
    111	struct xlog_op_header	*oph;
    112	uint32_t		len;
    113	void			*buf;
    114
    115	if (vec) {
    116		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
    117		vec++;
    118	} else {
    119		vec = &lv->lv_iovecp[0];
    120	}
    121
    122	len = lv->lv_buf_len + sizeof(struct xlog_op_header);
    123	if (!IS_ALIGNED(len, sizeof(uint64_t))) {
    124		lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
    125					sizeof(struct xlog_op_header);
    126	}
    127
    128	vec->i_type = type;
    129	vec->i_addr = lv->lv_buf + lv->lv_buf_len;
    130
    131	oph = vec->i_addr;
    132	oph->oh_clientid = XFS_TRANSACTION;
    133	oph->oh_res2 = 0;
    134	oph->oh_flags = 0;
    135
    136	buf = vec->i_addr + sizeof(struct xlog_op_header);
    137	ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
    138
    139	*vecp = vec;
    140	return buf;
    141}
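        /*
         * A worked example of the padding above (illustrative, assuming the
         * 12-byte on-disk xlog_op_header): if the previous region left
         * lv_buf_len = 22, then len = 22 + 12 = 34, which is not 8-byte
         * aligned, so lv_buf_len is bumped to round_up(34, 8) - 12 = 28. The
         * ophdr then starts at offset 28 and the buffer returned to the
         * caller starts at offset 40, an 8-byte boundary, packed directly
         * against the tail of the ophdr.
         */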
    142
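        /*
         * The two helpers below adjust a grant head, which packs a cycle
         * number and a byte count into a single 64-bit value so it can be
         * updated locklessly. Both loop on atomic64_cmpxchg() until the
         * update lands, wrapping the byte count at l_logsize and bumping or
         * dropping the cycle accordingly. For example, adding 4MB to a head
         * sitting at 30MB of a 32MB log leaves 2MB of room in the current
         * cycle, so the head moves to 2MB of the next cycle.
         */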
    143static void
    144xlog_grant_sub_space(
    145	struct xlog		*log,
    146	atomic64_t		*head,
    147	int			bytes)
    148{
    149	int64_t	head_val = atomic64_read(head);
    150	int64_t new, old;
    151
    152	do {
    153		int	cycle, space;
    154
    155		xlog_crack_grant_head_val(head_val, &cycle, &space);
    156
    157		space -= bytes;
    158		if (space < 0) {
    159			space += log->l_logsize;
    160			cycle--;
    161		}
    162
    163		old = head_val;
    164		new = xlog_assign_grant_head_val(cycle, space);
    165		head_val = atomic64_cmpxchg(head, old, new);
    166	} while (head_val != old);
    167}
    168
    169static void
    170xlog_grant_add_space(
    171	struct xlog		*log,
    172	atomic64_t		*head,
    173	int			bytes)
    174{
    175	int64_t	head_val = atomic64_read(head);
    176	int64_t new, old;
    177
    178	do {
    179		int		tmp;
    180		int		cycle, space;
    181
    182		xlog_crack_grant_head_val(head_val, &cycle, &space);
    183
    184		tmp = log->l_logsize - space;
    185		if (tmp > bytes)
    186			space += bytes;
    187		else {
    188			space = bytes - tmp;
    189			cycle++;
    190		}
    191
    192		old = head_val;
    193		new = xlog_assign_grant_head_val(cycle, space);
    194		head_val = atomic64_cmpxchg(head, old, new);
    195	} while (head_val != old);
    196}
    197
    198STATIC void
    199xlog_grant_head_init(
    200	struct xlog_grant_head	*head)
    201{
    202	xlog_assign_grant_head(&head->grant, 1, 0);
    203	INIT_LIST_HEAD(&head->waiters);
    204	spin_lock_init(&head->lock);
    205}
    206
    207STATIC void
    208xlog_grant_head_wake_all(
    209	struct xlog_grant_head	*head)
    210{
    211	struct xlog_ticket	*tic;
    212
    213	spin_lock(&head->lock);
    214	list_for_each_entry(tic, &head->waiters, t_queue)
    215		wake_up_process(tic->t_task);
    216	spin_unlock(&head->lock);
    217}
    218
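        /*
         * The amount a waiting ticket needs differs per grant head: the
         * write head only ever needs a single unit, because permanent
         * tickets regrant one unit per transaction roll, while the reserve
         * head must cover all remaining counts of a permanent ticket up
         * front (t_unit_res * t_cnt).
         */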
    219static inline int
    220xlog_ticket_reservation(
    221	struct xlog		*log,
    222	struct xlog_grant_head	*head,
    223	struct xlog_ticket	*tic)
    224{
    225	if (head == &log->l_write_head) {
    226		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
    227		return tic->t_unit_res;
    228	} else {
    229		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
    230			return tic->t_unit_res * tic->t_cnt;
    231		else
    232			return tic->t_unit_res;
    233	}
    234}
    235
    236STATIC bool
    237xlog_grant_head_wake(
    238	struct xlog		*log,
    239	struct xlog_grant_head	*head,
    240	int			*free_bytes)
    241{
    242	struct xlog_ticket	*tic;
    243	int			need_bytes;
    244	bool			woken_task = false;
    245
    246	list_for_each_entry(tic, &head->waiters, t_queue) {
    247
    248		/*
    249		 * There is a chance that the size of the CIL checkpoints in
    250		 * progress at the last AIL push target calculation resulted in
    251		 * limiting the target to the log head (l_last_sync_lsn) at the
    252		 * time. This may not reflect where the log head is now as the
    253		 * CIL checkpoints may have completed.
    254		 *
    255		 * Hence when we are woken here, it may be that the head of the
    256		 * log that has moved rather than the tail. As the tail didn't
    257		 * move, there still won't be space available for the
    258		 * reservation we require.  However, if the AIL has already
    259		 * pushed to the target defined by the old log head location, we
    260		 * will hang here waiting for something else to update the AIL
    261		 * push target.
    262		 *
    263		 * Therefore, if there isn't space to wake the first waiter on
    264		 * the grant head, we need to push the AIL again to ensure the
    265		 * target reflects both the current log tail and log head
    266		 * position before we wait for the tail to move again.
    267		 */
    268
    269		need_bytes = xlog_ticket_reservation(log, head, tic);
    270		if (*free_bytes < need_bytes) {
    271			if (!woken_task)
    272				xlog_grant_push_ail(log, need_bytes);
    273			return false;
    274		}
    275
    276		*free_bytes -= need_bytes;
    277		trace_xfs_log_grant_wake_up(log, tic);
    278		wake_up_process(tic->t_task);
    279		woken_task = true;
    280	}
    281
    282	return true;
    283}
    284
    285STATIC int
    286xlog_grant_head_wait(
    287	struct xlog		*log,
    288	struct xlog_grant_head	*head,
    289	struct xlog_ticket	*tic,
    290	int			need_bytes) __releases(&head->lock)
    291					    __acquires(&head->lock)
    292{
    293	list_add_tail(&tic->t_queue, &head->waiters);
    294
    295	do {
    296		if (xlog_is_shutdown(log))
    297			goto shutdown;
    298		xlog_grant_push_ail(log, need_bytes);
    299
    300		__set_current_state(TASK_UNINTERRUPTIBLE);
    301		spin_unlock(&head->lock);
    302
    303		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
    304
    305		trace_xfs_log_grant_sleep(log, tic);
    306		schedule();
    307		trace_xfs_log_grant_wake(log, tic);
    308
    309		spin_lock(&head->lock);
    310		if (xlog_is_shutdown(log))
    311			goto shutdown;
    312	} while (xlog_space_left(log, &head->grant) < need_bytes);
    313
    314	list_del_init(&tic->t_queue);
    315	return 0;
    316shutdown:
    317	list_del_init(&tic->t_queue);
    318	return -EIO;
    319}
    320
    321/*
    322 * Atomically get the log space required for a log ticket.
    323 *
    324 * Once a ticket gets put onto head->waiters, it will only return after the
    325 * needed reservation is satisfied.
    326 *
    327 * This function is structured so that it has a lock free fast path. This is
    328 * necessary because every new transaction reservation will come through this
    329 * path. Hence any lock will be globally hot if we take it unconditionally on
    330 * every pass.
    331 *
    332 * As tickets are only ever moved on and off head->waiters under head->lock, we
    333 * only need to take that lock if we are going to add the ticket to the queue
    334 * and sleep. We can avoid taking the lock if the ticket was never added to
    335 * head->waiters because the t_queue list head will be empty and we hold the
    336 * only reference to it so it can safely be checked unlocked.
    337 */
    338STATIC int
    339xlog_grant_head_check(
    340	struct xlog		*log,
    341	struct xlog_grant_head	*head,
    342	struct xlog_ticket	*tic,
    343	int			*need_bytes)
    344{
    345	int			free_bytes;
    346	int			error = 0;
    347
    348	ASSERT(!xlog_in_recovery(log));
    349
    350	/*
    351	 * If there are other waiters on the queue then give them a chance at
    352	 * logspace before us.  Wake up the first waiters, if we do not wake
    353	 * up all the waiters then go to sleep waiting for more free space,
    354	 * otherwise try to get some space for this transaction.
    355	 */
    356	*need_bytes = xlog_ticket_reservation(log, head, tic);
    357	free_bytes = xlog_space_left(log, &head->grant);
    358	if (!list_empty_careful(&head->waiters)) {
    359		spin_lock(&head->lock);
    360		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
    361		    free_bytes < *need_bytes) {
    362			error = xlog_grant_head_wait(log, head, tic,
    363						     *need_bytes);
    364		}
    365		spin_unlock(&head->lock);
    366	} else if (free_bytes < *need_bytes) {
    367		spin_lock(&head->lock);
    368		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
    369		spin_unlock(&head->lock);
    370	}
    371
    372	return error;
    373}
    374
    375bool
    376xfs_log_writable(
    377	struct xfs_mount	*mp)
    378{
    379	/*
    380	 * Do not write to the log on norecovery mounts, if the data or log
    381	 * devices are read-only, or if the filesystem is shutdown. Read-only
    382	 * mounts allow internal writes for log recovery and unmount purposes,
    383	 * so don't restrict that case.
    384	 */
    385	if (xfs_has_norecovery(mp))
    386		return false;
    387	if (xfs_readonly_buftarg(mp->m_ddev_targp))
    388		return false;
    389	if (xfs_readonly_buftarg(mp->m_log->l_targ))
    390		return false;
    391	if (xlog_is_shutdown(mp->m_log))
    392		return false;
    393	return true;
    394}
    395
    396/*
    397 * Replenish the byte reservation required by moving the grant write head.
    398 */
    399int
    400xfs_log_regrant(
    401	struct xfs_mount	*mp,
    402	struct xlog_ticket	*tic)
    403{
    404	struct xlog		*log = mp->m_log;
    405	int			need_bytes;
    406	int			error = 0;
    407
    408	if (xlog_is_shutdown(log))
    409		return -EIO;
    410
    411	XFS_STATS_INC(mp, xs_try_logspace);
    412
    413	/*
    414	 * This is a new transaction on the ticket, so we need to change the
    415	 * transaction ID so that the next transaction has a different TID in
    416	 * the log. Just add one to the existing tid so that we can see chains
    417	 * of rolling transactions in the log easily.
    418	 */
    419	tic->t_tid++;
    420
    421	xlog_grant_push_ail(log, tic->t_unit_res);
    422
    423	tic->t_curr_res = tic->t_unit_res;
    424	if (tic->t_cnt > 0)
    425		return 0;
    426
    427	trace_xfs_log_regrant(log, tic);
    428
    429	error = xlog_grant_head_check(log, &log->l_write_head, tic,
    430				      &need_bytes);
    431	if (error)
    432		goto out_error;
    433
    434	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
    435	trace_xfs_log_regrant_exit(log, tic);
    436	xlog_verify_grant_tail(log);
    437	return 0;
    438
    439out_error:
    440	/*
    441	 * If we are failing, make sure the ticket doesn't have any current
    442	 * reservations.  We don't want to add this back when the ticket/
    443	 * transaction gets cancelled.
    444	 */
    445	tic->t_curr_res = 0;
    446	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
    447	return error;
    448}
    449
    450/*
    451 * Reserve log space and return a ticket corresponding to the reservation.
    452 *
    453 * Each reservation is going to reserve extra space for a log record header.
    454 * When writes happen to the on-disk log, we don't subtract the length of the
    455 * log record header from any reservation.  By wasting space in each
    456 * reservation, we prevent over allocation problems.
    457 */
    458int
    459xfs_log_reserve(
    460	struct xfs_mount	*mp,
    461	int			unit_bytes,
    462	int			cnt,
    463	struct xlog_ticket	**ticp,
    464	bool			permanent)
    465{
    466	struct xlog		*log = mp->m_log;
    467	struct xlog_ticket	*tic;
    468	int			need_bytes;
    469	int			error = 0;
    470
    471	if (xlog_is_shutdown(log))
    472		return -EIO;
    473
    474	XFS_STATS_INC(mp, xs_try_logspace);
    475
    476	ASSERT(*ticp == NULL);
    477	tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
    478	*ticp = tic;
    479
    480	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
    481					    : tic->t_unit_res);
    482
    483	trace_xfs_log_reserve(log, tic);
    484
    485	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
    486				      &need_bytes);
    487	if (error)
    488		goto out_error;
    489
    490	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
    491	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
    492	trace_xfs_log_reserve_exit(log, tic);
    493	xlog_verify_grant_tail(log);
    494	return 0;
    495
    496out_error:
    497	/*
    498	 * If we are failing, make sure the ticket doesn't have any current
    499	 * reservations.  We don't want to add this back when the ticket/
    500	 * transaction gets cancelled.
    501	 */
    502	tic->t_curr_res = 0;
    503	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
    504	return error;
    505}
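        /*
         * A minimal usage sketch (illustrative; in the kernel this path is
         * normally driven through xfs_trans_reserve() rather than being
         * called directly):
         *
         *	struct xlog_ticket *tic = NULL;
         *	int error;
         *
         *	error = xfs_log_reserve(mp, unit_bytes, cnt, &tic, false);
         *	if (error)
         *		return error;
         *	... format and write log vectors against tic ...
         *	xfs_log_ticket_ungrant(mp->m_log, tic);
         */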
    506
    507/*
    508 * Run all the pending iclog callbacks and wake log force waiters and iclog
    509 * space waiters so they can process the newly set shutdown state. We really
    510 * don't care what order we process callbacks here because the log is shut down
    511 * and so state cannot change on disk anymore. However, we cannot wake waiters
    512 * until the callbacks have been processed because we may be in unmount and
    513 * we must ensure that all AIL operations the callbacks perform have completed
    514 * before we tear down the AIL.
    515 *
    516 * We avoid processing actively referenced iclogs so that we don't run callbacks
    517 * while the iclog owner might still be preparing the iclog for IO submission.
    518 * These will be caught by xlog_state_iclog_release() and call this function
    519 * again to process any callbacks that may have been added to that iclog.
    520 */
    521static void
    522xlog_state_shutdown_callbacks(
    523	struct xlog		*log)
    524{
    525	struct xlog_in_core	*iclog;
    526	LIST_HEAD(cb_list);
    527
    528	iclog = log->l_iclog;
    529	do {
    530		if (atomic_read(&iclog->ic_refcnt)) {
    531			/* Reference holder will re-run iclog callbacks. */
    532			continue;
    533		}
    534		list_splice_init(&iclog->ic_callbacks, &cb_list);
    535		spin_unlock(&log->l_icloglock);
    536
    537		xlog_cil_process_committed(&cb_list);
    538
    539		spin_lock(&log->l_icloglock);
    540		wake_up_all(&iclog->ic_write_wait);
    541		wake_up_all(&iclog->ic_force_wait);
    542	} while ((iclog = iclog->ic_next) != log->l_iclog);
    543
    544	wake_up_all(&log->l_flush_wait);
    545}
    546
    547/*
    548 * Flush iclog to disk if this is the last reference to the given iclog and
    549 * it is in the WANT_SYNC state.
    550 *
    551 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
    552 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
    553 * written to stable storage, and implies that a commit record is contained
    554 * within the iclog. We need to ensure that the log tail does not move beyond
    555 * the tail that the first commit record in the iclog ordered against, otherwise
    556 * correct recovery of that checkpoint becomes dependent on future operations
    557 * performed on this iclog.
    558 *
    559 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
    560 * current tail into iclog. Once the iclog tail is set, future operations must
    561 * not modify it, otherwise they potentially violate ordering constraints for
    562 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
    563 * the iclog will get zeroed on activation of the iclog after sync, so we
    564 * always capture the tail lsn on the iclog on the first NEED_FUA release
    565 * regardless of the number of active reference counts on this iclog.
    566 */
    567int
    568xlog_state_release_iclog(
    569	struct xlog		*log,
    570	struct xlog_in_core	*iclog)
    571{
    572	xfs_lsn_t		tail_lsn;
    573	bool			last_ref;
    574
    575	lockdep_assert_held(&log->l_icloglock);
    576
    577	trace_xlog_iclog_release(iclog, _RET_IP_);
    578	/*
    579	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
    580	 * of the tail LSN into the iclog so we guarantee that the log tail does
    581	 * not move between the first time we know that the iclog needs to be
    582	 * made stable and when we eventually submit it.
    583	 */
    584	if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
    585	     (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
    586	    !iclog->ic_header.h_tail_lsn) {
    587		tail_lsn = xlog_assign_tail_lsn(log->l_mp);
    588		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
    589	}
    590
    591	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
    592
    593	if (xlog_is_shutdown(log)) {
    594		/*
    595		 * If there are no more references to this iclog, process the
    596		 * pending iclog callbacks that were waiting on the release of
    597		 * this iclog.
    598		 */
    599		if (last_ref)
    600			xlog_state_shutdown_callbacks(log);
    601		return -EIO;
    602	}
    603
    604	if (!last_ref)
    605		return 0;
    606
    607	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
    608		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
    609		return 0;
    610	}
    611
    612	iclog->ic_state = XLOG_STATE_SYNCING;
    613	xlog_verify_tail_lsn(log, iclog);
    614	trace_xlog_iclog_syncing(iclog, _RET_IP_);
    615
    616	spin_unlock(&log->l_icloglock);
    617	xlog_sync(log, iclog);
    618	spin_lock(&log->l_icloglock);
    619	return 0;
    620}
    621
    622/*
    623 * Mount a log filesystem
    624 *
    625 * mp		- ubiquitous xfs mount point structure
    626 * log_target	- buftarg of on-disk log device
    627 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
    628 * num_bblocks	- Number of BBSIZE blocks in on-disk log
    629 *
    630 * Return error or zero.
    631 */
    632int
    633xfs_log_mount(
    634	xfs_mount_t	*mp,
    635	xfs_buftarg_t	*log_target,
    636	xfs_daddr_t	blk_offset,
    637	int		num_bblks)
    638{
    639	struct xlog	*log;
    640	bool		fatal = xfs_has_crc(mp);
    641	int		error = 0;
    642	int		min_logfsbs;
    643
    644	if (!xfs_has_norecovery(mp)) {
    645		xfs_notice(mp, "Mounting V%d Filesystem",
    646			   XFS_SB_VERSION_NUM(&mp->m_sb));
    647	} else {
    648		xfs_notice(mp,
    649"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
    650			   XFS_SB_VERSION_NUM(&mp->m_sb));
    651		ASSERT(xfs_is_readonly(mp));
    652	}
    653
    654	log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
    655	if (IS_ERR(log)) {
    656		error = PTR_ERR(log);
    657		goto out;
    658	}
    659	mp->m_log = log;
    660
    661	/*
    662	 * Validate the given log space and drop a critical message via syslog
    663	 * if the log size is so small that it would lead to unexpected
    664	 * situations during the transaction log space reservation stage.
    665	 *
    666	 * Note: we can't just reject the mount if the validation fails.  This
    667	 * would mean that people would have to downgrade their kernel just to
    668	 * remedy the situation as there is no way to grow the log (short of
    669	 * black magic surgery with xfs_db).
    670	 *
    671	 * We can, however, reject mounts for CRC format filesystems, as the
    672	 * mkfs binary being used to make the filesystem should never create a
    673	 * filesystem with a log that is too small.
    674	 */
    675	min_logfsbs = xfs_log_calc_minimum_size(mp);
    676
    677	if (mp->m_sb.sb_logblocks < min_logfsbs) {
    678		xfs_warn(mp,
    679		"Log size %d blocks too small, minimum size is %d blocks",
    680			 mp->m_sb.sb_logblocks, min_logfsbs);
    681		error = -EINVAL;
    682	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
    683		xfs_warn(mp,
    684		"Log size %d blocks too large, maximum size is %lld blocks",
    685			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
    686		error = -EINVAL;
    687	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
    688		xfs_warn(mp,
    689		"log size %lld bytes too large, maximum size is %lld bytes",
    690			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
    691			 XFS_MAX_LOG_BYTES);
    692		error = -EINVAL;
    693	} else if (mp->m_sb.sb_logsunit > 1 &&
    694		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
    695		xfs_warn(mp,
    696		"log stripe unit %u bytes must be a multiple of block size",
    697			 mp->m_sb.sb_logsunit);
    698		error = -EINVAL;
    699		fatal = true;
    700	}
    701	if (error) {
    702		/*
    703		 * Log check errors are always fatal on v5; or whenever bad
    704		 * metadata leads to a crash.
    705		 */
    706		if (fatal) {
    707			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
    708			ASSERT(0);
    709			goto out_free_log;
    710		}
    711		xfs_crit(mp, "Log size out of supported range.");
    712		xfs_crit(mp,
    713"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
    714	}
    715
    716	/*
    717	 * Initialize the AIL now we have a log.
    718	 */
    719	error = xfs_trans_ail_init(mp);
    720	if (error) {
    721		xfs_warn(mp, "AIL initialisation failed: error %d", error);
    722		goto out_free_log;
    723	}
    724	log->l_ailp = mp->m_ail;
    725
    726	/*
    727	 * skip log recovery on a norecovery mount.  pretend it all
    728	 * just worked.
    729	 */
    730	if (!xfs_has_norecovery(mp)) {
    731		/*
    732		 * log recovery ignores readonly state and so we need to clear
    733		 * mount-based read only state so it can write to disk.
    734		 */
    735		bool	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
    736						&mp->m_opstate);
    737		error = xlog_recover(log);
    738		if (readonly)
    739			set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
    740		if (error) {
    741			xfs_warn(mp, "log mount/recovery failed: error %d",
    742				error);
    743			xlog_recover_cancel(log);
    744			goto out_destroy_ail;
    745		}
    746	}
    747
    748	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
    749			       "log");
    750	if (error)
    751		goto out_destroy_ail;
    752
    753	/* Normal transactions can now occur */
    754	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
    755
    756	/*
    757	 * Now the log has been fully initialised and we know where our
    758	 * space grant counters are, we can initialise the permanent ticket
    759	 * needed for delayed logging to work.
    760	 */
    761	xlog_cil_init_post_recovery(log);
    762
    763	return 0;
    764
    765out_destroy_ail:
    766	xfs_trans_ail_destroy(mp);
    767out_free_log:
    768	xlog_dealloc_log(log);
    769out:
    770	return error;
    771}
    772
    773/*
    774 * Finish the recovery of the file system.  This is separate from the
    775 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
    776 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
    777 * here.
    778 *
    779 * If we finish recovery successfully, start the background log work. If we are
    780 * not doing recovery, then we have a RO filesystem and we don't need to start
    781 * it.
    782 */
    783int
    784xfs_log_mount_finish(
    785	struct xfs_mount	*mp)
    786{
    787	struct xlog		*log = mp->m_log;
    788	bool			readonly;
    789	int			error = 0;
    790
    791	if (xfs_has_norecovery(mp)) {
    792		ASSERT(xfs_is_readonly(mp));
    793		return 0;
    794	}
    795
    796	/*
    797	 * log recovery ignores readonly state and so we need to clear
    798	 * mount-based read only state so it can write to disk.
    799	 */
    800	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
    801
    802	/*
    803	 * During the second phase of log recovery, we need iget and
    804	 * iput to behave like they do for an active filesystem.
    805	 * xfs_fs_drop_inode needs to be able to prevent the deletion
    806	 * of inodes before we're done replaying log items on those
    807	 * inodes.  Turn it off immediately after recovery finishes
    808	 * so that we don't leak the quota inodes if subsequent mount
    809	 * activities fail.
    810	 *
    811	 * We let all inodes involved in redo item processing end up on
    812	 * the LRU instead of being evicted immediately so that if we do
    813	 * something to an unlinked inode, the irele won't cause
    814	 * premature truncation and freeing of the inode, which results
    815	 * in log recovery failure.  We have to evict the unreferenced
    816	 * lru inodes after clearing SB_ACTIVE because we don't
    817	 * otherwise clean up the lru if there's a subsequent failure in
    818	 * xfs_mountfs, which leads to us leaking the inodes if nothing
    819	 * else (e.g. quotacheck) references the inodes before the
    820	 * mount failure occurs.
    821	 */
    822	mp->m_super->s_flags |= SB_ACTIVE;
    823	xfs_log_work_queue(mp);
    824	if (xlog_recovery_needed(log))
    825		error = xlog_recover_finish(log);
    826	mp->m_super->s_flags &= ~SB_ACTIVE;
    827	evict_inodes(mp->m_super);
    828
    829	/*
    830	 * Drain the buffer LRU after log recovery. This is required for v4
    831	 * filesystems to avoid leaving around buffers with NULL verifier ops,
    832	 * but we do it unconditionally to make sure we're always in a clean
    833	 * cache state after mount.
    834	 *
    835	 * Don't push in the error case because the AIL may have pending intents
    836	 * that aren't removed until recovery is cancelled.
    837	 */
    838	if (xlog_recovery_needed(log)) {
    839		if (!error) {
    840			xfs_log_force(mp, XFS_LOG_SYNC);
    841			xfs_ail_push_all_sync(mp->m_ail);
    842		}
    843		xfs_notice(mp, "Ending recovery (logdev: %s)",
    844				mp->m_logname ? mp->m_logname : "internal");
    845	} else {
    846		xfs_info(mp, "Ending clean mount");
    847	}
    848	xfs_buftarg_drain(mp->m_ddev_targp);
    849
    850	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
    851	if (readonly)
    852		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
    853
    854	/* Make sure the log is dead if we're returning failure. */
    855	ASSERT(!error || xlog_is_shutdown(log));
    856
    857	return error;
    858}
    859
    860/*
    861 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
    862 * the log.
    863 */
    864void
    865xfs_log_mount_cancel(
    866	struct xfs_mount	*mp)
    867{
    868	xlog_recover_cancel(mp->m_log);
    869	xfs_log_unmount(mp);
    870}
    871
    872/*
    873 * Flush out the iclog to disk ensuring that device caches are flushed and
    874 * the iclog hits stable storage before any completion waiters are woken.
    875 */
    876static inline int
    877xlog_force_iclog(
    878	struct xlog_in_core	*iclog)
    879{
    880	atomic_inc(&iclog->ic_refcnt);
    881	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
    882	if (iclog->ic_state == XLOG_STATE_ACTIVE)
    883		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
    884	return xlog_state_release_iclog(iclog->ic_log, iclog);
    885}
    886
    887/*
    888 * Wait for the iclog and all prior iclogs to be written to disk as required by the
    889 * log force state machine. Waiting on ic_force_wait ensures iclog completions
    890 * have been ordered and callbacks run before we are woken here, hence
    891 * guaranteeing that all the iclogs up to this one are on stable storage.
    892 */
    893int
    894xlog_wait_on_iclog(
    895	struct xlog_in_core	*iclog)
    896		__releases(iclog->ic_log->l_icloglock)
    897{
    898	struct xlog		*log = iclog->ic_log;
    899
    900	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
    901	if (!xlog_is_shutdown(log) &&
    902	    iclog->ic_state != XLOG_STATE_ACTIVE &&
    903	    iclog->ic_state != XLOG_STATE_DIRTY) {
    904		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
    905		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
    906	} else {
    907		spin_unlock(&log->l_icloglock);
    908	}
    909
    910	if (xlog_is_shutdown(log))
    911		return -EIO;
    912	return 0;
    913}
    914
    915/*
    916 * Write out an unmount record using the ticket provided. We have to account for
    917 * the data space used in the unmount ticket as this write is not done from a
    918 * transaction context that has already done the accounting for us.
    919 */
    920static int
    921xlog_write_unmount_record(
    922	struct xlog		*log,
    923	struct xlog_ticket	*ticket)
    924{
    925	struct  {
    926		struct xlog_op_header ophdr;
    927		struct xfs_unmount_log_format ulf;
    928	} unmount_rec = {
    929		.ophdr = {
    930			.oh_clientid = XFS_LOG,
    931			.oh_tid = cpu_to_be32(ticket->t_tid),
    932			.oh_flags = XLOG_UNMOUNT_TRANS,
    933		},
    934		.ulf = {
    935			.magic = XLOG_UNMOUNT_TYPE,
    936		},
    937	};
    938	struct xfs_log_iovec reg = {
    939		.i_addr = &unmount_rec,
    940		.i_len = sizeof(unmount_rec),
    941		.i_type = XLOG_REG_TYPE_UNMOUNT,
    942	};
    943	struct xfs_log_vec vec = {
    944		.lv_niovecs = 1,
    945		.lv_iovecp = &reg,
    946	};
    947
    948	BUILD_BUG_ON((sizeof(struct xlog_op_header) +
    949		      sizeof(struct xfs_unmount_log_format)) !=
    950							sizeof(unmount_rec));
    951
    952	/* account for space used by record data */
    953	ticket->t_curr_res -= sizeof(unmount_rec);
    954
    955	return xlog_write(log, NULL, &vec, ticket, reg.i_len);
    956}
    957
    958/*
    959 * Mark the filesystem clean by writing an unmount record to the head of the
    960 * log.
    961 */
    962static void
    963xlog_unmount_write(
    964	struct xlog		*log)
    965{
    966	struct xfs_mount	*mp = log->l_mp;
    967	struct xlog_in_core	*iclog;
    968	struct xlog_ticket	*tic = NULL;
    969	int			error;
    970
    971	error = xfs_log_reserve(mp, 600, 1, &tic, 0);
    972	if (error)
    973		goto out_err;
    974
    975	error = xlog_write_unmount_record(log, tic);
    976	/*
    977	 * At this point, we're umounting anyway, so there's no point in
    978	 * transitioning log state to shutdown. Just continue...
    979	 */
    980out_err:
    981	if (error)
    982		xfs_alert(mp, "%s: unmount record failed", __func__);
    983
    984	spin_lock(&log->l_icloglock);
    985	iclog = log->l_iclog;
    986	error = xlog_force_iclog(iclog);
    987	xlog_wait_on_iclog(iclog);
    988
    989	if (tic) {
    990		trace_xfs_log_umount_write(log, tic);
    991		xfs_log_ticket_ungrant(log, tic);
    992	}
    993}
    994
    995static void
    996xfs_log_unmount_verify_iclog(
    997	struct xlog		*log)
    998{
    999	struct xlog_in_core	*iclog = log->l_iclog;
   1000
   1001	do {
   1002		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
   1003		ASSERT(iclog->ic_offset == 0);
   1004	} while ((iclog = iclog->ic_next) != log->l_iclog);
   1005}
   1006
   1007/*
   1008 * Unmount record used to have a string "Unmount filesystem--" in the
   1009 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
   1010 * We just write the magic number now since that particular field isn't
   1011 * currently architecture converted and "Unmount" is a bit foo.
   1012 * As far as I know, there weren't any dependencies on the old behaviour.
   1013 */
   1014static void
   1015xfs_log_unmount_write(
   1016	struct xfs_mount	*mp)
   1017{
   1018	struct xlog		*log = mp->m_log;
   1019
   1020	if (!xfs_log_writable(mp))
   1021		return;
   1022
   1023	xfs_log_force(mp, XFS_LOG_SYNC);
   1024
   1025	if (xlog_is_shutdown(log))
   1026		return;
   1027
   1028	/*
   1029	 * If we think the summary counters are bad, avoid writing the unmount
   1030	 * record to force log recovery at next mount, after which the summary
   1031	 * counters will be recalculated.  Refer to xlog_check_unmount_rec for
   1032	 * more details.
   1033	 */
   1034	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
   1035			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
   1036		xfs_alert(mp, "%s: will fix summary counters at next mount",
   1037				__func__);
   1038		return;
   1039	}
   1040
   1041	xfs_log_unmount_verify_iclog(log);
   1042	xlog_unmount_write(log);
   1043}
   1044
   1045/*
   1046 * Empty the log for unmount/freeze.
   1047 *
   1048 * To do this, we first need to shut down the background log work so it is not
   1049 * trying to cover the log as we clean up. We then need to unpin all objects in
   1050 * the log so we can then flush them out. Once they have completed their IO and
   1051 * run the callbacks removing themselves from the AIL, we can cover the log.
   1052 */
   1053int
   1054xfs_log_quiesce(
   1055	struct xfs_mount	*mp)
   1056{
   1057	/*
   1058	 * Clear log incompat features since we're quiescing the log.  Report
   1059	 * failures, though it's not fatal to have a higher log feature
   1060	 * protection level than the log contents actually require.
   1061	 */
   1062	if (xfs_clear_incompat_log_features(mp)) {
   1063		int error;
   1064
   1065		error = xfs_sync_sb(mp, false);
   1066		if (error)
   1067			xfs_warn(mp,
   1068	"Failed to clear log incompat features on quiesce");
   1069	}
   1070
   1071	cancel_delayed_work_sync(&mp->m_log->l_work);
   1072	xfs_log_force(mp, XFS_LOG_SYNC);
   1073
   1074	/*
   1075	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
   1076	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
   1077	 * xfs_buf_iowait() cannot be used because it was pushed with the
   1078	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
   1079	 * the IO to complete.
   1080	 */
   1081	xfs_ail_push_all_sync(mp->m_ail);
   1082	xfs_buftarg_wait(mp->m_ddev_targp);
   1083	xfs_buf_lock(mp->m_sb_bp);
   1084	xfs_buf_unlock(mp->m_sb_bp);
   1085
   1086	return xfs_log_cover(mp);
   1087}
   1088
   1089void
   1090xfs_log_clean(
   1091	struct xfs_mount	*mp)
   1092{
   1093	xfs_log_quiesce(mp);
   1094	xfs_log_unmount_write(mp);
   1095}
   1096
   1097/*
   1098 * Shut down and release the AIL and Log.
   1099 *
   1100 * During unmount, we need to ensure we flush all the dirty metadata objects
   1101 * from the AIL so that the log is empty before we write the unmount record to
   1102 * the log. Once this is done, we can tear down the AIL and the log.
   1103 */
   1104void
   1105xfs_log_unmount(
   1106	struct xfs_mount	*mp)
   1107{
   1108	xfs_log_clean(mp);
   1109
   1110	xfs_buftarg_drain(mp->m_ddev_targp);
   1111
   1112	xfs_trans_ail_destroy(mp);
   1113
   1114	xfs_sysfs_del(&mp->m_log->l_kobj);
   1115
   1116	xlog_dealloc_log(mp->m_log);
   1117}
   1118
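        /*
         * Initialise the fields of the generic log item embedded in each
         * log item type so it can be added to transactions, the CIL and
         * the AIL.
         */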
   1119void
   1120xfs_log_item_init(
   1121	struct xfs_mount	*mp,
   1122	struct xfs_log_item	*item,
   1123	int			type,
   1124	const struct xfs_item_ops *ops)
   1125{
   1126	item->li_log = mp->m_log;
   1127	item->li_ailp = mp->m_ail;
   1128	item->li_type = type;
   1129	item->li_ops = ops;
   1130	item->li_lv = NULL;
   1131
   1132	INIT_LIST_HEAD(&item->li_ail);
   1133	INIT_LIST_HEAD(&item->li_cil);
   1134	INIT_LIST_HEAD(&item->li_bio_list);
   1135	INIT_LIST_HEAD(&item->li_trans);
   1136}
   1137
   1138/*
   1139 * Wake up processes waiting for log space after we have moved the log tail.
   1140 */
   1141void
   1142xfs_log_space_wake(
   1143	struct xfs_mount	*mp)
   1144{
   1145	struct xlog		*log = mp->m_log;
   1146	int			free_bytes;
   1147
   1148	if (xlog_is_shutdown(log))
   1149		return;
   1150
   1151	if (!list_empty_careful(&log->l_write_head.waiters)) {
   1152		ASSERT(!xlog_in_recovery(log));
   1153
   1154		spin_lock(&log->l_write_head.lock);
   1155		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
   1156		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
   1157		spin_unlock(&log->l_write_head.lock);
   1158	}
   1159
   1160	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
   1161		ASSERT(!xlog_in_recovery(log));
   1162
   1163		spin_lock(&log->l_reserve_head.lock);
   1164		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
   1165		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
   1166		spin_unlock(&log->l_reserve_head.lock);
   1167	}
   1168}
   1169
   1170/*
   1171 * Determine if we have a transaction that has gone to disk that needs to be
   1172 * covered. To begin the transition to the idle state firstly the log needs to
   1173 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
   1174 * we start attempting to cover the log.
   1175 *
   1176 * Only if we are then in a state where covering is needed, the caller is
   1177 * informed that dummy transactions are required to move the log into the idle
   1178 * state.
   1179 *
   1180 * If there are any items in the AIL or CIL, then we do not want to attempt to
   1181 * cover the log as we may be in a situation where there isn't log space
   1182 * available to run a dummy transaction and this can lead to deadlocks when the
   1183 * tail of the log is pinned by an item that is modified in the CIL.  Hence
   1184 * there's no point in running a dummy transaction at this point because we
   1185 * can't start trying to idle the log until both the CIL and AIL are empty.
   1186 */
   1187static bool
   1188xfs_log_need_covered(
   1189	struct xfs_mount	*mp)
   1190{
   1191	struct xlog		*log = mp->m_log;
   1192	bool			needed = false;
   1193
   1194	if (!xlog_cil_empty(log))
   1195		return false;
   1196
   1197	spin_lock(&log->l_icloglock);
   1198	switch (log->l_covered_state) {
   1199	case XLOG_STATE_COVER_DONE:
   1200	case XLOG_STATE_COVER_DONE2:
   1201	case XLOG_STATE_COVER_IDLE:
   1202		break;
   1203	case XLOG_STATE_COVER_NEED:
   1204	case XLOG_STATE_COVER_NEED2:
   1205		if (xfs_ail_min_lsn(log->l_ailp))
   1206			break;
   1207		if (!xlog_iclogs_empty(log))
   1208			break;
   1209
   1210		needed = true;
   1211		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
   1212			log->l_covered_state = XLOG_STATE_COVER_DONE;
   1213		else
   1214			log->l_covered_state = XLOG_STATE_COVER_DONE2;
   1215		break;
   1216	default:
   1217		needed = true;
   1218		break;
   1219	}
   1220	spin_unlock(&log->l_icloglock);
   1221	return needed;
   1222}
   1223
   1224/*
   1225 * Explicitly cover the log. This is similar to background log covering but
   1226 * intended for usage in quiesce codepaths. The caller is responsible for ensuring
   1227 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
   1228 * must all be empty.
   1229 */
   1230static int
   1231xfs_log_cover(
   1232	struct xfs_mount	*mp)
   1233{
   1234	int			error = 0;
   1235	bool			need_covered;
   1236
   1237	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
   1238	        !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
   1239		xlog_is_shutdown(mp->m_log));
   1240
   1241	if (!xfs_log_writable(mp))
   1242		return 0;
   1243
   1244	/*
   1245	 * xfs_log_need_covered() is not idempotent because it progresses the
   1246	 * state machine if the log requires covering. Therefore, we must call
   1247	 * this function once and use the result until we've issued an sb sync.
   1248	 * Do so first to make that abundantly clear.
   1249	 *
   1250	 * Fall into the covering sequence if the log needs covering or the
   1251	 * mount has lazy superblock accounting to sync to disk. The sb sync
   1252	 * used for covering accumulates the in-core counters, so covering
   1253	 * handles this for us.
   1254	 */
   1255	need_covered = xfs_log_need_covered(mp);
   1256	if (!need_covered && !xfs_has_lazysbcount(mp))
   1257		return 0;
   1258
   1259	/*
   1260	 * To cover the log, commit the superblock twice (at most) in
   1261	 * independent checkpoints. The first serves as a reference for the
   1262	 * tail pointer. The sync transaction and AIL push empties the AIL and
   1263	 * updates the in-core tail to the LSN of the first checkpoint. The
   1264	 * second commit updates the on-disk tail with the in-core LSN,
   1265	 * covering the log. Push the AIL one more time to leave it empty, as
   1266	 * we found it.
   1267	 */
   1268	do {
   1269		error = xfs_sync_sb(mp, true);
   1270		if (error)
   1271			break;
   1272		xfs_ail_push_all_sync(mp->m_ail);
   1273	} while (xfs_log_need_covered(mp));
   1274
   1275	return error;
   1276}
   1277
   1278/*
   1279 * We may be holding the log iclog lock upon entering this routine.
   1280 */
   1281xfs_lsn_t
   1282xlog_assign_tail_lsn_locked(
   1283	struct xfs_mount	*mp)
   1284{
   1285	struct xlog		*log = mp->m_log;
   1286	struct xfs_log_item	*lip;
   1287	xfs_lsn_t		tail_lsn;
   1288
   1289	assert_spin_locked(&mp->m_ail->ail_lock);
   1290
   1291	/*
   1292	 * To make sure we always have a valid LSN for the log tail we keep
   1293	 * track of the last LSN which was committed in log->l_last_sync_lsn,
   1294	 * and use that when the AIL was empty.
   1295	 */
   1296	lip = xfs_ail_min(mp->m_ail);
   1297	if (lip)
   1298		tail_lsn = lip->li_lsn;
   1299	else
   1300		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
   1301	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
   1302	atomic64_set(&log->l_tail_lsn, tail_lsn);
   1303	return tail_lsn;
   1304}
   1305
   1306xfs_lsn_t
   1307xlog_assign_tail_lsn(
   1308	struct xfs_mount	*mp)
   1309{
   1310	xfs_lsn_t		tail_lsn;
   1311
   1312	spin_lock(&mp->m_ail->ail_lock);
   1313	tail_lsn = xlog_assign_tail_lsn_locked(mp);
   1314	spin_unlock(&mp->m_ail->ail_lock);
   1315
   1316	return tail_lsn;
   1317}
   1318
   1319/*
   1320 * Return the space in the log between the tail and the head.  The head
   1321 * is passed in the cycle/bytes formal parms.  In the special case where
   1322 * the reserve head has wrapped past the tail, this calculation is no
   1323 * longer valid.  In this case, just return 0 which means there is no space
   1324 * in the log.  This works for all places where this function is called
   1325 * with the reserve head.  Of course, if the write head were to ever
   1326 * wrap the tail, we should blow up.  Rather than catch this case here,
   1327 * we depend on other ASSERTions in other parts of the code.   XXXmiken
   1328 *
   1329 * If reservation head is behind the tail, we have a problem. Warn about it,
   1330 * but then treat it as if the log is empty.
   1331 *
   1332 * If the log is shut down, the head and tail may be invalid or out of whack, so
   1333 * shortcut invalidity asserts in this case so that we don't trigger them
   1334 * falsely.
   1335 */
   1336STATIC int
   1337xlog_space_left(
   1338	struct xlog	*log,
   1339	atomic64_t	*head)
   1340{
   1341	int		tail_bytes;
   1342	int		tail_cycle;
   1343	int		head_cycle;
   1344	int		head_bytes;
   1345
   1346	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
   1347	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
   1348	tail_bytes = BBTOB(tail_bytes);
   1349	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
   1350		return log->l_logsize - (head_bytes - tail_bytes);
   1351	if (tail_cycle + 1 < head_cycle)
   1352		return 0;
   1353
   1354	/* Ignore potential inconsistency when shutdown. */
   1355	if (xlog_is_shutdown(log))
   1356		return log->l_logsize;
   1357
   1358	if (tail_cycle < head_cycle) {
   1359		ASSERT(tail_cycle == (head_cycle - 1));
   1360		return tail_bytes - head_bytes;
   1361	}
   1362
   1363	/*
   1364	 * The reservation head is behind the tail. In this case we just want to
   1365	 * return the size of the log as the amount of space left.
   1366	 */
   1367	xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
   1368	xfs_alert(log->l_mp, "  tail_cycle = %d, tail_bytes = %d",
   1369		  tail_cycle, tail_bytes);
   1370	xfs_alert(log->l_mp, "  GH   cycle = %d, GH   bytes = %d",
   1371		  head_cycle, head_bytes);
   1372	ASSERT(0);
   1373	return log->l_logsize;
   1374}
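        /*
         * Worked examples of the calculation above (illustrative): in a
         * 64MB log with head and tail in the same cycle, head at 40MB and
         * tail at 10MB, the free space is 64 - (40 - 10) = 34MB. If the
         * head has wrapped one cycle ahead of the tail, head at 4MB and
         * tail at 10MB, the free space is simply 10 - 4 = 6MB.
         */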
   1375
   1376
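        /*
         * Log bio completion handler, run from l_ioend_workqueue. It maps
         * the bio status to an errno, shuts the log down on I/O errors,
         * finishes the iclog state transition and finally releases ic_sema
         * so an unmounting thread can tear the iclog down safely.
         */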
   1377static void
   1378xlog_ioend_work(
   1379	struct work_struct	*work)
   1380{
   1381	struct xlog_in_core     *iclog =
   1382		container_of(work, struct xlog_in_core, ic_end_io_work);
   1383	struct xlog		*log = iclog->ic_log;
   1384	int			error;
   1385
   1386	error = blk_status_to_errno(iclog->ic_bio.bi_status);
   1387#ifdef DEBUG
   1388	/* treat writes with injected CRC errors as failed */
   1389	if (iclog->ic_fail_crc)
   1390		error = -EIO;
   1391#endif
   1392
   1393	/*
   1394	 * Race to shutdown the filesystem if we see an error.
   1395	 */
   1396	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
   1397		xfs_alert(log->l_mp, "log I/O error %d", error);
   1398		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
   1399	}
   1400
   1401	xlog_state_done_syncing(iclog);
   1402	bio_uninit(&iclog->ic_bio);
   1403
   1404	/*
   1405	 * Drop the lock to signal that we are done. Nothing references the
   1406	 * iclog after this, so an unmount waiting on this lock can now tear it
   1407	 * down safely. As such, it is unsafe to reference the iclog after the
   1408	 * unlock as we could race with it being freed.
   1409	 */
   1410	up(&iclog->ic_sema);
   1411}
   1412
   1413/*
   1414 * Return size of each in-core log record buffer.
   1415 *
   1416 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
   1417 *
   1418 * If the filesystem blocksize is too large, we may need to choose a
   1419 * larger size since the directory code currently logs entire blocks.
   1420 */
   1421STATIC void
   1422xlog_get_iclog_buffer_size(
   1423	struct xfs_mount	*mp,
   1424	struct xlog		*log)
   1425{
   1426	if (mp->m_logbufs <= 0)
   1427		mp->m_logbufs = XLOG_MAX_ICLOGS;
   1428	if (mp->m_logbsize <= 0)
   1429		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
   1430
   1431	log->l_iclog_bufs = mp->m_logbufs;
   1432	log->l_iclog_size = mp->m_logbsize;
   1433
   1434	/*
   1435	 * # headers = size / 32k - one header holds cycles from 32k of data.
   1436	 */
   1437	log->l_iclog_heads =
   1438		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
   1439	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
   1440}
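        /*
         * For example (illustrative): with the default 32kB log buffer size,
         * l_iclog_heads = DIV_ROUND_UP(32768, 32768) = 1 and
         * l_iclog_hsize = 1 << BBSHIFT = 512 bytes; a 256kB buffer needs
         * 8 header sectors, i.e. a 4kB header region per iclog.
         */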
   1441
   1442void
   1443xfs_log_work_queue(
   1444	struct xfs_mount        *mp)
   1445{
   1446	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
   1447				msecs_to_jiffies(xfs_syncd_centisecs * 10));
   1448}
   1449
   1450/*
   1451 * Clear the log incompat flags if we have the opportunity.
   1452 *
   1453 * This only happens if we're about to log the second dummy transaction as part
   1454 * of covering the log and we can get the log incompat feature usage lock.
   1455 */
   1456static inline void
   1457xlog_clear_incompat(
   1458	struct xlog		*log)
   1459{
   1460	struct xfs_mount	*mp = log->l_mp;
   1461
   1462	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
   1463				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
   1464		return;
   1465
   1466	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
   1467		return;
   1468
   1469	if (!down_write_trylock(&log->l_incompat_users))
   1470		return;
   1471
   1472	xfs_clear_incompat_log_features(mp);
   1473	up_write(&log->l_incompat_users);
   1474}
   1475
   1476/*
   1477 * Every sync period we need to unpin all items in the AIL and push them to
   1478 * disk. If there is nothing dirty, then we might need to cover the log to
   1479 * indicate that the filesystem is idle.
   1480 */
   1481static void
   1482xfs_log_worker(
   1483	struct work_struct	*work)
   1484{
   1485	struct xlog		*log = container_of(to_delayed_work(work),
   1486						struct xlog, l_work);
   1487	struct xfs_mount	*mp = log->l_mp;
   1488
   1489	/* dgc: errors ignored - not fatal and nowhere to report them */
   1490	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
   1491		/*
   1492		 * Dump a transaction into the log that contains no real change.
   1493		 * This is needed to stamp the current tail LSN into the log
   1494		 * during the covering operation.
   1495		 *
   1496		 * We cannot use an inode here for this - that will push dirty
   1497		 * state back up into the VFS and then periodic inode flushing
   1498		 * will prevent log covering from making progress. Hence we
   1499		 * synchronously log the superblock instead to ensure the
   1500		 * superblock is immediately unpinned and can be written back.
   1501		 */
   1502		xlog_clear_incompat(log);
   1503		xfs_sync_sb(mp, true);
   1504	} else
   1505		xfs_log_force(mp, 0);
   1506
   1507	/* start pushing all the metadata that is currently dirty */
   1508	xfs_ail_push_all(mp->m_ail);
   1509
   1510	/* queue us up again */
   1511	xfs_log_work_queue(mp);
   1512}
   1513
   1514/*
   1515 * This routine initializes some of the log structure for a given mount point.
   1516 * Its primary purpose is to fill in enough, so recovery can occur.  However,
   1517 * some other stuff may be filled in too.
   1518 */
   1519STATIC struct xlog *
   1520xlog_alloc_log(
   1521	struct xfs_mount	*mp,
   1522	struct xfs_buftarg	*log_target,
   1523	xfs_daddr_t		blk_offset,
   1524	int			num_bblks)
   1525{
   1526	struct xlog		*log;
   1527	xlog_rec_header_t	*head;
   1528	xlog_in_core_t		**iclogp;
   1529	xlog_in_core_t		*iclog, *prev_iclog=NULL;
   1530	int			i;
   1531	int			error = -ENOMEM;
   1532	uint			log2_size = 0;
   1533
   1534	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
   1535	if (!log) {
   1536		xfs_warn(mp, "Log allocation failed: No memory!");
   1537		goto out;
   1538	}
   1539
   1540	log->l_mp	   = mp;
   1541	log->l_targ	   = log_target;
   1542	log->l_logsize     = BBTOB(num_bblks);
   1543	log->l_logBBstart  = blk_offset;
   1544	log->l_logBBsize   = num_bblks;
   1545	log->l_covered_state = XLOG_STATE_COVER_IDLE;
   1546	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
   1547	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
   1548
   1549	log->l_prev_block  = -1;
   1550	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
   1551	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
   1552	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
   1553	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
   1554
   1555	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
   1556		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
   1557	else
   1558		log->l_iclog_roundoff = BBSIZE;
   1559
   1560	xlog_grant_head_init(&log->l_reserve_head);
   1561	xlog_grant_head_init(&log->l_write_head);
   1562
   1563	error = -EFSCORRUPTED;
   1564	if (xfs_has_sector(mp)) {
   1565	        log2_size = mp->m_sb.sb_logsectlog;
   1566		if (log2_size < BBSHIFT) {
   1567			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
   1568				log2_size, BBSHIFT);
   1569			goto out_free_log;
   1570		}
   1571
   1572	        log2_size -= BBSHIFT;
   1573		if (log2_size > mp->m_sectbb_log) {
   1574			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
   1575				log2_size, mp->m_sectbb_log);
   1576			goto out_free_log;
   1577		}
   1578
   1579		/* for larger sector sizes, must have v2 or external log */
   1580		if (log2_size && log->l_logBBstart > 0 &&
   1581			    !xfs_has_logv2(mp)) {
   1582			xfs_warn(mp,
   1583		"log sector size (0x%x) invalid for configuration.",
   1584				log2_size);
   1585			goto out_free_log;
   1586		}
   1587	}
   1588	log->l_sectBBsize = 1 << log2_size;
   1589
   1590	init_rwsem(&log->l_incompat_users);
   1591
   1592	xlog_get_iclog_buffer_size(mp, log);
   1593
   1594	spin_lock_init(&log->l_icloglock);
   1595	init_waitqueue_head(&log->l_flush_wait);
   1596
   1597	iclogp = &log->l_iclog;
   1598	/*
   1599	 * The amount of memory to allocate for the iclog structure is
   1600	 * rather funky due to the way the structure is defined.  It is
   1601	 * done this way so that we can use different sizes for machines
   1602	 * with different amounts of memory.  See the definition of
   1603	 * xlog_in_core_t in xfs_log_priv.h for details.
   1604	 */
   1605	ASSERT(log->l_iclog_size >= 4096);
   1606	for (i = 0; i < log->l_iclog_bufs; i++) {
   1607		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
   1608				sizeof(struct bio_vec);
   1609
   1610		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
   1611		if (!iclog)
   1612			goto out_free_iclog;
   1613
   1614		*iclogp = iclog;
   1615		iclog->ic_prev = prev_iclog;
   1616		prev_iclog = iclog;
   1617
   1618		iclog->ic_data = kvzalloc(log->l_iclog_size,
   1619				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
   1620		if (!iclog->ic_data)
   1621			goto out_free_iclog;
   1622		head = &iclog->ic_header;
   1623		memset(head, 0, sizeof(xlog_rec_header_t));
   1624		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
   1625		head->h_version = cpu_to_be32(
   1626			xfs_has_logv2(log->l_mp) ? 2 : 1);
   1627		head->h_size = cpu_to_be32(log->l_iclog_size);
   1628		/* new fields */
   1629		head->h_fmt = cpu_to_be32(XLOG_FMT);
   1630		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
   1631
   1632		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
   1633		iclog->ic_state = XLOG_STATE_ACTIVE;
   1634		iclog->ic_log = log;
   1635		atomic_set(&iclog->ic_refcnt, 0);
   1636		INIT_LIST_HEAD(&iclog->ic_callbacks);
   1637		iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
   1638
   1639		init_waitqueue_head(&iclog->ic_force_wait);
   1640		init_waitqueue_head(&iclog->ic_write_wait);
   1641		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
   1642		sema_init(&iclog->ic_sema, 1);
   1643
   1644		iclogp = &iclog->ic_next;
   1645	}
   1646	*iclogp = log->l_iclog;			/* complete ring */
   1647	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
   1648
   1649	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
   1650			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
   1651				    WQ_HIGHPRI),
   1652			0, mp->m_super->s_id);
   1653	if (!log->l_ioend_workqueue)
   1654		goto out_free_iclog;
   1655
   1656	error = xlog_cil_init(log);
   1657	if (error)
   1658		goto out_destroy_workqueue;
   1659	return log;
   1660
   1661out_destroy_workqueue:
   1662	destroy_workqueue(log->l_ioend_workqueue);
   1663out_free_iclog:
   1664	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
   1665		prev_iclog = iclog->ic_next;
   1666		kmem_free(iclog->ic_data);
   1667		kmem_free(iclog);
   1668		if (prev_iclog == log->l_iclog)
   1669			break;
   1670	}
   1671out_free_log:
   1672	kmem_free(log);
   1673out:
   1674	return ERR_PTR(error);
   1675}	/* xlog_alloc_log */
   1676
   1677/*
   1678 * Compute the LSN that we'd need to push the log tail towards in order to have
   1679 * (a) enough on-disk log space to log the number of bytes specified, (b) at
   1680 * least 25% of the log space free, and (c) at least 256 blocks free.  If the
   1681 * log free space already meets all three thresholds, this function returns
   1682 * NULLCOMMITLSN.
   1683 */
   1684xfs_lsn_t
   1685xlog_grant_push_threshold(
   1686	struct xlog	*log,
   1687	int		need_bytes)
   1688{
   1689	xfs_lsn_t	threshold_lsn = 0;
   1690	xfs_lsn_t	last_sync_lsn;
   1691	int		free_blocks;
   1692	int		free_bytes;
   1693	int		threshold_block;
   1694	int		threshold_cycle;
   1695	int		free_threshold;
   1696
   1697	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
   1698
   1699	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
   1700	free_blocks = BTOBBT(free_bytes);
   1701
   1702	/*
   1703	 * Set the threshold for the minimum number of free blocks in the
   1704	 * log to the maximum of what the caller needs, one quarter of the
   1705	 * log, and 256 blocks.
   1706	 */
   1707	free_threshold = BTOBB(need_bytes);
   1708	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
   1709	free_threshold = max(free_threshold, 256);
   1710	if (free_blocks >= free_threshold)
   1711		return NULLCOMMITLSN;
   1712
   1713	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
   1714						&threshold_block);
   1715	threshold_block += free_threshold;
   1716	if (threshold_block >= log->l_logBBsize) {
   1717		threshold_block -= log->l_logBBsize;
   1718		threshold_cycle += 1;
   1719	}
   1720	threshold_lsn = xlog_assign_lsn(threshold_cycle,
   1721					threshold_block);
   1722	/*
   1723	 * Don't pass in an lsn greater than the lsn of the last
   1724	 * log record known to be on disk. Use a snapshot of the last sync lsn
   1725	 * so that it doesn't change between the compare and the set.
   1726	 */
   1727	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
   1728	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
   1729		threshold_lsn = last_sync_lsn;
   1730
   1731	return threshold_lsn;
   1732}
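
       /*
        * Illustrative example (hypothetical numbers): for a 1GB internal log,
        * l_logBBsize = 2097152 basic blocks, so a caller needing 64kB
        * (BTOBB(65536) = 128 blocks) gets free_threshold = max(128, 2097152 >> 2,
        * 256) = 524288 blocks, i.e. a quarter of the log.  The push target is the
        * current tail LSN advanced by that many blocks, capped at l_last_sync_lsn
        * so we never ask for a push past the last record known to be on disk.
        */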
   1733
   1734/*
   1735 * Push the tail of the log if we need to do so to maintain the free log space
   1736 * thresholds set out by xlog_grant_push_threshold.  We may need to adopt a
   1737 * policy which pushes on an lsn which is further along in the log once we
   1738 * reach the high water mark.  In this manner, we would be creating a low water
   1739 * mark.
   1740 */
   1741STATIC void
   1742xlog_grant_push_ail(
   1743	struct xlog	*log,
   1744	int		need_bytes)
   1745{
   1746	xfs_lsn_t	threshold_lsn;
   1747
   1748	threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
   1749	if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
   1750		return;
   1751
   1752	/*
   1753	 * Get the transaction layer to kick the dirty buffers out to
   1754	 * disk asynchronously. No point in trying to do this if
   1755	 * the filesystem is shutting down.
   1756	 */
   1757	xfs_ail_push(log->l_ailp, threshold_lsn);
   1758}
   1759
   1760/*
   1761 * Stamp cycle number in every block
   1762 */
   1763STATIC void
   1764xlog_pack_data(
   1765	struct xlog		*log,
   1766	struct xlog_in_core	*iclog,
   1767	int			roundoff)
   1768{
   1769	int			i, j, k;
   1770	int			size = iclog->ic_offset + roundoff;
   1771	__be32			cycle_lsn;
   1772	char			*dp;
   1773
   1774	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
   1775
   1776	dp = iclog->ic_datap;
   1777	for (i = 0; i < BTOBB(size); i++) {
   1778		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
   1779			break;
   1780		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
   1781		*(__be32 *)dp = cycle_lsn;
   1782		dp += BBSIZE;
   1783	}
   1784
   1785	if (xfs_has_logv2(log->l_mp)) {
   1786		xlog_in_core_2_t *xhdr = iclog->ic_data;
   1787
   1788		for ( ; i < BTOBB(size); i++) {
   1789			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
   1790			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
   1791			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
   1792			*(__be32 *)dp = cycle_lsn;
   1793			dp += BBSIZE;
   1794		}
   1795
   1796		for (i = 1; i < log->l_iclog_heads; i++)
   1797			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
   1798	}
   1799}
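
       /*
        * Rough sketch of the packing above (hypothetical sizes): with a 256kB
        * iclog there are 512 BBSIZE blocks to stamp.  The main record header can
        * hold XLOG_HEADER_CYCLE_SIZE / BBSIZE = 64 saved words, so the first 64
        * blocks save their leading word into h_cycle_data[] and the remaining 448
        * spill into the extended headers' xh_cycle_data[] arrays.  During recovery
        * a block whose leading word does not match the expected cycle marks a torn
        * write, and the saved words are restored before replay.
        */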
   1800
   1801/*
   1802 * Calculate the checksum for a log buffer.
   1803 *
   1804 * This is a little more complicated than it should be because the various
   1805 * headers and the actual data are non-contiguous.
   1806 */
   1807__le32
   1808xlog_cksum(
   1809	struct xlog		*log,
   1810	struct xlog_rec_header	*rhead,
   1811	char			*dp,
   1812	int			size)
   1813{
   1814	uint32_t		crc;
   1815
   1816	/* first generate the crc for the record header ... */
   1817	crc = xfs_start_cksum_update((char *)rhead,
   1818			      sizeof(struct xlog_rec_header),
   1819			      offsetof(struct xlog_rec_header, h_crc));
   1820
   1821	/* ... then for additional cycle data for v2 logs ... */
   1822	if (xfs_has_logv2(log->l_mp)) {
   1823		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
   1824		int		i;
   1825		int		xheads;
   1826
   1827		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
   1828
   1829		for (i = 1; i < xheads; i++) {
   1830			crc = crc32c(crc, &xhdr[i].hic_xheader,
   1831				     sizeof(struct xlog_rec_ext_header));
   1832		}
   1833	}
   1834
   1835	/* ... and finally for the payload */
   1836	crc = crc32c(crc, dp, size);
   1837
   1838	return xfs_end_cksum(crc);
   1839}
   1840
   1841static void
   1842xlog_bio_end_io(
   1843	struct bio		*bio)
   1844{
   1845	struct xlog_in_core	*iclog = bio->bi_private;
   1846
   1847	queue_work(iclog->ic_log->l_ioend_workqueue,
   1848		   &iclog->ic_end_io_work);
   1849}
   1850
   1851static int
   1852xlog_map_iclog_data(
   1853	struct bio		*bio,
   1854	void			*data,
   1855	size_t			count)
   1856{
   1857	do {
   1858		struct page	*page = kmem_to_page(data);
   1859		unsigned int	off = offset_in_page(data);
   1860		size_t		len = min_t(size_t, count, PAGE_SIZE - off);
   1861
   1862		if (bio_add_page(bio, page, len, off) != len)
   1863			return -EIO;
   1864
   1865		data += len;
   1866		count -= len;
   1867	} while (count);
   1868
   1869	return 0;
   1870}
   1871
   1872STATIC void
   1873xlog_write_iclog(
   1874	struct xlog		*log,
   1875	struct xlog_in_core	*iclog,
   1876	uint64_t		bno,
   1877	unsigned int		count)
   1878{
   1879	ASSERT(bno < log->l_logBBsize);
   1880	trace_xlog_iclog_write(iclog, _RET_IP_);
   1881
   1882	/*
   1883	 * We lock the iclogbufs here so that we can serialise against I/O
   1884	 * completion during unmount.  We might be processing a shutdown
   1885	 * triggered during unmount, and that can occur asynchronously to the
   1886	 * unmount thread, and hence we need to ensure that it completes before
   1887	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
   1888	 * across the log IO to achieve that.
   1889	 */
   1890	down(&iclog->ic_sema);
   1891	if (xlog_is_shutdown(log)) {
   1892		/*
   1893		 * It would seem logical to return EIO here, but we rely on
   1894		 * the log state machine to propagate I/O errors instead of
   1895		 * doing it here.  We kick off the state machine and unlock
   1896		 * the buffer manually; the code needs to be kept in sync
   1897		 * with the I/O completion path.
   1898		 */
   1899		xlog_state_done_syncing(iclog);
   1900		up(&iclog->ic_sema);
   1901		return;
   1902	}
   1903
   1904	/*
   1905	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more
   1906	 * IOs coming immediately after this one. This prevents the block layer
   1907	 * writeback throttle from throttling log writes behind background
   1908	 * metadata writeback and causing priority inversions.
   1909	 */
   1910	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
   1911		 howmany(count, PAGE_SIZE),
   1912		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
   1913	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
   1914	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
   1915	iclog->ic_bio.bi_private = iclog;
   1916
   1917	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
   1918		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
   1919		/*
   1920		 * For external log devices, we also need to flush the data
   1921		 * device cache first to ensure all metadata writeback covered
   1922		 * by the LSN in this iclog is on stable storage. This is slow,
   1923		 * but it *must* complete before we issue the external log IO.
   1924		 */
   1925		if (log->l_targ != log->l_mp->m_ddev_targp)
   1926			blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
   1927	}
   1928	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
   1929		iclog->ic_bio.bi_opf |= REQ_FUA;
   1930
   1931	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
   1932
   1933	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
   1934		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
   1935		return;
   1936	}
   1937	if (is_vmalloc_addr(iclog->ic_data))
   1938		flush_kernel_vmap_range(iclog->ic_data, count);
   1939
   1940	/*
   1941	 * If this log buffer would straddle the end of the log we will have
   1942	 * to split it up into two bios, so that we can continue at the start.
   1943	 */
   1944	if (bno + BTOBB(count) > log->l_logBBsize) {
   1945		struct bio *split;
   1946
   1947		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
   1948				  GFP_NOIO, &fs_bio_set);
   1949		bio_chain(split, &iclog->ic_bio);
   1950		submit_bio(split);
   1951
   1952		/* restart at logical offset zero for the remainder */
   1953		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
   1954	}
   1955
   1956	submit_bio(&iclog->ic_bio);
   1957}
   1958
   1959/*
   1960 * We need to bump cycle number for the part of the iclog that is
   1961 * written to the start of the log. Watch out for the header magic
   1962 * number case, though.
   1963 */
   1964static void
   1965xlog_split_iclog(
   1966	struct xlog		*log,
   1967	void			*data,
   1968	uint64_t		bno,
   1969	unsigned int		count)
   1970{
   1971	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
   1972	unsigned int		i;
   1973
   1974	for (i = split_offset; i < count; i += BBSIZE) {
   1975		uint32_t cycle = get_unaligned_be32(data + i);
   1976
   1977		if (++cycle == XLOG_HEADER_MAGIC_NUM)
   1978			cycle++;
   1979		put_unaligned_be32(cycle, data + i);
   1980	}
   1981}
   1982
   1983static int
   1984xlog_calc_iclog_size(
   1985	struct xlog		*log,
   1986	struct xlog_in_core	*iclog,
   1987	uint32_t		*roundoff)
   1988{
   1989	uint32_t		count_init, count;
   1990
   1991	/* Add for LR header */
   1992	count_init = log->l_iclog_hsize + iclog->ic_offset;
   1993	count = roundup(count_init, log->l_iclog_roundoff);
   1994
   1995	*roundoff = count - count_init;
   1996
   1997	ASSERT(count >= count_init);
   1998	ASSERT(*roundoff < log->l_iclog_roundoff);
   1999	return count;
   2000}
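
       /*
        * Worked example (hypothetical numbers): with a 32kB log stripe unit
        * (l_iclog_roundoff = 32768), a 512 byte record header and
        * iclog->ic_offset = 20000 bytes of data, count_init = 20512 and the write
        * is rounded up to count = 32768, giving *roundoff = 12256.  Both grant
        * heads are then moved forward by that roundoff in xlog_sync() so the
        * padding that is physically written is accounted as used log space.
        */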
   2001
   2002/*
   2003 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
   2004 * fashion.  Before this routine is called, we should have moved the current
   2005 * iclog ptr in the log to point to the next available iclog.  This allows
   2006 * further writes to continue while this code syncs out an iclog ready to go.
   2007 * Before an in-core log can be written out, the data section must be scanned
   2008 * to save away the 1st word of each BBSIZE block into the header.  We replace
   2009 * it with the current cycle count.  Each BBSIZE block is tagged with the
   2010 * cycle count because there is an implicit assumption that drives will
   2011 * guarantee that entire 512 byte blocks get written at once.  In other words,
   2012 * we can't have part of a 512 byte block written and part not written.  By
   2013 * tagging each block, we will know which blocks are valid when recovering
   2014 * after an unclean shutdown.
   2015 *
   2016 * This routine is single threaded on the iclog.  No other thread can be in
   2017 * this routine with the same iclog.  Changing contents of iclog can there-
   2018 * fore be done without grabbing the state machine lock.  Updating the global
   2019 * log will require grabbing the lock though.
   2020 *
   2021 * The entire log manager uses a logical block numbering scheme.  Only
   2022 * xlog_write_iclog knows about the fact that the log may not start with
   2023 * block zero on a given device.
   2024 */
   2025STATIC void
   2026xlog_sync(
   2027	struct xlog		*log,
   2028	struct xlog_in_core	*iclog)
   2029{
   2030	unsigned int		count;		/* byte count of bwrite */
   2031	unsigned int		roundoff;       /* roundoff to BB or stripe */
   2032	uint64_t		bno;
   2033	unsigned int		size;
   2034
   2035	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
   2036	trace_xlog_iclog_sync(iclog, _RET_IP_);
   2037
   2038	count = xlog_calc_iclog_size(log, iclog, &roundoff);
   2039
   2040	/* move grant heads by roundoff in sync */
   2041	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
   2042	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
   2043
   2044	/* put cycle number in every block */
   2045	xlog_pack_data(log, iclog, roundoff);
   2046
   2047	/* real byte length */
   2048	size = iclog->ic_offset;
   2049	if (xfs_has_logv2(log->l_mp))
   2050		size += roundoff;
   2051	iclog->ic_header.h_len = cpu_to_be32(size);
   2052
   2053	XFS_STATS_INC(log->l_mp, xs_log_writes);
   2054	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
   2055
   2056	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
   2057
   2058	/* Do we need to split this write into 2 parts? */
   2059	if (bno + BTOBB(count) > log->l_logBBsize)
   2060		xlog_split_iclog(log, &iclog->ic_header, bno, count);
   2061
   2062	/* calculate the checksum */
   2063	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
   2064					    iclog->ic_datap, size);
   2065	/*
   2066	 * Intentionally corrupt the log record CRC based on the error injection
   2067	 * frequency, if defined. This facilitates testing log recovery in the
   2068	 * event of torn writes. Hence, set the IOABORT state to abort the log
   2069	 * write on I/O completion and shutdown the fs. The subsequent mount
   2070	 * detects the bad CRC and attempts to recover.
   2071	 */
   2072#ifdef DEBUG
   2073	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
   2074		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
   2075		iclog->ic_fail_crc = true;
   2076		xfs_warn(log->l_mp,
   2077	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
   2078			 be64_to_cpu(iclog->ic_header.h_lsn));
   2079	}
   2080#endif
   2081	xlog_verify_iclog(log, iclog, count);
   2082	xlog_write_iclog(log, iclog, bno, count);
   2083}
   2084
   2085/*
   2086 * Deallocate a log structure
   2087 */
   2088STATIC void
   2089xlog_dealloc_log(
   2090	struct xlog	*log)
   2091{
   2092	xlog_in_core_t	*iclog, *next_iclog;
   2093	int		i;
   2094
   2095	/*
   2096	 * Cycle all the iclogbuf locks to make sure all log IO completion
   2097	 * is done before we tear down these buffers.
   2098	 */
   2099	iclog = log->l_iclog;
   2100	for (i = 0; i < log->l_iclog_bufs; i++) {
   2101		down(&iclog->ic_sema);
   2102		up(&iclog->ic_sema);
   2103		iclog = iclog->ic_next;
   2104	}
   2105
   2106	/*
   2107	 * Destroy the CIL after waiting for iclog IO completion because an
   2108	 * iclog EIO error will try to shut down the log, which accesses the
   2109	 * CIL to wake up the waiters.
   2110	 */
   2111	xlog_cil_destroy(log);
   2112
   2113	iclog = log->l_iclog;
   2114	for (i = 0; i < log->l_iclog_bufs; i++) {
   2115		next_iclog = iclog->ic_next;
   2116		kmem_free(iclog->ic_data);
   2117		kmem_free(iclog);
   2118		iclog = next_iclog;
   2119	}
   2120
   2121	log->l_mp->m_log = NULL;
   2122	destroy_workqueue(log->l_ioend_workqueue);
   2123	kmem_free(log);
   2124}
   2125
   2126/*
   2127 * Update counters atomically now that memcpy is done.
   2128 */
   2129static inline void
   2130xlog_state_finish_copy(
   2131	struct xlog		*log,
   2132	struct xlog_in_core	*iclog,
   2133	int			record_cnt,
   2134	int			copy_bytes)
   2135{
   2136	lockdep_assert_held(&log->l_icloglock);
   2137
   2138	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
   2139	iclog->ic_offset += copy_bytes;
   2140}
   2141
   2142/*
   2143 * print out info relating to regions written which consume
   2144 * the reservation
   2145 */
   2146void
   2147xlog_print_tic_res(
   2148	struct xfs_mount	*mp,
   2149	struct xlog_ticket	*ticket)
   2150{
   2151	xfs_warn(mp, "ticket reservation summary:");
   2152	xfs_warn(mp, "  unit res    = %d bytes", ticket->t_unit_res);
   2153	xfs_warn(mp, "  current res = %d bytes", ticket->t_curr_res);
   2154	xfs_warn(mp, "  original count  = %d", ticket->t_ocnt);
   2155	xfs_warn(mp, "  remaining count = %d", ticket->t_cnt);
   2156}
   2157
   2158/*
   2159 * Print a summary of the transaction.
   2160 */
   2161void
   2162xlog_print_trans(
   2163	struct xfs_trans	*tp)
   2164{
   2165	struct xfs_mount	*mp = tp->t_mountp;
   2166	struct xfs_log_item	*lip;
   2167
   2168	/* dump core transaction and ticket info */
   2169	xfs_warn(mp, "transaction summary:");
   2170	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
   2171	xfs_warn(mp, "  log count = %d", tp->t_log_count);
   2172	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
   2173
   2174	xlog_print_tic_res(mp, tp->t_ticket);
   2175
   2176	/* dump each log item */
   2177	list_for_each_entry(lip, &tp->t_items, li_trans) {
   2178		struct xfs_log_vec	*lv = lip->li_lv;
   2179		struct xfs_log_iovec	*vec;
   2180		int			i;
   2181
   2182		xfs_warn(mp, "log item: ");
   2183		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
   2184		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
   2185		if (!lv)
   2186			continue;
   2187		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
   2188		xfs_warn(mp, "  size	= %d", lv->lv_size);
   2189		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
   2190		xfs_warn(mp, "  buf len	= %d", lv->lv_buf_len);
   2191
   2192		/* dump each iovec for the log item */
   2193		vec = lv->lv_iovecp;
   2194		for (i = 0; i < lv->lv_niovecs; i++) {
   2195			int dumplen = min(vec->i_len, 32);
   2196
   2197			xfs_warn(mp, "  iovec[%d]", i);
   2198			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
   2199			xfs_warn(mp, "    len	= %d", vec->i_len);
   2200			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
   2201			xfs_hex_dump(vec->i_addr, dumplen);
   2202
   2203			vec++;
   2204		}
   2205	}
   2206}
   2207
   2208static inline void
   2209xlog_write_iovec(
   2210	struct xlog_in_core	*iclog,
   2211	uint32_t		*log_offset,
   2212	void			*data,
   2213	uint32_t		write_len,
   2214	int			*bytes_left,
   2215	uint32_t		*record_cnt,
   2216	uint32_t		*data_cnt)
   2217{
   2218	ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
   2219	ASSERT(*log_offset % sizeof(int32_t) == 0);
   2220	ASSERT(write_len % sizeof(int32_t) == 0);
   2221
   2222	memcpy(iclog->ic_datap + *log_offset, data, write_len);
   2223	*log_offset += write_len;
   2224	*bytes_left -= write_len;
   2225	(*record_cnt)++;
   2226	*data_cnt += write_len;
   2227}
   2228
   2229/*
   2230 * Write log vectors into a single iclog which is guaranteed by the caller
   2231 * to have enough space to write the entire log vector into.
   2232 */
   2233static void
   2234xlog_write_full(
   2235	struct xfs_log_vec	*lv,
   2236	struct xlog_ticket	*ticket,
   2237	struct xlog_in_core	*iclog,
   2238	uint32_t		*log_offset,
   2239	uint32_t		*len,
   2240	uint32_t		*record_cnt,
   2241	uint32_t		*data_cnt)
   2242{
   2243	int			index;
   2244
   2245	ASSERT(*log_offset + *len <= iclog->ic_size ||
   2246		iclog->ic_state == XLOG_STATE_WANT_SYNC);
   2247
   2248	/*
   2249	 * Ordered log vectors have no regions to write so this
   2250	 * loop will naturally skip them.
   2251	 */
   2252	for (index = 0; index < lv->lv_niovecs; index++) {
   2253		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
   2254		struct xlog_op_header	*ophdr = reg->i_addr;
   2255
   2256		ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
   2257		xlog_write_iovec(iclog, log_offset, reg->i_addr,
   2258				reg->i_len, len, record_cnt, data_cnt);
   2259	}
   2260}
   2261
   2262static int
   2263xlog_write_get_more_iclog_space(
   2264	struct xlog_ticket	*ticket,
   2265	struct xlog_in_core	**iclogp,
   2266	uint32_t		*log_offset,
   2267	uint32_t		len,
   2268	uint32_t		*record_cnt,
   2269	uint32_t		*data_cnt)
   2270{
   2271	struct xlog_in_core	*iclog = *iclogp;
   2272	struct xlog		*log = iclog->ic_log;
   2273	int			error;
   2274
   2275	spin_lock(&log->l_icloglock);
   2276	ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
   2277	xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
   2278	error = xlog_state_release_iclog(log, iclog);
   2279	spin_unlock(&log->l_icloglock);
   2280	if (error)
   2281		return error;
   2282
   2283	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
   2284					log_offset);
   2285	if (error)
   2286		return error;
   2287	*record_cnt = 0;
   2288	*data_cnt = 0;
   2289	*iclogp = iclog;
   2290	return 0;
   2291}
   2292
   2293/*
   2294 * Write log vectors into a single iclog whose free space is smaller than the
   2295 * current chain length.  We write until we cannot fit a full record into the
   2296 * remaining space and then stop.  We return the log vector that is to be
   2297 * written that cannot wholly fit in the iclog.
   2298 */
   2299static int
   2300xlog_write_partial(
   2301	struct xfs_log_vec	*lv,
   2302	struct xlog_ticket	*ticket,
   2303	struct xlog_in_core	**iclogp,
   2304	uint32_t		*log_offset,
   2305	uint32_t		*len,
   2306	uint32_t		*record_cnt,
   2307	uint32_t		*data_cnt)
   2308{
   2309	struct xlog_in_core	*iclog = *iclogp;
   2310	struct xlog_op_header	*ophdr;
   2311	int			index = 0;
   2312	uint32_t		rlen;
   2313	int			error;
   2314
   2315	/* walk the logvec, copying until we run out of space in the iclog */
   2316	for (index = 0; index < lv->lv_niovecs; index++) {
   2317		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
   2318		uint32_t		reg_offset = 0;
   2319
   2320		/*
   2321		 * The first region of a continuation must have a non-zero
   2322		 * length otherwise log recovery will just skip over it and
   2323		 * start recovering from the next opheader it finds. Because we
   2324		 * mark the next opheader as a continuation, recovery will then
   2325		 * incorrectly add the continuation to the previous region and
   2326		 * that breaks stuff.
   2327		 *
   2328		 * Hence if there isn't space for region data after the
   2329		 * opheader, then we need to start afresh with a new iclog.
   2330		 */
   2331		if (iclog->ic_size - *log_offset <=
   2332					sizeof(struct xlog_op_header)) {
   2333			error = xlog_write_get_more_iclog_space(ticket,
   2334					&iclog, log_offset, *len, record_cnt,
   2335					data_cnt);
   2336			if (error)
   2337				return error;
   2338		}
   2339
   2340		ophdr = reg->i_addr;
   2341		rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
   2342
   2343		ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
   2344		ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
   2345		if (rlen != reg->i_len)
   2346			ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
   2347
   2348		xlog_write_iovec(iclog, log_offset, reg->i_addr,
   2349				rlen, len, record_cnt, data_cnt);
   2350
   2351		/* If we wrote the whole region, move to the next. */
   2352		if (rlen == reg->i_len)
   2353			continue;
   2354
   2355		/*
   2356		 * We now have a partially written iovec, but it can span
   2357		 * multiple iclogs so we loop here. First we release the iclog
   2358		 * we currently have, then we get a new iclog and add a new
   2359		 * opheader. Then we continue copying from where we were until
   2360		 * we either complete the iovec or fill the iclog. If we
   2361		 * complete the iovec, then we increment the index and go right
   2362		 * back to the top of the outer loop. if we fill the iclog, we
   2363		 * run the inner loop again.
   2364		 *
   2365		 * This is complicated by the tail of a region using all the
   2366		 * space in an iclog and hence requiring us to release the iclog
   2367		 * and get a new one before returning to the outer loop. We must
   2368		 * always guarantee that we exit this inner loop with at least
   2369		 * space for log transaction opheaders left in the current
   2370		 * iclog, hence we cannot just terminate the loop at the end
   2371		 * of the continuation. So we loop while there is no
   2372		 * space left in the current iclog, and check for the end of the
   2373		 * continuation after getting a new iclog.
   2374		 */
   2375		do {
   2376			/*
   2377			 * Ensure we include the continuation opheader in the
   2378			 * space we need in the new iclog by adding that size
   2379			 * to the length we require. This continuation opheader
   2380			 * needs to be accounted to the ticket as the space it
   2381			 * consumes hasn't been accounted to the lv we are
   2382			 * writing.
   2383			 */
   2384			error = xlog_write_get_more_iclog_space(ticket,
   2385					&iclog, log_offset,
   2386					*len + sizeof(struct xlog_op_header),
   2387					record_cnt, data_cnt);
   2388			if (error)
   2389				return error;
   2390
   2391			ophdr = iclog->ic_datap + *log_offset;
   2392			ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
   2393			ophdr->oh_clientid = XFS_TRANSACTION;
   2394			ophdr->oh_res2 = 0;
   2395			ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
   2396
   2397			ticket->t_curr_res -= sizeof(struct xlog_op_header);
   2398			*log_offset += sizeof(struct xlog_op_header);
   2399			*data_cnt += sizeof(struct xlog_op_header);
   2400
   2401			/*
   2402			 * If rlen fits in the iclog, then end the region
   2403			 * continuation. Otherwise we're going around again.
   2404			 */
   2405			reg_offset += rlen;
   2406			rlen = reg->i_len - reg_offset;
   2407			if (rlen <= iclog->ic_size - *log_offset)
   2408				ophdr->oh_flags |= XLOG_END_TRANS;
   2409			else
   2410				ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
   2411
   2412			rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
   2413			ophdr->oh_len = cpu_to_be32(rlen);
   2414
   2415			xlog_write_iovec(iclog, log_offset,
   2416					reg->i_addr + reg_offset,
   2417					rlen, len, record_cnt, data_cnt);
   2418
   2419		} while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
   2420	}
   2421
   2422	/*
   2423	 * No more iovecs remain in this logvec so return the next log vec to
   2424	 * the caller so it can go back to fast path copying.
   2425	 */
   2426	*iclogp = iclog;
   2427	return 0;
   2428}
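
       /*
        * Illustrative walk-through (hypothetical numbers): a 70000 byte region
        * reaching an iclog with 16384 bytes of space left gets a first copy of
        * rlen = 16384 with its opheader flagged XLOG_CONTINUE_TRANS.  The inner
        * loop then grabs a fresh iclog, writes a new opheader flagged
        * XLOG_WAS_CONT_TRANS (charged to the ticket, as the log vector never
        * accounted for it) and copies the next chunk; this repeats until the
        * remainder fits, at which point the opheader gains XLOG_END_TRANS and the
        * outer loop moves on to the next iovec.
        */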
   2429
   2430/*
   2431 * Write some region out to in-core log
   2432 *
   2433 * This will be called when writing externally provided regions or when
   2434 * writing out a commit record for a given transaction.
   2435 *
   2436 * General algorithm:
   2437 *	1. Find total length of this write.  This may include adding to the
   2438 *		lengths passed in.
   2439 *	2. Check whether we violate the ticket's reservation.
   2440 *	3. While writing to this iclog
   2441 *	    A. Reserve as much space in this iclog as we can get
   2442 *	    B. If this is first write, save away start lsn
   2443 *	    C. While writing this region:
   2444 *		1. If first write of transaction, write start record
   2445 *		2. Write log operation header (header per region)
   2446 *		3. Find out if we can fit entire region into this iclog
   2447 *		4. Potentially, verify destination memcpy ptr
   2448 *		5. Memcpy (partial) region
   2449 *		6. If partial copy, release iclog; otherwise, continue
   2450 *			copying more regions into current iclog
   2451 *	4. Mark want sync bit (in simulation mode)
   2452 *	5. Release iclog for potential flush to on-disk log.
   2453 *
   2454 * ERRORS:
   2455 * 1.	Panic if reservation is overrun.  This should never happen since
   2456 *	reservation amounts are generated internal to the filesystem.
   2457 * NOTES:
   2458 * 1. Tickets are single threaded data structures.
   2459 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
   2460 *	syncing routine.  When a single log_write region needs to span
   2461 *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
   2462 *	on all log operation writes which don't contain the end of the
   2463 *	region.  The XLOG_END_TRANS bit is used for the in-core log
   2464 *	operation which contains the end of the continued log_write region.
   2465 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
   2466 *	we don't really know exactly how much space will be used.  As a result,
   2467 *	we don't update ic_offset until the end when we know exactly how many
   2468 *	bytes have been written out.
   2469 */
   2470int
   2471xlog_write(
   2472	struct xlog		*log,
   2473	struct xfs_cil_ctx	*ctx,
   2474	struct xfs_log_vec	*log_vector,
   2475	struct xlog_ticket	*ticket,
   2476	uint32_t		len)
   2477
   2478{
   2479	struct xlog_in_core	*iclog = NULL;
   2480	struct xfs_log_vec	*lv = log_vector;
   2481	uint32_t		record_cnt = 0;
   2482	uint32_t		data_cnt = 0;
   2483	int			error = 0;
   2484	int			log_offset;
   2485
   2486	if (ticket->t_curr_res < 0) {
   2487		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
   2488		     "ctx ticket reservation ran out. Need to up reservation");
   2489		xlog_print_tic_res(log->l_mp, ticket);
   2490		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
   2491	}
   2492
   2493	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
   2494					   &log_offset);
   2495	if (error)
   2496		return error;
   2497
   2498	ASSERT(log_offset <= iclog->ic_size - 1);
   2499
   2500	/*
   2501	 * If we have a context pointer, pass it the first iclog we are
   2502	 * writing to so it can record state needed for iclog write
   2503	 * ordering.
   2504	 */
   2505	if (ctx)
   2506		xlog_cil_set_ctx_write_state(ctx, iclog);
   2507
   2508	while (lv) {
   2509		/*
   2510		 * If the entire log vec does not fit in the iclog, punt it to
   2511		 * the partial copy loop which can handle this case.
   2512		 */
   2513		if (lv->lv_niovecs &&
   2514		    lv->lv_bytes > iclog->ic_size - log_offset) {
   2515			error = xlog_write_partial(lv, ticket, &iclog,
   2516					&log_offset, &len, &record_cnt,
   2517					&data_cnt);
   2518			if (error) {
   2519				/*
   2520				 * We have no iclog to release, so just return
   2521				 * the error immediately.
   2522				 */
   2523				return error;
   2524			}
   2525		} else {
   2526			xlog_write_full(lv, ticket, iclog, &log_offset,
   2527					 &len, &record_cnt, &data_cnt);
   2528		}
   2529		lv = lv->lv_next;
   2530	}
   2531	ASSERT(len == 0);
   2532
   2533	/*
   2534	 * We've already been guaranteed that the last writes will fit inside
   2535	 * the current iclog, and hence it will already have the space used by
   2536	 * those writes accounted to it. Hence we do not need to update the
   2537	 * iclog with the number of bytes written here.
   2538	 */
   2539	spin_lock(&log->l_icloglock);
   2540	xlog_state_finish_copy(log, iclog, record_cnt, 0);
   2541	error = xlog_state_release_iclog(log, iclog);
   2542	spin_unlock(&log->l_icloglock);
   2543
   2544	return error;
   2545}
   2546
   2547static void
   2548xlog_state_activate_iclog(
   2549	struct xlog_in_core	*iclog,
   2550	int			*iclogs_changed)
   2551{
   2552	ASSERT(list_empty_careful(&iclog->ic_callbacks));
   2553	trace_xlog_iclog_activate(iclog, _RET_IP_);
   2554
   2555	/*
   2556	 * If the number of ops in this iclog indicates it just contains the
   2557	 * dummy transaction, we can change state into IDLE (the second time
   2558	 * around).  Otherwise we should change the state to NEED (a dummy is needed).
   2559	 * We don't need to cover the dummy.
   2560	 */
   2561	if (*iclogs_changed == 0 &&
   2562	    iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
   2563		*iclogs_changed = 1;
   2564	} else {
   2565		/*
   2566		 * We have two dirty iclogs so start over.  This could also be
   2567		 * num of ops indicating this is not the dummy going out.
   2568		 */
   2569		*iclogs_changed = 2;
   2570	}
   2571
   2572	iclog->ic_state	= XLOG_STATE_ACTIVE;
   2573	iclog->ic_offset = 0;
   2574	iclog->ic_header.h_num_logops = 0;
   2575	memset(iclog->ic_header.h_cycle_data, 0,
   2576		sizeof(iclog->ic_header.h_cycle_data));
   2577	iclog->ic_header.h_lsn = 0;
   2578	iclog->ic_header.h_tail_lsn = 0;
   2579}
   2580
   2581/*
   2582 * Loop through all iclogs and mark all iclogs currently marked DIRTY as
   2583 * ACTIVE after iclog I/O has completed.
   2584 */
   2585static void
   2586xlog_state_activate_iclogs(
   2587	struct xlog		*log,
   2588	int			*iclogs_changed)
   2589{
   2590	struct xlog_in_core	*iclog = log->l_iclog;
   2591
   2592	do {
   2593		if (iclog->ic_state == XLOG_STATE_DIRTY)
   2594			xlog_state_activate_iclog(iclog, iclogs_changed);
   2595		/*
   2596		 * The ordering of marking iclogs ACTIVE must be maintained, so
   2597		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
   2598		 */
   2599		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
   2600			break;
   2601	} while ((iclog = iclog->ic_next) != log->l_iclog);
   2602}
   2603
   2604static int
   2605xlog_covered_state(
   2606	int			prev_state,
   2607	int			iclogs_changed)
   2608{
   2609	/*
   2610	 * We go to NEED for any non-covering writes. We go to NEED2 if we just
   2611	 * wrote the first covering record (DONE). We go to IDLE if we just
   2612	 * wrote the second covering record (DONE2) and remain in IDLE until a
   2613	 * non-covering write occurs.
   2614	 */
   2615	switch (prev_state) {
   2616	case XLOG_STATE_COVER_IDLE:
   2617		if (iclogs_changed == 1)
   2618			return XLOG_STATE_COVER_IDLE;
   2619		fallthrough;
   2620	case XLOG_STATE_COVER_NEED:
   2621	case XLOG_STATE_COVER_NEED2:
   2622		break;
   2623	case XLOG_STATE_COVER_DONE:
   2624		if (iclogs_changed == 1)
   2625			return XLOG_STATE_COVER_NEED2;
   2626		break;
   2627	case XLOG_STATE_COVER_DONE2:
   2628		if (iclogs_changed == 1)
   2629			return XLOG_STATE_COVER_IDLE;
   2630		break;
   2631	default:
   2632		ASSERT(0);
   2633	}
   2634
   2635	return XLOG_STATE_COVER_NEED;
   2636}
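
       /*
        * Sketch of the intended covering sequence (the DONE states are set by the
        * covering path elsewhere, not here): NEED -> DONE when the first dummy
        * record is issued, then DONE -> NEED2 above once that iclog goes clean;
        * NEED2 -> DONE2 for the second dummy record, then DONE2 -> IDLE above.
        * Any other activity (iclogs_changed == 2) drops the state back to NEED.
        */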
   2637
   2638STATIC void
   2639xlog_state_clean_iclog(
   2640	struct xlog		*log,
   2641	struct xlog_in_core	*dirty_iclog)
   2642{
   2643	int			iclogs_changed = 0;
   2644
   2645	trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
   2646
   2647	dirty_iclog->ic_state = XLOG_STATE_DIRTY;
   2648
   2649	xlog_state_activate_iclogs(log, &iclogs_changed);
   2650	wake_up_all(&dirty_iclog->ic_force_wait);
   2651
   2652	if (iclogs_changed) {
   2653		log->l_covered_state = xlog_covered_state(log->l_covered_state,
   2654				iclogs_changed);
   2655	}
   2656}
   2657
   2658STATIC xfs_lsn_t
   2659xlog_get_lowest_lsn(
   2660	struct xlog		*log)
   2661{
   2662	struct xlog_in_core	*iclog = log->l_iclog;
   2663	xfs_lsn_t		lowest_lsn = 0, lsn;
   2664
   2665	do {
   2666		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
   2667		    iclog->ic_state == XLOG_STATE_DIRTY)
   2668			continue;
   2669
   2670		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
   2671		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
   2672			lowest_lsn = lsn;
   2673	} while ((iclog = iclog->ic_next) != log->l_iclog);
   2674
   2675	return lowest_lsn;
   2676}
   2677
   2678/*
   2679 * Completion of an iclog IO does not imply that a transaction has completed, as
   2680 * transactions can be large enough to span many iclogs. We cannot change the
   2681 * tail of the log half way through a transaction as this may be the only
   2682 * transaction in the log and moving the tail to point to the middle of it
   2683 * will prevent recovery from finding the start of the transaction. Hence we
   2684 * should only update the last_sync_lsn if this iclog contains transaction
   2685 * completion callbacks on it.
   2686 *
   2687 * We have to do this before we drop the icloglock to ensure we are the only one
   2688 * that can update it.
   2689 *
   2690 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
   2691 * the reservation grant head pushing. This is due to the fact that the push
   2692 * target is bound by the current last_sync_lsn value. Hence if we have a large
   2693 * amount of log space bound up in this committing transaction then the
   2694 * last_sync_lsn value may be the limiting factor preventing tail pushing from
   2695 * freeing space in the log. Hence once we've updated the last_sync_lsn we
   2696 * should push the AIL to ensure the push target (and hence the grant head) is
   2697 * no longer bound by the old log head location and can move forwards and make
   2698 * progress again.
   2699 */
   2700static void
   2701xlog_state_set_callback(
   2702	struct xlog		*log,
   2703	struct xlog_in_core	*iclog,
   2704	xfs_lsn_t		header_lsn)
   2705{
   2706	trace_xlog_iclog_callback(iclog, _RET_IP_);
   2707	iclog->ic_state = XLOG_STATE_CALLBACK;
   2708
   2709	ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
   2710			   header_lsn) <= 0);
   2711
   2712	if (list_empty_careful(&iclog->ic_callbacks))
   2713		return;
   2714
   2715	atomic64_set(&log->l_last_sync_lsn, header_lsn);
   2716	xlog_grant_push_ail(log, 0);
   2717}
   2718
   2719/*
   2720 * Return true if we need to stop processing, false to continue to the next
   2721 * iclog. The caller will need to run callbacks if the iclog is returned in the
   2722 * XLOG_STATE_CALLBACK state.
   2723 */
   2724static bool
   2725xlog_state_iodone_process_iclog(
   2726	struct xlog		*log,
   2727	struct xlog_in_core	*iclog)
   2728{
   2729	xfs_lsn_t		lowest_lsn;
   2730	xfs_lsn_t		header_lsn;
   2731
   2732	switch (iclog->ic_state) {
   2733	case XLOG_STATE_ACTIVE:
   2734	case XLOG_STATE_DIRTY:
   2735		/*
   2736		 * Skip all iclogs in the ACTIVE & DIRTY states:
   2737		 */
   2738		return false;
   2739	case XLOG_STATE_DONE_SYNC:
   2740		/*
   2741		 * Now that we have an iclog that is in the DONE_SYNC state, do
   2742		 * one more check here to see if we have chased our tail around.
   2743		 * If this is not the lowest lsn iclog, then we will leave it
   2744		 * for another completion to process.
   2745		 */
   2746		header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
   2747		lowest_lsn = xlog_get_lowest_lsn(log);
   2748		if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
   2749			return false;
   2750		xlog_state_set_callback(log, iclog, header_lsn);
   2751		return false;
   2752	default:
   2753		/*
   2754		 * Can only perform callbacks in order.  Since this iclog is not
   2755		 * in the DONE_SYNC state, we skip the rest and just try to
   2756		 * clean up.
   2757		 */
   2758		return true;
   2759	}
   2760}
   2761
   2762/*
   2763 * Loop over all the iclogs, running attached callbacks on them. Return true if
   2764 * we ran any callbacks, indicating that we dropped the icloglock. We don't need
   2765 * to handle transient shutdown state here at all because
   2766 * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
   2767 * cleanup of the callbacks.
   2768 */
   2769static bool
   2770xlog_state_do_iclog_callbacks(
   2771	struct xlog		*log)
   2772		__releases(&log->l_icloglock)
   2773		__acquires(&log->l_icloglock)
   2774{
   2775	struct xlog_in_core	*first_iclog = log->l_iclog;
   2776	struct xlog_in_core	*iclog = first_iclog;
   2777	bool			ran_callback = false;
   2778
   2779	do {
   2780		LIST_HEAD(cb_list);
   2781
   2782		if (xlog_state_iodone_process_iclog(log, iclog))
   2783			break;
   2784		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
   2785			iclog = iclog->ic_next;
   2786			continue;
   2787		}
   2788		list_splice_init(&iclog->ic_callbacks, &cb_list);
   2789		spin_unlock(&log->l_icloglock);
   2790
   2791		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
   2792		xlog_cil_process_committed(&cb_list);
   2793		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
   2794		ran_callback = true;
   2795
   2796		spin_lock(&log->l_icloglock);
   2797		xlog_state_clean_iclog(log, iclog);
   2798		iclog = iclog->ic_next;
   2799	} while (iclog != first_iclog);
   2800
   2801	return ran_callback;
   2802}
   2803
   2804
   2805/*
   2806 * Loop running iclog completion callbacks until there are no more iclogs in a
   2807 * state that can run callbacks.
   2808 */
   2809STATIC void
   2810xlog_state_do_callback(
   2811	struct xlog		*log)
   2812{
   2813	int			flushcnt = 0;
   2814	int			repeats = 0;
   2815
   2816	spin_lock(&log->l_icloglock);
   2817	while (xlog_state_do_iclog_callbacks(log)) {
   2818		if (xlog_is_shutdown(log))
   2819			break;
   2820
   2821		if (++repeats > 5000) {
   2822			flushcnt += repeats;
   2823			repeats = 0;
   2824			xfs_warn(log->l_mp,
   2825				"%s: possible infinite loop (%d iterations)",
   2826				__func__, flushcnt);
   2827		}
   2828	}
   2829
   2830	if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
   2831		wake_up_all(&log->l_flush_wait);
   2832
   2833	spin_unlock(&log->l_icloglock);
   2834}
   2835
   2836
   2837/*
   2838 * Finish transitioning this iclog to the dirty state.
   2839 *
   2840 * Callbacks could take time, so they are done outside the scope of the
   2841 * global state machine log lock.
   2842 */
   2843STATIC void
   2844xlog_state_done_syncing(
   2845	struct xlog_in_core	*iclog)
   2846{
   2847	struct xlog		*log = iclog->ic_log;
   2848
   2849	spin_lock(&log->l_icloglock);
   2850	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
   2851	trace_xlog_iclog_sync_done(iclog, _RET_IP_);
   2852
   2853	/*
   2854	 * If we got an error, either on the first buffer, or in the case of
   2855	 * split log writes, on the second, we shut down the file system and
   2856	 * no iclogs should ever be attempted to be written to disk again.
   2857	 */
   2858	if (!xlog_is_shutdown(log)) {
   2859		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
   2860		iclog->ic_state = XLOG_STATE_DONE_SYNC;
   2861	}
   2862
   2863	/*
   2864	 * Someone could be sleeping prior to writing out the next
   2865	 * iclog buffer; we wake them all.  One will get to do the
   2866	 * I/O, the others get to wait for the result.
   2867	 */
   2868	wake_up_all(&iclog->ic_write_wait);
   2869	spin_unlock(&log->l_icloglock);
   2870	xlog_state_do_callback(log);
   2871}
   2872
   2873/*
   2874 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
   2875 * sleep.  We wait on the flush queue on the head iclog as that should be
   2876 * the first iclog to complete flushing. Hence if all iclogs are syncing,
   2877 * we will wait here and all new writes will sleep until a sync completes.
   2878 *
   2879 * The in-core logs are used in a circular fashion. They are not used
   2880 * out-of-order even when an iclog past the head is free.
   2881 *
   2882 * return:
   2883 *	* log_offset where xlog_write() can start writing into the in-core
   2884 *		log's data space.
   2885 *	* in-core log pointer to which xlog_write() should write.
   2886 *	* boolean indicating this is a continued write to an in-core log.
   2887 *		If this is the last write, then the in-core log's offset field
   2888 *		needs to be incremented, depending on the amount of data which
   2889 *		is copied.
   2890 */
   2891STATIC int
   2892xlog_state_get_iclog_space(
   2893	struct xlog		*log,
   2894	int			len,
   2895	struct xlog_in_core	**iclogp,
   2896	struct xlog_ticket	*ticket,
   2897	int			*logoffsetp)
   2898{
   2899	int		  log_offset;
   2900	xlog_rec_header_t *head;
   2901	xlog_in_core_t	  *iclog;
   2902
   2903restart:
   2904	spin_lock(&log->l_icloglock);
   2905	if (xlog_is_shutdown(log)) {
   2906		spin_unlock(&log->l_icloglock);
   2907		return -EIO;
   2908	}
   2909
   2910	iclog = log->l_iclog;
   2911	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
   2912		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
   2913
   2914		/* Wait for log writes to have flushed */
   2915		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
   2916		goto restart;
   2917	}
   2918
   2919	head = &iclog->ic_header;
   2920
   2921	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
   2922	log_offset = iclog->ic_offset;
   2923
   2924	trace_xlog_iclog_get_space(iclog, _RET_IP_);
   2925
   2926	/* On the 1st write to an iclog, figure out lsn.  This works
   2927	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
   2928	 * committing to.  If the offset is set, that's how many blocks
   2929	 * must be written.
   2930	 */
   2931	if (log_offset == 0) {
   2932		ticket->t_curr_res -= log->l_iclog_hsize;
   2933		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
   2934		head->h_lsn = cpu_to_be64(
   2935			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
   2936		ASSERT(log->l_curr_block >= 0);
   2937	}
   2938
   2939	/* If there is enough room to write everything, then do it.  Otherwise,
   2940	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
   2941	 * bit is on, so this will get flushed out.  Don't update ic_offset
   2942	 * until you know exactly how many bytes get copied.  Therefore, wait
   2943	 * until later to update ic_offset.
   2944	 *
   2945	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
   2946	 * can fit into remaining data section.
   2947	 */
   2948	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
   2949		int		error = 0;
   2950
   2951		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
   2952
   2953		/*
   2954		 * If we are the only one writing to this iclog, sync it to
   2955		 * disk.  We need to do an atomic compare and decrement here to
   2956		 * avoid racing with concurrent atomic_dec_and_lock() calls in
   2957		 * xlog_state_release_iclog() when there is more than one
   2958		 * reference to the iclog.
   2959		 */
   2960		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
   2961			error = xlog_state_release_iclog(log, iclog);
   2962		spin_unlock(&log->l_icloglock);
   2963		if (error)
   2964			return error;
   2965		goto restart;
   2966	}
   2967
   2968	/* Do we have enough room to write the full amount in the remainder
   2969	 * of this iclog?  Or must we continue a write on the next iclog and
   2970	 * mark this iclog as completely taken?  In the case where we switch
   2971	 * iclogs (to mark it taken), this particular iclog will release/sync
   2972	 * to disk in xlog_write().
   2973	 */
   2974	if (len <= iclog->ic_size - iclog->ic_offset)
   2975		iclog->ic_offset += len;
   2976	else
   2977		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
   2978	*iclogp = iclog;
   2979
   2980	ASSERT(iclog->ic_offset <= iclog->ic_size);
   2981	spin_unlock(&log->l_icloglock);
   2982
   2983	*logoffsetp = log_offset;
   2984	return 0;
   2985}
   2986
   2987/*
   2988 * The first cnt-1 times a ticket goes through here we don't need to move the
   2989 * grant write head because the permanent reservation has reserved cnt times the
   2990 * unit amount.  Release part of current permanent unit reservation and reset
   2991 * current reservation to be one unit's worth.  Also move grant reservation head
   2992 * forward.
   2993 */
   2994void
   2995xfs_log_ticket_regrant(
   2996	struct xlog		*log,
   2997	struct xlog_ticket	*ticket)
   2998{
   2999	trace_xfs_log_ticket_regrant(log, ticket);
   3000
   3001	if (ticket->t_cnt > 0)
   3002		ticket->t_cnt--;
   3003
   3004	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
   3005					ticket->t_curr_res);
   3006	xlog_grant_sub_space(log, &log->l_write_head.grant,
   3007					ticket->t_curr_res);
   3008	ticket->t_curr_res = ticket->t_unit_res;
   3009
   3010	trace_xfs_log_ticket_regrant_sub(log, ticket);
   3011
   3012	/* no pre-reserved space left, so regrant a full unit of reservation */
   3013	if (!ticket->t_cnt) {
   3014		xlog_grant_add_space(log, &log->l_reserve_head.grant,
   3015				     ticket->t_unit_res);
   3016		trace_xfs_log_ticket_regrant_exit(log, ticket);
   3017
   3018		ticket->t_curr_res = ticket->t_unit_res;
   3019	}
   3020
   3021	xfs_log_ticket_put(ticket);
   3022}
   3023
   3024/*
   3025 * Give back the space left from a reservation.
   3026 *
   3027 * All the information we need to make a correct determination of space left
   3028 * is present.  For non-permanent reservations, things are quite easy.  The
   3029 * count should have been decremented to zero.  We only need to deal with the
   3030 * space remaining in the current reservation part of the ticket.  If the
   3031 * ticket contains a permanent reservation, there may be left over space which
   3032 * needs to be released.  A count of N means that N-1 refills of the current
   3033 * reservation can be done before we need to ask for more space.  The first
   3034 * one goes to fill up the first current reservation.  Once we run out of
   3035 * space, the count will stay at zero and the only space remaining will be
   3036 * in the current reservation field.
   3037 */
   3038void
   3039xfs_log_ticket_ungrant(
   3040	struct xlog		*log,
   3041	struct xlog_ticket	*ticket)
   3042{
   3043	int			bytes;
   3044
   3045	trace_xfs_log_ticket_ungrant(log, ticket);
   3046
   3047	if (ticket->t_cnt > 0)
   3048		ticket->t_cnt--;
   3049
   3050	trace_xfs_log_ticket_ungrant_sub(log, ticket);
   3051
   3052	/*
   3053	 * If this is a permanent reservation ticket, we may be able to free
   3054	 * up more space based on the remaining count.
   3055	 */
   3056	bytes = ticket->t_curr_res;
   3057	if (ticket->t_cnt > 0) {
   3058		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
   3059		bytes += ticket->t_unit_res*ticket->t_cnt;
   3060	}
   3061
   3062	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
   3063	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
   3064
   3065	trace_xfs_log_ticket_ungrant_exit(log, ticket);
   3066
   3067	xfs_log_space_wake(log->l_mp);
   3068	xfs_log_ticket_put(ticket);
   3069}
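
       /*
        * Example (hypothetical numbers): a permanent ticket that still has
        * t_cnt = 5 and t_curr_res = 1000 bytes at ungrant time has its count
        * dropped to 4 above and then gives back bytes = 1000 + 4 * t_unit_res:
        * the unused part of the current reservation plus the remaining whole
        * unit reservations are removed from both grant heads before waiters
        * are woken.
        */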
   3070
   3071/*
   3072 * This routine will mark the current iclog in the ring as WANT_SYNC and move
   3073 * the current iclog pointer to the next iclog in the ring.
   3074 */
   3075void
   3076xlog_state_switch_iclogs(
   3077	struct xlog		*log,
   3078	struct xlog_in_core	*iclog,
   3079	int			eventual_size)
   3080{
   3081	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
   3082	assert_spin_locked(&log->l_icloglock);
   3083	trace_xlog_iclog_switch(iclog, _RET_IP_);
   3084
   3085	if (!eventual_size)
   3086		eventual_size = iclog->ic_offset;
   3087	iclog->ic_state = XLOG_STATE_WANT_SYNC;
   3088	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
   3089	log->l_prev_block = log->l_curr_block;
   3090	log->l_prev_cycle = log->l_curr_cycle;
   3091
   3092	/* roll log?: ic_offset changed later */
   3093	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
   3094
   3095	/* Round up to next log-sunit */
   3096	if (log->l_iclog_roundoff > BBSIZE) {
   3097		uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
   3098		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
   3099	}
   3100
   3101	if (log->l_curr_block >= log->l_logBBsize) {
   3102		/*
   3103		 * Rewind the current block before the cycle is bumped to make
   3104		 * sure that the combined LSN never transiently moves forward
   3105		 * when the log wraps to the next cycle. This is to support the
   3106		 * unlocked sample of these fields from xlog_valid_lsn(). Most
   3107		 * other cases should acquire l_icloglock.
   3108		 */
   3109		log->l_curr_block -= log->l_logBBsize;
   3110		ASSERT(log->l_curr_block >= 0);
   3111		smp_wmb();
   3112		log->l_curr_cycle++;
   3113		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
   3114			log->l_curr_cycle++;
   3115	}
   3116	ASSERT(iclog == log->l_iclog);
   3117	log->l_iclog = iclog->ic_next;
   3118}
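
       /*
        * Example (hypothetical numbers): with a 32kB stripe unit, sunit_bb = 64,
        * so after accounting the iclog data and header blocks l_curr_block is
        * rounded up to the next multiple of 64 basic blocks.  If that pushes it
        * past l_logBBsize the block count wraps and l_curr_cycle is bumped,
        * skipping XLOG_HEADER_MAGIC_NUM so a stamped cycle value can never be
        * mistaken for a record header magic.
        */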
   3119
   3120/*
   3121 * Force the iclog to disk and check if the iclog has been completed before
   3122 * xlog_force_iclog() returns. This can happen on synchronous (e.g.
   3123 * pmem) or fast async storage because we drop the icloglock to issue the IO.
   3124 * If completion has already occurred, tell the caller so that it can avoid an
   3125 * unnecessary wait on the iclog.
   3126 */
   3127static int
   3128xlog_force_and_check_iclog(
   3129	struct xlog_in_core	*iclog,
   3130	bool			*completed)
   3131{
   3132	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
   3133	int			error;
   3134
   3135	*completed = false;
   3136	error = xlog_force_iclog(iclog);
   3137	if (error)
   3138		return error;
   3139
   3140	/*
   3141	 * If the iclog has already been completed and reused the header LSN
   3142	 * will have been rewritten by completion
   3143	 */
   3144	if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
   3145		*completed = true;
   3146	return 0;
   3147}
   3148
   3149/*
   3150 * Write out all data in the in-core log as of this exact moment in time.
   3151 *
   3152 * Data may be written to the in-core log during this call.  However,
   3153 * we don't guarantee this data will be written out.  A change from past
   3154 * implementation means this routine will *not* write out zero length LRs.
   3155 *
   3156 * Basically, we try and perform an intelligent scan of the in-core logs.
   3157 * If we determine there is no flushable data, we just return.  There is no
   3158 * flushable data if:
   3159 *
   3160 *	1. the current iclog is active and has no data; the previous iclog
   3161 *		is in the active or dirty state.
   3162 *	2. the current iclog is dirty, and the previous iclog is in the
   3163 *		active or dirty state.
   3164 *
   3165 * We may sleep if:
   3166 *
   3167 *	1. the current iclog is not in the active nor dirty state.
   3168 *	2. the current iclog is dirty, and the previous iclog is not in the
   3169 *		active nor dirty state.
   3170 *	3. the current iclog is active, and there is another thread writing
   3171 *		to this particular iclog.
   3172 *	4. a) the current iclog is active and has no other writers
   3173 *	   b) when we return from flushing out this iclog, it is still
   3174 *		not in the active nor dirty state.
   3175 */
   3176int
   3177xfs_log_force(
   3178	struct xfs_mount	*mp,
   3179	uint			flags)
   3180{
   3181	struct xlog		*log = mp->m_log;
   3182	struct xlog_in_core	*iclog;
   3183
   3184	XFS_STATS_INC(mp, xs_log_force);
   3185	trace_xfs_log_force(mp, 0, _RET_IP_);
   3186
   3187	xlog_cil_force(log);
   3188
   3189	spin_lock(&log->l_icloglock);
   3190	if (xlog_is_shutdown(log))
   3191		goto out_error;
   3192
   3193	iclog = log->l_iclog;
   3194	trace_xlog_iclog_force(iclog, _RET_IP_);
   3195
   3196	if (iclog->ic_state == XLOG_STATE_DIRTY ||
   3197	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
   3198	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
   3199		/*
   3200		 * If the head is dirty or (active and empty), then we need to
   3201		 * look at the previous iclog.
   3202		 *
   3203		 * If the previous iclog is active or dirty we are done.  There
   3204		 * is nothing to sync out. Otherwise, we attach ourselves to the
   3205		 * previous iclog and go to sleep.
   3206		 */
   3207		iclog = iclog->ic_prev;
   3208	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
   3209		if (atomic_read(&iclog->ic_refcnt) == 0) {
   3210			/* We have exclusive access to this iclog. */
   3211			bool	completed;
   3212
   3213			if (xlog_force_and_check_iclog(iclog, &completed))
   3214				goto out_error;
   3215
   3216			if (completed)
   3217				goto out_unlock;
   3218		} else {
   3219			/*
   3220			 * Someone else is still writing to this iclog, so we
   3221			 * need to ensure that when they release the iclog it
   3222			 * gets synced immediately as we may be waiting on it.
   3223			 */
   3224			xlog_state_switch_iclogs(log, iclog, 0);
   3225		}
   3226	}
   3227
   3228	/*
   3229	 * The iclog we are about to wait on may contain the checkpoint pushed
   3230	 * by the above xlog_cil_force() call, but it may not have been pushed
   3231	 * to disk yet. Like the ACTIVE case above, we need to make sure caches
   3232	 * are flushed when this iclog is written.
   3233	 */
   3234	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
   3235		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
   3236
   3237	if (flags & XFS_LOG_SYNC)
   3238		return xlog_wait_on_iclog(iclog);
   3239out_unlock:
   3240	spin_unlock(&log->l_icloglock);
   3241	return 0;
   3242out_error:
   3243	spin_unlock(&log->l_icloglock);
   3244	return -EIO;
   3245}
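
        /*
         * For illustration only -- a minimal usage sketch, not part of the
         * original source. Callers choose between an asynchronous push and a
         * blocking flush purely via the flags argument handled above:
         *
         *	xfs_log_force(mp, 0);			// start the push, don't wait
         *	error = xfs_log_force(mp, XFS_LOG_SYNC);	// wait for stable storage
         *
         * where 'mp' is the usual struct xfs_mount pointer.
         */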
   3246
   3247/*
   3248 * Force the log to a specific LSN.
   3249 *
   3250 * If an iclog with that lsn can be found:
   3251 *	If it is in the DIRTY state, just return.
   3252 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
   3253 *		state and go to sleep or return.
   3254 *	If it is in any other state, go to sleep or return.
   3255 *
   3256 * Synchronous forces are implemented with a wait queue.  All callers trying
   3257 * to force a given lsn to disk must wait on the queue attached to the
    3258 * specific in-core log.  When the given in-core log finally completes its write
   3259 * to disk, that thread will wake up all threads waiting on the queue.
   3260 */
   3261static int
   3262xlog_force_lsn(
   3263	struct xlog		*log,
   3264	xfs_lsn_t		lsn,
   3265	uint			flags,
   3266	int			*log_flushed,
   3267	bool			already_slept)
   3268{
   3269	struct xlog_in_core	*iclog;
   3270	bool			completed;
   3271
   3272	spin_lock(&log->l_icloglock);
   3273	if (xlog_is_shutdown(log))
   3274		goto out_error;
   3275
   3276	iclog = log->l_iclog;
   3277	while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
   3278		trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
   3279		iclog = iclog->ic_next;
   3280		if (iclog == log->l_iclog)
   3281			goto out_unlock;
   3282	}
   3283
   3284	switch (iclog->ic_state) {
   3285	case XLOG_STATE_ACTIVE:
   3286		/*
   3287		 * We sleep here if we haven't already slept (e.g. this is the
   3288		 * first time we've looked at the correct iclog buf) and the
   3289		 * buffer before us is going to be sync'ed.  The reason for this
   3290		 * is that if we are doing sync transactions here, by waiting
   3291		 * for the previous I/O to complete, we can allow a few more
   3292		 * transactions into this iclog before we close it down.
   3293		 *
   3294		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
   3295		 * refcnt so we can release the log (which drops the ref count).
   3296		 * The state switch keeps new transaction commits from using
   3297		 * this buffer.  When the current commits finish writing into
   3298		 * the buffer, the refcount will drop to zero and the buffer
   3299		 * will go out then.
   3300		 */
   3301		if (!already_slept &&
   3302		    (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
   3303		     iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
   3304			xlog_wait(&iclog->ic_prev->ic_write_wait,
   3305					&log->l_icloglock);
   3306			return -EAGAIN;
   3307		}
   3308		if (xlog_force_and_check_iclog(iclog, &completed))
   3309			goto out_error;
   3310		if (log_flushed)
   3311			*log_flushed = 1;
   3312		if (completed)
   3313			goto out_unlock;
   3314		break;
   3315	case XLOG_STATE_WANT_SYNC:
   3316		/*
   3317		 * This iclog may contain the checkpoint pushed by the
   3318		 * xlog_cil_force_seq() call, but there are other writers still
   3319		 * accessing it so it hasn't been pushed to disk yet. Like the
   3320		 * ACTIVE case above, we need to make sure caches are flushed
   3321		 * when this iclog is written.
   3322		 */
   3323		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
   3324		break;
   3325	default:
   3326		/*
   3327		 * The entire checkpoint was written by the CIL force and is on
   3328		 * its way to disk already. It will be stable when it
   3329		 * completes, so we don't need to manipulate caches here at all.
   3330		 * We just need to wait for completion if necessary.
   3331		 */
   3332		break;
   3333	}
   3334
   3335	if (flags & XFS_LOG_SYNC)
   3336		return xlog_wait_on_iclog(iclog);
   3337out_unlock:
   3338	spin_unlock(&log->l_icloglock);
   3339	return 0;
   3340out_error:
   3341	spin_unlock(&log->l_icloglock);
   3342	return -EIO;
   3343}
   3344
   3345/*
   3346 * Force the log to a specific checkpoint sequence.
   3347 *
   3348 * First force the CIL so that all the required changes have been flushed to the
    3349 * iclogs. If the CIL force completed, it will return a commit LSN that indicates
   3350 * the iclog that needs to be flushed to stable storage. If the caller needs
   3351 * a synchronous log force, we will wait on the iclog with the LSN returned by
   3352 * xlog_cil_force_seq() to be completed.
   3353 */
   3354int
   3355xfs_log_force_seq(
   3356	struct xfs_mount	*mp,
   3357	xfs_csn_t		seq,
   3358	uint			flags,
   3359	int			*log_flushed)
   3360{
   3361	struct xlog		*log = mp->m_log;
   3362	xfs_lsn_t		lsn;
   3363	int			ret;
   3364	ASSERT(seq != 0);
   3365
   3366	XFS_STATS_INC(mp, xs_log_force);
   3367	trace_xfs_log_force(mp, seq, _RET_IP_);
   3368
   3369	lsn = xlog_cil_force_seq(log, seq);
   3370	if (lsn == NULLCOMMITLSN)
   3371		return 0;
   3372
   3373	ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
   3374	if (ret == -EAGAIN) {
   3375		XFS_STATS_INC(mp, xs_log_force_sleep);
   3376		ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
   3377	}
   3378	return ret;
   3379}
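
        /*
         * For illustration only -- a minimal usage sketch, not part of the
         * original source. A caller that has recorded the CIL commit sequence
         * of its transaction can force just that checkpoint and, optionally,
         * learn whether a flush actually happened:
         *
         *	int	log_flushed = 0;
         *
         *	error = xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, &log_flushed);
         *
         * The -EAGAIN retry with already_slept = true is internal to this
         * function; callers only ever see the final return value.
         */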
   3380
   3381/*
   3382 * Free a used ticket when its refcount falls to zero.
   3383 */
   3384void
   3385xfs_log_ticket_put(
   3386	xlog_ticket_t	*ticket)
   3387{
   3388	ASSERT(atomic_read(&ticket->t_ref) > 0);
   3389	if (atomic_dec_and_test(&ticket->t_ref))
   3390		kmem_cache_free(xfs_log_ticket_cache, ticket);
   3391}
   3392
   3393xlog_ticket_t *
   3394xfs_log_ticket_get(
   3395	xlog_ticket_t	*ticket)
   3396{
   3397	ASSERT(atomic_read(&ticket->t_ref) > 0);
   3398	atomic_inc(&ticket->t_ref);
   3399	return ticket;
   3400}
   3401
   3402/*
   3403 * Figure out the total log space unit (in bytes) that would be
   3404 * required for a log ticket.
   3405 */
   3406static int
   3407xlog_calc_unit_res(
   3408	struct xlog		*log,
   3409	int			unit_bytes)
   3410{
   3411	int			iclog_space;
   3412	uint			num_headers;
   3413
   3414	/*
   3415	 * Permanent reservations have up to 'cnt'-1 active log operations
   3416	 * in the log.  A unit in this case is the amount of space for one
   3417	 * of these log operations.  Normal reservations have a cnt of 1
   3418	 * and their unit amount is the total amount of space required.
   3419	 *
   3420	 * The following lines of code account for non-transaction data
   3421	 * which occupy space in the on-disk log.
   3422	 *
   3423	 * Normal form of a transaction is:
   3424	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
   3425	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
   3426	 *
   3427	 * We need to account for all the leadup data and trailer data
   3428	 * around the transaction data.
   3429	 * And then we need to account for the worst case in terms of using
   3430	 * more space.
   3431	 * The worst case will happen if:
   3432	 * - the placement of the transaction happens to be such that the
   3433	 *   roundoff is at its maximum
   3434	 * - the transaction data is synced before the commit record is synced
   3435	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
   3436	 *   Therefore the commit record is in its own Log Record.
   3437	 *   This can happen as the commit record is called with its
   3438	 *   own region to xlog_write().
   3439	 *   This then means that in the worst case, roundoff can happen for
   3440	 *   the commit-rec as well.
   3441	 *   The commit-rec is smaller than padding in this scenario and so it is
   3442	 *   not added separately.
   3443	 */
   3444
   3445	/* for trans header */
   3446	unit_bytes += sizeof(xlog_op_header_t);
   3447	unit_bytes += sizeof(xfs_trans_header_t);
   3448
   3449	/* for start-rec */
   3450	unit_bytes += sizeof(xlog_op_header_t);
   3451
   3452	/*
   3453	 * for LR headers - the space for data in an iclog is the size minus
   3454	 * the space used for the headers. If we use the iclog size, then we
   3455	 * undercalculate the number of headers required.
   3456	 *
   3457	 * Furthermore - the addition of op headers for split-recs might
   3458	 * increase the space required enough to require more log and op
   3459	 * headers, so take that into account too.
   3460	 *
   3461	 * IMPORTANT: This reservation makes the assumption that if this
   3462	 * transaction is the first in an iclog and hence has the LR headers
   3463	 * accounted to it, then the remaining space in the iclog is
   3464	 * exclusively for this transaction.  i.e. if the transaction is larger
   3465	 * than the iclog, it will be the only thing in that iclog.
   3466	 * Fundamentally, this means we must pass the entire log vector to
   3467	 * xlog_write to guarantee this.
   3468	 */
   3469	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
   3470	num_headers = howmany(unit_bytes, iclog_space);
   3471
   3472	/* for split-recs - ophdrs added when data split over LRs */
   3473	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
   3474
   3475	/* add extra header reservations if we overrun */
   3476	while (!num_headers ||
   3477	       howmany(unit_bytes, iclog_space) > num_headers) {
   3478		unit_bytes += sizeof(xlog_op_header_t);
   3479		num_headers++;
   3480	}
   3481	unit_bytes += log->l_iclog_hsize * num_headers;
   3482
   3483	/* for commit-rec LR header - note: padding will subsume the ophdr */
   3484	unit_bytes += log->l_iclog_hsize;
   3485
   3486	/* roundoff padding for transaction data and one for commit record */
   3487	unit_bytes += 2 * log->l_iclog_roundoff;
   3488
   3489	return unit_bytes;
   3490}
   3491
   3492int
   3493xfs_log_calc_unit_res(
   3494	struct xfs_mount	*mp,
   3495	int			unit_bytes)
   3496{
   3497	return xlog_calc_unit_res(mp->m_log, unit_bytes);
   3498}
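
        /*
         * Illustrative worked example, not part of the original source, using
         * purely assumed figures: a 32k iclog with a 512 byte header (so
         * iclog_space = 32256), 12 byte op headers, a 16 byte transaction
         * header and 512 byte roundoff. A request of unit_bytes = 100000 then
         * grows as follows:
         *
         *	100000 + 12 + 16 + 12	= 100040	trans hdr, its ophdr, start-rec
         *	howmany(100040, 32256)	= 4		LR headers needed
         *	100040 + 4 * 12		= 100088	split-rec ophdrs (no overrun)
         *	100088 + 4 * 512	= 102136	LR headers
         *	102136 + 512		= 102648	commit-rec LR header
         *	102648 + 2 * 512	= 103672	roundoff for data + commit rec
         *
         * The real numbers depend on the log format and stripe unit; the point
         * is only to show which terms the reservation accumulates.
         */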
   3499
   3500/*
   3501 * Allocate and initialise a new log ticket.
   3502 */
   3503struct xlog_ticket *
   3504xlog_ticket_alloc(
   3505	struct xlog		*log,
   3506	int			unit_bytes,
   3507	int			cnt,
   3508	bool			permanent)
   3509{
   3510	struct xlog_ticket	*tic;
   3511	int			unit_res;
   3512
   3513	tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
   3514
   3515	unit_res = xlog_calc_unit_res(log, unit_bytes);
   3516
   3517	atomic_set(&tic->t_ref, 1);
   3518	tic->t_task		= current;
   3519	INIT_LIST_HEAD(&tic->t_queue);
   3520	tic->t_unit_res		= unit_res;
   3521	tic->t_curr_res		= unit_res;
   3522	tic->t_cnt		= cnt;
   3523	tic->t_ocnt		= cnt;
   3524	tic->t_tid		= prandom_u32();
   3525	if (permanent)
   3526		tic->t_flags |= XLOG_TIC_PERM_RESERV;
   3527
   3528	return tic;
   3529}
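
        /*
         * For illustration only -- a minimal lifecycle sketch, not part of the
         * original source. Tickets come back from xlog_ticket_alloc() with a
         * single reference (t_ref == 1); additional holders take and drop
         * references explicitly:
         *
         *	tic = xlog_ticket_alloc(log, unit_bytes, 1, false);
         *	xfs_log_ticket_get(tic);	// second holder
         *	...
         *	xfs_log_ticket_put(tic);	// second holder done
         *	xfs_log_ticket_put(tic);	// last put frees the ticket
         */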
   3530
   3531#if defined(DEBUG)
   3532/*
    3533 * Check to make sure the grant write head didn't just overlap the tail.  If
   3534 * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
   3535 * the cycles differ by exactly one and check the byte count.
   3536 *
   3537 * This check is run unlocked, so can give false positives. Rather than assert
   3538 * on failures, use a warn-once flag and a panic tag to allow the admin to
   3539 * determine if they want to panic the machine when such an error occurs. For
    3540 * debug kernels this will have the same effect as using an assert but, unlike
   3541 * an assert, it can be turned off at runtime.
   3542 */
   3543STATIC void
   3544xlog_verify_grant_tail(
   3545	struct xlog	*log)
   3546{
   3547	int		tail_cycle, tail_blocks;
   3548	int		cycle, space;
   3549
   3550	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
   3551	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
   3552	if (tail_cycle != cycle) {
   3553		if (cycle - 1 != tail_cycle &&
   3554		    !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
   3555			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
   3556				"%s: cycle - 1 != tail_cycle", __func__);
   3557		}
   3558
   3559		if (space > BBTOB(tail_blocks) &&
   3560		    !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
   3561			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
   3562				"%s: space > BBTOB(tail_blocks)", __func__);
   3563		}
   3564	}
   3565}
   3566
   3567/* check if it will fit */
   3568STATIC void
   3569xlog_verify_tail_lsn(
   3570	struct xlog		*log,
   3571	struct xlog_in_core	*iclog)
   3572{
   3573	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
   3574	int		blocks;
   3575
    3576	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
    3577		blocks = log->l_logBBsize -
    3578				(log->l_prev_block - BLOCK_LSN(tail_lsn));
    3579		if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
    3580			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
    3581	} else {
    3582		ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);
    3583
    3584		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
    3585			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
    3586
    3587		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
    3588		if (blocks < BTOBB(iclog->ic_offset) + 1)
    3589			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
    3590	}
   3591}
   3592
   3593/*
   3594 * Perform a number of checks on the iclog before writing to disk.
   3595 *
   3596 * 1. Make sure the iclogs are still circular
   3597 * 2. Make sure we have a good magic number
   3598 * 3. Make sure we don't have magic numbers in the data
   3599 * 4. Check fields of each log operation header for:
   3600 *	A. Valid client identifier
   3601 *	B. tid ptr value falls in valid ptr space (user space code)
   3602 *	C. Length in log record header is correct according to the
   3603 *		individual operation headers within record.
   3604 * 5. When a bwrite will occur within 5 blocks of the front of the physical
   3605 *	log, check the preceding blocks of the physical log to make sure all
   3606 *	the cycle numbers agree with the current cycle number.
   3607 */
   3608STATIC void
   3609xlog_verify_iclog(
   3610	struct xlog		*log,
   3611	struct xlog_in_core	*iclog,
   3612	int			count)
   3613{
   3614	xlog_op_header_t	*ophead;
   3615	xlog_in_core_t		*icptr;
   3616	xlog_in_core_2_t	*xhdr;
   3617	void			*base_ptr, *ptr, *p;
   3618	ptrdiff_t		field_offset;
   3619	uint8_t			clientid;
   3620	int			len, i, j, k, op_len;
   3621	int			idx;
   3622
   3623	/* check validity of iclog pointers */
   3624	spin_lock(&log->l_icloglock);
   3625	icptr = log->l_iclog;
   3626	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
   3627		ASSERT(icptr);
   3628
   3629	if (icptr != log->l_iclog)
   3630		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
   3631	spin_unlock(&log->l_icloglock);
   3632
   3633	/* check log magic numbers */
   3634	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
   3635		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
   3636
   3637	base_ptr = ptr = &iclog->ic_header;
   3638	p = &iclog->ic_header;
   3639	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
   3640		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
   3641			xfs_emerg(log->l_mp, "%s: unexpected magic num",
   3642				__func__);
   3643	}
   3644
   3645	/* check fields */
   3646	len = be32_to_cpu(iclog->ic_header.h_num_logops);
   3647	base_ptr = ptr = iclog->ic_datap;
   3648	ophead = ptr;
   3649	xhdr = iclog->ic_data;
   3650	for (i = 0; i < len; i++) {
   3651		ophead = ptr;
   3652
   3653		/* clientid is only 1 byte */
   3654		p = &ophead->oh_clientid;
   3655		field_offset = p - base_ptr;
   3656		if (field_offset & 0x1ff) {
   3657			clientid = ophead->oh_clientid;
   3658		} else {
   3659			idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
   3660			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
   3661				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
   3662				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
   3663				clientid = xlog_get_client_id(
   3664					xhdr[j].hic_xheader.xh_cycle_data[k]);
   3665			} else {
   3666				clientid = xlog_get_client_id(
   3667					iclog->ic_header.h_cycle_data[idx]);
   3668			}
   3669		}
   3670		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
   3671			xfs_warn(log->l_mp,
   3672				"%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
   3673				__func__, i, clientid, ophead,
   3674				(unsigned long)field_offset);
   3675		}
   3676
   3677		/* check length */
   3678		p = &ophead->oh_len;
   3679		field_offset = p - base_ptr;
   3680		if (field_offset & 0x1ff) {
   3681			op_len = be32_to_cpu(ophead->oh_len);
   3682		} else {
   3683			idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
   3684			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
   3685				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
   3686				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
   3687				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
   3688			} else {
   3689				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
   3690			}
   3691		}
   3692		ptr += sizeof(xlog_op_header_t) + op_len;
   3693	}
   3694}
   3695#endif
   3696
   3697/*
   3698 * Perform a forced shutdown on the log.
   3699 *
   3700 * This can be called from low level log code to trigger a shutdown, or from the
   3701 * high level mount shutdown code when the mount shuts down.
   3702 *
   3703 * Our main objectives here are to make sure that:
   3704 *	a. if the shutdown was not due to a log IO error, flush the logs to
   3705 *	   disk. Anything modified after this is ignored.
   3706 *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
   3707 *	   parties to find out. Nothing new gets queued after this is done.
   3708 *	c. Tasks sleeping on log reservations, pinned objects and
   3709 *	   other resources get woken up.
   3710 *	d. The mount is also marked as shut down so that log triggered shutdowns
   3711 *	   still behave the same as if they called xfs_forced_shutdown().
   3712 *
   3713 * Return true if the shutdown cause was a log IO error and we actually shut the
   3714 * log down.
   3715 */
   3716bool
   3717xlog_force_shutdown(
   3718	struct xlog	*log,
   3719	uint32_t	shutdown_flags)
   3720{
   3721	bool		log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
   3722
   3723	if (!log)
   3724		return false;
   3725
   3726	/*
   3727	 * Flush all the completed transactions to disk before marking the log
    3728	 * as shut down. We need to do this first as shutting down the log
   3729	 * before the force will prevent the log force from flushing the iclogs
   3730	 * to disk.
   3731	 *
   3732	 * When we are in recovery, there are no transactions to flush, and
   3733	 * we don't want to touch the log because we don't want to perturb the
   3734	 * current head/tail for future recovery attempts. Hence we need to
   3735	 * avoid a log force in this case.
   3736	 *
   3737	 * If we are shutting down due to a log IO error, then we must avoid
   3738	 * trying to write the log as that may just result in more IO errors and
   3739	 * an endless shutdown/force loop.
   3740	 */
   3741	if (!log_error && !xlog_in_recovery(log))
   3742		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
   3743
   3744	/*
   3745	 * Atomically set the shutdown state. If the shutdown state is already
    3746	 * set, then someone else is performing the shutdown and so we are done
   3747	 * here. This should never happen because we should only ever get called
   3748	 * once by the first shutdown caller.
   3749	 *
   3750	 * Much of the log state machine transitions assume that shutdown state
   3751	 * cannot change once they hold the log->l_icloglock. Hence we need to
   3752	 * hold that lock here, even though we use the atomic test_and_set_bit()
   3753	 * operation to set the shutdown state.
   3754	 */
   3755	spin_lock(&log->l_icloglock);
   3756	if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
   3757		spin_unlock(&log->l_icloglock);
   3758		return false;
   3759	}
   3760	spin_unlock(&log->l_icloglock);
   3761
   3762	/*
   3763	 * If this log shutdown also sets the mount shutdown state, issue a
   3764	 * shutdown warning message.
   3765	 */
   3766	if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
   3767		xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
   3768"Filesystem has been shut down due to log error (0x%x).",
   3769				shutdown_flags);
   3770		xfs_alert(log->l_mp,
   3771"Please unmount the filesystem and rectify the problem(s).");
   3772		if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
   3773			xfs_stack_trace();
   3774	}
   3775
   3776	/*
   3777	 * We don't want anybody waiting for log reservations after this. That
   3778	 * means we have to wake up everybody queued up on reserveq as well as
   3779	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
   3780	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
   3781	 * action is protected by the grant locks.
   3782	 */
   3783	xlog_grant_head_wake_all(&log->l_reserve_head);
   3784	xlog_grant_head_wake_all(&log->l_write_head);
   3785
   3786	/*
   3787	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
   3788	 * as if the log writes were completed. The abort handling in the log
   3789	 * item committed callback functions will do this again under lock to
   3790	 * avoid races.
   3791	 */
   3792	spin_lock(&log->l_cilp->xc_push_lock);
   3793	wake_up_all(&log->l_cilp->xc_start_wait);
   3794	wake_up_all(&log->l_cilp->xc_commit_wait);
   3795	spin_unlock(&log->l_cilp->xc_push_lock);
   3796
   3797	spin_lock(&log->l_icloglock);
   3798	xlog_state_shutdown_callbacks(log);
   3799	spin_unlock(&log->l_icloglock);
   3800
   3801	wake_up_var(&log->l_opstate);
   3802	return log_error;
   3803}
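
        /*
         * For illustration only -- a minimal usage sketch, not part of the
         * original source. A low level log I/O error path would typically
         * trigger the shutdown as
         *
         *	xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
         *
         * while higher level shutdown code passes its own flags through. The
         * return value only matters to callers that need to know whether this
         * call was the one that actually shut the log down due to a log I/O
         * error.
         */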
   3804
   3805STATIC int
   3806xlog_iclogs_empty(
   3807	struct xlog	*log)
   3808{
   3809	xlog_in_core_t	*iclog;
   3810
   3811	iclog = log->l_iclog;
   3812	do {
   3813		/* endianness does not matter here, zero is zero in
   3814		 * any language.
   3815		 */
   3816		if (iclog->ic_header.h_num_logops)
   3817			return 0;
   3818		iclog = iclog->ic_next;
   3819	} while (iclog != log->l_iclog);
   3820	return 1;
   3821}
   3822
   3823/*
   3824 * Verify that an LSN stamped into a piece of metadata is valid. This is
   3825 * intended for use in read verifiers on v5 superblocks.
   3826 */
   3827bool
   3828xfs_log_check_lsn(
   3829	struct xfs_mount	*mp,
   3830	xfs_lsn_t		lsn)
   3831{
   3832	struct xlog		*log = mp->m_log;
   3833	bool			valid;
   3834
   3835	/*
   3836	 * norecovery mode skips mount-time log processing and unconditionally
   3837	 * resets the in-core LSN. We can't validate in this mode, but
    3838	 * modifications are not allowed anyway, so just return true.
   3839	 */
   3840	if (xfs_has_norecovery(mp))
   3841		return true;
   3842
   3843	/*
   3844	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
   3845	 * handled by recovery and thus safe to ignore here.
   3846	 */
   3847	if (lsn == NULLCOMMITLSN)
   3848		return true;
   3849
   3850	valid = xlog_valid_lsn(mp->m_log, lsn);
   3851
   3852	/* warn the user about what's gone wrong before verifier failure */
   3853	if (!valid) {
   3854		spin_lock(&log->l_icloglock);
   3855		xfs_warn(mp,
   3856"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
   3857"Please unmount and run xfs_repair (>= v4.3) to resolve.",
   3858			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
   3859			 log->l_curr_cycle, log->l_curr_block);
   3860		spin_unlock(&log->l_icloglock);
   3861	}
   3862
   3863	return valid;
   3864}
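
        /*
         * For illustration only -- a rough sketch, not part of the original
         * source, with hypothetical names. A v5 metadata read verifier would
         * gate buffer acceptance on this check, along the lines of:
         *
         *	if (!xfs_log_check_lsn(mp, be64_to_cpu(ondisk_hdr_lsn)))
         *		goto bad_buffer;	// metadata claims a future LSN
         */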
   3865
   3866/*
   3867 * Notify the log that we're about to start using a feature that is protected
   3868 * by a log incompat feature flag.  This will prevent log covering from
   3869 * clearing those flags.
   3870 */
   3871void
   3872xlog_use_incompat_feat(
   3873	struct xlog		*log)
   3874{
   3875	down_read(&log->l_incompat_users);
   3876}
   3877
   3878/* Notify the log that we've finished using log incompat features. */
   3879void
   3880xlog_drop_incompat_feat(
   3881	struct xlog		*log)
   3882{
   3883	up_read(&log->l_incompat_users);
   3884}
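
        /*
         * For illustration only -- a minimal usage sketch, not part of the
         * original source. The two helpers above are used as a read-locked
         * pair around any work that depends on a log incompat feature
         * staying set:
         *
         *	xlog_use_incompat_feat(log);
         *	... do the work that relies on the incompat feature ...
         *	xlog_drop_incompat_feat(log);
         *
         * Holding the rwsem for read keeps log covering from clearing the
         * feature flags while they are in use.
         */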