cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

super.c (204649B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
static int ext4_validate_options(struct fs_context *fc);
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb);
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static const struct fs_parameter_spec ext4_param_specs[];

/*
 * Lock ordering
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
 *   -> page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
 *   page lock
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

static const struct fs_context_operations ext4_context_ops = {
	.parse_param	= ext4_parse_param,
	.get_tree	= ext4_get_tree,
	.reconfigure	= ext4_reconfigure,
	.free		= ext4_fc_free,
};


#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext2",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext3",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)


static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, op_flags, bh);
}

void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
			 bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io);
}

int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
{
	if (trylock_buffer(bh)) {
		if (wait)
			return ext4_read_bh(bh, op_flags, NULL);
		ext4_read_bh_nowait(bh, op_flags, NULL);
		return 0;
	}
	if (wait) {
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return 0;
		return -EIO;
	}
	return 0;
}
/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block, int op_flags,
					       gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				   int op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}
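
/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * this file): because the helpers above return ERR_PTR() values rather
 * than NULL, a caller is expected to check with IS_ERR(), e.g.
 *
 *	bh = ext4_sb_bread(sb, block, REQ_PRIO);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	...
 *	brelse(bh);
 */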

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}

void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);

	if (likely(bh)) {
		ext4_read_bh_lock(bh, REQ_RAHEAD, false);
		brelse(bh);
	}
}

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

__le32 ext4_superblock_csum(struct super_block *sb,
			    struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

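/*
 * The accessors below reassemble on-disk values that the group
 * descriptor splits into _lo/_hi halves; the _hi half is only present
 * (and only consulted) when the descriptor size is at least
 * EXT4_MIN_DESC_SIZE_64BIT.  As a worked example, a block bitmap at
 * block 0x100000002 is stored as bg_block_bitmap_lo = 0x00000002 and
 * bg_block_bitmap_hi = 0x00000001.
 */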
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
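
/*
 * Worked example for the 40-bit timestamps above: now = 0x123456789a
 * seconds is stored as *lo = cpu_to_le32(0x3456789a) and *hi = 0x12;
 * clamp_val() caps values at 2^40 - 1, i.e. roughly the year 36812.
 */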

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}
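
/*
 * Note on the loop above: s_md_lock is a spinlock, so it is dropped
 * around the jce->jce_func() call, which may block; list_del_init()
 * detaches the entry beforehand so the list remains consistent while
 * the lock is released.
 */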
/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages and calls
 * clear_page_dirty_for_io(), which we want because it write-protects
 * the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}

static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = jbd2_journal_submit_inode_data_buffers(jinode);

	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};

static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}
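
/*
 * For example, ext4_errno_to_code(EIO) yields EXT4_ERR_EIO via the
 * table above, while an errno without an entry (say EPERM) falls
 * through to EXT4_ERR_UNKNOWN.
 */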

static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (!continue_fs && !sb_rdonly(sb)) {
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to writeout
		 * superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here so we
		 * defer superblock flushing to a workqueue.
		 */
		if (continue_fs && journal)
			schedule_work(&EXT4_SB(sb)->s_error_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}

	if (sb_rdonly(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * Make sure updated value of ->s_mount_flags will be visible before
	 * ->s_flags update
	 */
	smp_wmb();
	sb->s_flags |= SB_RDONLY;
}

static void flush_stashed_error_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_error_work);
	journal_t *journal = sbi->s_journal;
	handle_t *handle;

	/*
	 * If the journal is still running, we have to write out superblock
	 * through the journal to avoid collisions of other journalled sb
	 * updates.
	 *
	 * We use directly jbd2 functions here to avoid recursing back into
	 * ext4 error handling code during handling of previous errors.
	 */
	if (!sb_rdonly(sbi->s_sb) && journal) {
		struct buffer_head *sbh = sbi->s_sbh;
		handle = jbd2_journal_start(journal, 1);
		if (IS_ERR(handle))
			goto write_directly;
		if (jbd2_journal_get_write_access(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		ext4_update_super(sbi->s_sb);
		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
				 "superblock detected");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}

		if (jbd2_journal_dirty_metadata(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		jbd2_journal_stop(handle);
		ext4_notify_error_sysfs(sbi);
		return;
	}
write_directly:
	/*
	 * Write through journal failed. Write sb directly to get error info
	 * out and hope for the best.
	 */
	ext4_commit_super(sbi->s_sb);
	ext4_notify_error_sysfs(sbi);
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(sb, NULL, error ? error : EFSCORRUPTED);

	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, error ? error : EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}
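
/*
 * For example, ext4_decode_error(sb, -EIO, nbuf) returns "IO failure";
 * an unrecognized errno is formatted into the caller-supplied nbuf as
 * "error <num>", and NULL is returned if no buffer was passed in.
 */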

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
	fsnotify_sb_error(sb, NULL, errno ? errno : EFSCORRUPTED);

	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (sb) {
		atomic_inc(&EXT4_SB(sb)->s_msg_count);
		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
				  "EXT4-fs"))
			return;
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	else
		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
	va_end(args);
}

static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		if (!bdev_read_only(sb->s_bdev)) {
			save_error_info(sb, EFSCORRUPTED, ino, block, function,
					line);
			schedule_work(&EXT4_SB(sb)->s_error_work);
		}
		return;
	}
	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				     ext4_group_t group,
				     unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR,
		 "failed to open journal device unknown-block(%u,%u) %ld",
		 MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->s_journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->s_journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head **group_desc;
	struct flex_groups **flex_groups;
	int aborted = 0;
	int i, err;
	/*
	 * Unregister sysfs before destroying the jbd2 journal: the
	 * attr_journal_task attribute could otherwise still be read via
	 * sysfs while sbi->s_journal->j_task is already NULL.
	 *
	 * Also unregister sysfs before flushing sbi->s_error_work: a user
	 * reading /proc/fs/ext4/xx/mb_groups during umount can fail
	 * metadata verification and queue error work, and
	 * flush_stashed_error_work() would then call start_this_handle()
	 * and may trigger a BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem.");

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	flush_work(&sbi->s_error_work);
	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb);

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
		ext4_blkdev_remove(sbi);
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	atomic_set(&ei->i_prealloc_active, 0);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}

	if (EXT4_I(inode)->i_reserved_data_blocks)
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}
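
/*
 * Note: kmem_cache_create_usercopy() whitelists only the i_data array
 * within struct ext4_inode_info for copies to/from user space; the
 * offsetof()/sizeof_field() pair above delimits that region.
 */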
   1425
   1426static void destroy_inodecache(void)
   1427{
   1428	/*
   1429	 * Make sure all delayed rcu free inodes are flushed before we
   1430	 * destroy cache.
   1431	 */
   1432	rcu_barrier();
   1433	kmem_cache_destroy(ext4_inode_cachep);
   1434}
   1435
   1436void ext4_clear_inode(struct inode *inode)
   1437{
   1438	ext4_fc_del(inode);
   1439	invalidate_inode_buffers(inode);
   1440	clear_inode(inode);
   1441	ext4_discard_preallocations(inode, 0);
   1442	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
   1443	dquot_drop(inode);
   1444	if (EXT4_I(inode)->jinode) {
   1445		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
   1446					       EXT4_I(inode)->jinode);
   1447		jbd2_free_inode(EXT4_I(inode)->jinode);
   1448		EXT4_I(inode)->jinode = NULL;
   1449	}
   1450	fscrypt_put_encryption_info(inode);
   1451	fsverity_cleanup_inode(inode);
   1452}
   1453
   1454static struct inode *ext4_nfs_get_inode(struct super_block *sb,
   1455					u64 ino, u32 generation)
   1456{
   1457	struct inode *inode;
   1458
   1459	/*
   1460	 * Currently we don't know the generation for parent directory, so
   1461	 * a generation of 0 means "accept any"
   1462	 */
   1463	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
   1464	if (IS_ERR(inode))
   1465		return ERR_CAST(inode);
   1466	if (generation && inode->i_generation != generation) {
   1467		iput(inode);
   1468		return ERR_PTR(-ESTALE);
   1469	}
   1470
   1471	return inode;
   1472}
   1473
   1474static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
   1475					int fh_len, int fh_type)
   1476{
   1477	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
   1478				    ext4_nfs_get_inode);
   1479}
   1480
   1481static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
   1482					int fh_len, int fh_type)
   1483{
   1484	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
   1485				    ext4_nfs_get_inode);
   1486}
   1487
   1488static int ext4_nfs_commit_metadata(struct inode *inode)
   1489{
   1490	struct writeback_control wbc = {
   1491		.sync_mode = WB_SYNC_ALL
   1492	};
   1493
   1494	trace_ext4_nfs_commit_metadata(inode);
   1495	return ext4_write_inode(inode, &wbc);
   1496}
   1497
   1498#ifdef CONFIG_QUOTA
   1499static const char * const quotatypes[] = INITQFNAMES;
   1500#define QTYPE2NAME(t) (quotatypes[t])
   1501
   1502static int ext4_write_dquot(struct dquot *dquot);
   1503static int ext4_acquire_dquot(struct dquot *dquot);
   1504static int ext4_release_dquot(struct dquot *dquot);
   1505static int ext4_mark_dquot_dirty(struct dquot *dquot);
   1506static int ext4_write_info(struct super_block *sb, int type);
   1507static int ext4_quota_on(struct super_block *sb, int type, int format_id,
   1508			 const struct path *path);
   1509static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
   1510			       size_t len, loff_t off);
   1511static ssize_t ext4_quota_write(struct super_block *sb, int type,
   1512				const char *data, size_t len, loff_t off);
   1513static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
   1514			     unsigned int flags);
   1515
   1516static struct dquot **ext4_get_dquots(struct inode *inode)
   1517{
   1518	return EXT4_I(inode)->i_dquot;
   1519}
   1520
   1521static const struct dquot_operations ext4_quota_operations = {
   1522	.get_reserved_space	= ext4_get_reserved_space,
   1523	.write_dquot		= ext4_write_dquot,
   1524	.acquire_dquot		= ext4_acquire_dquot,
   1525	.release_dquot		= ext4_release_dquot,
   1526	.mark_dirty		= ext4_mark_dquot_dirty,
   1527	.write_info		= ext4_write_info,
   1528	.alloc_dquot		= dquot_alloc,
   1529	.destroy_dquot		= dquot_destroy,
   1530	.get_projid		= ext4_get_projid,
   1531	.get_inode_usage	= ext4_get_inode_usage,
   1532	.get_next_id		= dquot_get_next_id,
   1533};
   1534
   1535static const struct quotactl_ops ext4_qctl_operations = {
   1536	.quota_on	= ext4_quota_on,
   1537	.quota_off	= ext4_quota_off,
   1538	.quota_sync	= dquot_quota_sync,
   1539	.get_state	= dquot_get_state,
   1540	.set_info	= dquot_set_dqinfo,
   1541	.get_dqblk	= dquot_get_dqblk,
   1542	.set_dqblk	= dquot_set_dqblk,
   1543	.get_nextdqblk	= dquot_get_next_dqblk,
   1544};
   1545#endif
   1546
   1547static const struct super_operations ext4_sops = {
   1548	.alloc_inode	= ext4_alloc_inode,
   1549	.free_inode	= ext4_free_in_core_inode,
   1550	.destroy_inode	= ext4_destroy_inode,
   1551	.write_inode	= ext4_write_inode,
   1552	.dirty_inode	= ext4_dirty_inode,
   1553	.drop_inode	= ext4_drop_inode,
   1554	.evict_inode	= ext4_evict_inode,
   1555	.put_super	= ext4_put_super,
   1556	.sync_fs	= ext4_sync_fs,
   1557	.freeze_fs	= ext4_freeze,
   1558	.unfreeze_fs	= ext4_unfreeze,
   1559	.statfs		= ext4_statfs,
   1560	.show_options	= ext4_show_options,
   1561#ifdef CONFIG_QUOTA
   1562	.quota_read	= ext4_quota_read,
   1563	.quota_write	= ext4_quota_write,
   1564	.get_dquots	= ext4_get_dquots,
   1565#endif
   1566};
   1567
   1568static const struct export_operations ext4_export_ops = {
   1569	.fh_to_dentry = ext4_fh_to_dentry,
   1570	.fh_to_parent = ext4_fh_to_parent,
   1571	.get_parent = ext4_get_parent,
   1572	.commit_metadata = ext4_nfs_commit_metadata,
   1573};
   1574
   1575enum {
   1576	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
   1577	Opt_resgid, Opt_resuid, Opt_sb,
   1578	Opt_nouid32, Opt_debug, Opt_removed,
   1579	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
   1580	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
   1581	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
   1582	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
   1583	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
   1584	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
   1585	Opt_inlinecrypt,
   1586	Opt_usrjquota, Opt_grpjquota, Opt_quota,
   1587	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
   1588	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
   1589	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
   1590	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
   1591	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
   1592	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
   1593	Opt_inode_readahead_blks, Opt_journal_ioprio,
   1594	Opt_dioread_nolock, Opt_dioread_lock,
   1595	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
   1596	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
   1597	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
   1598	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
   1599#ifdef CONFIG_EXT4_DEBUG
   1600	Opt_fc_debug_max_replay, Opt_fc_debug_force
   1601#endif
   1602};
   1603
   1604static const struct constant_table ext4_param_errors[] = {
   1605	{"continue",	EXT4_MOUNT_ERRORS_CONT},
   1606	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
   1607	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
   1608	{}
   1609};
   1610
   1611static const struct constant_table ext4_param_data[] = {
   1612	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
   1613	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
   1614	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
   1615	{}
   1616};
   1617
   1618static const struct constant_table ext4_param_data_err[] = {
   1619	{"abort",	Opt_data_err_abort},
   1620	{"ignore",	Opt_data_err_ignore},
   1621	{}
   1622};
   1623
   1624static const struct constant_table ext4_param_jqfmt[] = {
   1625	{"vfsold",	QFMT_VFS_OLD},
   1626	{"vfsv0",	QFMT_VFS_V0},
   1627	{"vfsv1",	QFMT_VFS_V1},
   1628	{}
   1629};
   1630
   1631static const struct constant_table ext4_param_dax[] = {
   1632	{"always",	Opt_dax_always},
   1633	{"inode",	Opt_dax_inode},
   1634	{"never",	Opt_dax_never},
   1635	{}
   1636};
   1637
   1638/* String parameter that allows empty argument */
   1639#define fsparam_string_empty(NAME, OPT) \
   1640	__fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
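
        /*
         * Example (illustrative): "usrjquota=aquota.user" reaches
         * ext4_parse_param() as a string parameter, while a bare
         * "usrjquota=" yields an empty string that the Opt_usrjquota
         * handler uses to clear a previously configured quota file
         * name.  Without fs_param_can_be_empty, fs_parse() would
         * reject the empty value outright.
         */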
   1641
   1642/*
   1643 * Mount option specification
   1644 * We don't use fsparam_flag_no because of the way we set the
   1645 * options and the way we show them in _ext4_show_options(). To
   1646 * keep the changes to a minimum, let's keep the negative options
   1647 * separate for now.
   1648 */
   1649static const struct fs_parameter_spec ext4_param_specs[] = {
   1650	fsparam_flag	("bsddf",		Opt_bsd_df),
   1651	fsparam_flag	("minixdf",		Opt_minix_df),
   1652	fsparam_flag	("grpid",		Opt_grpid),
   1653	fsparam_flag	("bsdgroups",		Opt_grpid),
   1654	fsparam_flag	("nogrpid",		Opt_nogrpid),
   1655	fsparam_flag	("sysvgroups",		Opt_nogrpid),
   1656	fsparam_u32	("resgid",		Opt_resgid),
   1657	fsparam_u32	("resuid",		Opt_resuid),
   1658	fsparam_u32	("sb",			Opt_sb),
   1659	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
   1660	fsparam_flag	("nouid32",		Opt_nouid32),
   1661	fsparam_flag	("debug",		Opt_debug),
   1662	fsparam_flag	("oldalloc",		Opt_removed),
   1663	fsparam_flag	("orlov",		Opt_removed),
   1664	fsparam_flag	("user_xattr",		Opt_user_xattr),
   1665	fsparam_flag	("nouser_xattr",	Opt_nouser_xattr),
   1666	fsparam_flag	("acl",			Opt_acl),
   1667	fsparam_flag	("noacl",		Opt_noacl),
   1668	fsparam_flag	("norecovery",		Opt_noload),
   1669	fsparam_flag	("noload",		Opt_noload),
   1670	fsparam_flag	("bh",			Opt_removed),
   1671	fsparam_flag	("nobh",		Opt_removed),
   1672	fsparam_u32	("commit",		Opt_commit),
   1673	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
   1674	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
   1675	fsparam_u32	("journal_dev",		Opt_journal_dev),
   1676	fsparam_bdev	("journal_path",	Opt_journal_path),
   1677	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
   1678	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
    1679	fsparam_flag	("journal_async_commit", Opt_journal_async_commit),
   1680	fsparam_flag	("abort",		Opt_abort),
   1681	fsparam_enum	("data",		Opt_data, ext4_param_data),
   1682	fsparam_enum	("data_err",		Opt_data_err,
   1683						ext4_param_data_err),
   1684	fsparam_string_empty
   1685			("usrjquota",		Opt_usrjquota),
   1686	fsparam_string_empty
   1687			("grpjquota",		Opt_grpjquota),
   1688	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
   1689	fsparam_flag	("grpquota",		Opt_grpquota),
   1690	fsparam_flag	("quota",		Opt_quota),
   1691	fsparam_flag	("noquota",		Opt_noquota),
   1692	fsparam_flag	("usrquota",		Opt_usrquota),
   1693	fsparam_flag	("prjquota",		Opt_prjquota),
   1694	fsparam_flag	("barrier",		Opt_barrier),
   1695	fsparam_u32	("barrier",		Opt_barrier),
   1696	fsparam_flag	("nobarrier",		Opt_nobarrier),
   1697	fsparam_flag	("i_version",		Opt_i_version),
   1698	fsparam_flag	("dax",			Opt_dax),
   1699	fsparam_enum	("dax",			Opt_dax_type, ext4_param_dax),
   1700	fsparam_u32	("stripe",		Opt_stripe),
   1701	fsparam_flag	("delalloc",		Opt_delalloc),
   1702	fsparam_flag	("nodelalloc",		Opt_nodelalloc),
   1703	fsparam_flag	("warn_on_error",	Opt_warn_on_error),
   1704	fsparam_flag	("nowarn_on_error",	Opt_nowarn_on_error),
   1705	fsparam_u32	("debug_want_extra_isize",
   1706						Opt_debug_want_extra_isize),
   1707	fsparam_flag	("mblk_io_submit",	Opt_removed),
   1708	fsparam_flag	("nomblk_io_submit",	Opt_removed),
   1709	fsparam_flag	("block_validity",	Opt_block_validity),
   1710	fsparam_flag	("noblock_validity",	Opt_noblock_validity),
   1711	fsparam_u32	("inode_readahead_blks",
   1712						Opt_inode_readahead_blks),
   1713	fsparam_u32	("journal_ioprio",	Opt_journal_ioprio),
   1714	fsparam_u32	("auto_da_alloc",	Opt_auto_da_alloc),
   1715	fsparam_flag	("auto_da_alloc",	Opt_auto_da_alloc),
   1716	fsparam_flag	("noauto_da_alloc",	Opt_noauto_da_alloc),
   1717	fsparam_flag	("dioread_nolock",	Opt_dioread_nolock),
   1718	fsparam_flag	("nodioread_nolock",	Opt_dioread_lock),
   1719	fsparam_flag	("dioread_lock",	Opt_dioread_lock),
   1720	fsparam_flag	("discard",		Opt_discard),
   1721	fsparam_flag	("nodiscard",		Opt_nodiscard),
   1722	fsparam_u32	("init_itable",		Opt_init_itable),
   1723	fsparam_flag	("init_itable",		Opt_init_itable),
   1724	fsparam_flag	("noinit_itable",	Opt_noinit_itable),
   1725#ifdef CONFIG_EXT4_DEBUG
   1726	fsparam_flag	("fc_debug_force",	Opt_fc_debug_force),
   1727	fsparam_u32	("fc_debug_max_replay",	Opt_fc_debug_max_replay),
   1728#endif
   1729	fsparam_u32	("max_dir_size_kb",	Opt_max_dir_size_kb),
   1730	fsparam_flag	("test_dummy_encryption",
   1731						Opt_test_dummy_encryption),
   1732	fsparam_string	("test_dummy_encryption",
   1733						Opt_test_dummy_encryption),
   1734	fsparam_flag	("inlinecrypt",		Opt_inlinecrypt),
   1735	fsparam_flag	("nombcache",		Opt_nombcache),
   1736	fsparam_flag	("no_mbcache",		Opt_nombcache),	/* for backward compatibility */
   1737	fsparam_flag	("prefetch_block_bitmaps",
   1738						Opt_removed),
   1739	fsparam_flag	("no_prefetch_block_bitmaps",
   1740						Opt_no_prefetch_block_bitmaps),
   1741	fsparam_s32	("mb_optimize_scan",	Opt_mb_optimize_scan),
   1742	fsparam_string	("check",		Opt_removed),	/* mount option from ext2/3 */
   1743	fsparam_flag	("nocheck",		Opt_removed),	/* mount option from ext2/3 */
   1744	fsparam_flag	("reservation",		Opt_removed),	/* mount option from ext2/3 */
   1745	fsparam_flag	("noreservation",	Opt_removed),	/* mount option from ext2/3 */
   1746	fsparam_u32	("journal",		Opt_removed),	/* mount option from ext2/3 */
   1747	{}
   1748};
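
        /*
         * Sketch of how the table above is consumed (paraphrased, not a
         * verbatim trace): for a mount string such as
         * "errors=remount-ro,stripe=32", fs_parse() matches each key
         * against ext4_param_specs and hands back a token plus a typed
         * result, e.g.
         *
         *	token = Opt_errors, result.uint_32 = EXT4_MOUNT_ERRORS_RO
         *	token = Opt_stripe, result.uint_32 = 32
         *
         * ext4_parse_param() below dispatches on that token.
         */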
   1749
   1750#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
   1751
   1752static const char deprecated_msg[] =
   1753	"Mount option \"%s\" will be removed by %s\n"
    1754	"Contact linux-ext4@vger.kernel.org if you think we should keep it.";
   1755
   1756#define MOPT_SET	0x0001
   1757#define MOPT_CLEAR	0x0002
   1758#define MOPT_NOSUPPORT	0x0004
   1759#define MOPT_EXPLICIT	0x0008
   1760#ifdef CONFIG_QUOTA
   1761#define MOPT_Q		0
   1762#define MOPT_QFMT	0x0010
   1763#else
   1764#define MOPT_Q		MOPT_NOSUPPORT
   1765#define MOPT_QFMT	MOPT_NOSUPPORT
   1766#endif
   1767#define MOPT_NO_EXT2	0x0020
   1768#define MOPT_NO_EXT3	0x0040
   1769#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
   1770#define MOPT_SKIP	0x0080
   1771#define	MOPT_2		0x0100
   1772
   1773static const struct mount_opts {
   1774	int	token;
   1775	int	mount_opt;
   1776	int	flags;
   1777} ext4_mount_opts[] = {
   1778	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
   1779	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
   1780	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
   1781	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
   1782	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
   1783	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
   1784	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
   1785	 MOPT_EXT4_ONLY | MOPT_SET},
   1786	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
   1787	 MOPT_EXT4_ONLY | MOPT_CLEAR},
   1788	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
   1789	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
   1790	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
   1791	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
   1792	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
   1793	 MOPT_EXT4_ONLY | MOPT_CLEAR},
   1794	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
   1795	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
   1796	{Opt_commit, 0, MOPT_NO_EXT2},
   1797	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
   1798	 MOPT_EXT4_ONLY | MOPT_CLEAR},
   1799	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
   1800	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
   1801	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
   1802				    EXT4_MOUNT_JOURNAL_CHECKSUM),
   1803	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
   1804	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
   1805	{Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
   1806	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
   1807	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
   1808	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
   1809	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
   1810	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
   1811	{Opt_dax_type, 0, MOPT_EXT4_ONLY},
   1812	{Opt_journal_dev, 0, MOPT_NO_EXT2},
   1813	{Opt_journal_path, 0, MOPT_NO_EXT2},
   1814	{Opt_journal_ioprio, 0, MOPT_NO_EXT2},
   1815	{Opt_data, 0, MOPT_NO_EXT2},
   1816	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
   1817	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
   1818#ifdef CONFIG_EXT4_FS_POSIX_ACL
   1819	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
   1820	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
   1821#else
   1822	{Opt_acl, 0, MOPT_NOSUPPORT},
   1823	{Opt_noacl, 0, MOPT_NOSUPPORT},
   1824#endif
   1825	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
   1826	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
   1827	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
   1828	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
   1829							MOPT_SET | MOPT_Q},
   1830	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
   1831							MOPT_SET | MOPT_Q},
   1832	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
   1833							MOPT_SET | MOPT_Q},
   1834	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
   1835		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
   1836							MOPT_CLEAR | MOPT_Q},
   1837	{Opt_usrjquota, 0, MOPT_Q},
   1838	{Opt_grpjquota, 0, MOPT_Q},
   1839	{Opt_jqfmt, 0, MOPT_QFMT},
   1840	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
   1841	{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
   1842	 MOPT_SET},
   1843#ifdef CONFIG_EXT4_DEBUG
   1844	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
   1845	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
   1846#endif
   1847	{Opt_err, 0, 0}
   1848};
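
        /*
         * Example of the generic path (illustrative): "discard" matches
         * {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, is not handled by
         * the big switch in ext4_parse_param(), and so reaches the
         * MOPT_SET/MOPT_CLEAR tail, which effectively does
         *
         *	ctx_set_mount_opt(ctx, EXT4_MOUNT_DISCARD);
         *
         * "nodiscard" hits the MOPT_CLEAR twin and clears the same bit.
         */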
   1849
   1850#if IS_ENABLED(CONFIG_UNICODE)
   1851static const struct ext4_sb_encodings {
   1852	__u16 magic;
   1853	char *name;
   1854	unsigned int version;
   1855} ext4_sb_encoding_map[] = {
   1856	{EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
   1857};
   1858
   1859static const struct ext4_sb_encodings *
   1860ext4_sb_read_encoding(const struct ext4_super_block *es)
   1861{
   1862	__u16 magic = le16_to_cpu(es->s_encoding);
   1863	int i;
   1864
   1865	for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
   1866		if (magic == ext4_sb_encoding_map[i].magic)
   1867			return &ext4_sb_encoding_map[i];
   1868
   1869	return NULL;
   1870}
   1871#endif
   1872
   1873#define EXT4_SPEC_JQUOTA			(1 <<  0)
   1874#define EXT4_SPEC_JQFMT				(1 <<  1)
   1875#define EXT4_SPEC_DATAJ				(1 <<  2)
   1876#define EXT4_SPEC_SB_BLOCK			(1 <<  3)
   1877#define EXT4_SPEC_JOURNAL_DEV			(1 <<  4)
   1878#define EXT4_SPEC_JOURNAL_IOPRIO		(1 <<  5)
   1879#define EXT4_SPEC_s_want_extra_isize		(1 <<  7)
   1880#define EXT4_SPEC_s_max_batch_time		(1 <<  8)
   1881#define EXT4_SPEC_s_min_batch_time		(1 <<  9)
   1882#define EXT4_SPEC_s_inode_readahead_blks	(1 << 10)
   1883#define EXT4_SPEC_s_li_wait_mult		(1 << 11)
   1884#define EXT4_SPEC_s_max_dir_size_kb		(1 << 12)
   1885#define EXT4_SPEC_s_stripe			(1 << 13)
   1886#define EXT4_SPEC_s_resuid			(1 << 14)
   1887#define EXT4_SPEC_s_resgid			(1 << 15)
   1888#define EXT4_SPEC_s_commit_interval		(1 << 16)
   1889#define EXT4_SPEC_s_fc_debug_max_replay		(1 << 17)
   1890#define EXT4_SPEC_s_sb_block			(1 << 18)
   1891#define EXT4_SPEC_mb_optimize_scan		(1 << 19)
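
        /*
         * Each EXT4_SPEC_* bit records that the corresponding value was
         * given explicitly as a mount option, so that ext4_apply_options()
         * copies only those fields into the ext4_sb_info (see the APPLY()
         * macro there).  Bit 6 is currently unused.
         */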
   1892
   1893struct ext4_fs_context {
   1894	char		*s_qf_names[EXT4_MAXQUOTAS];
   1895	struct fscrypt_dummy_policy dummy_enc_policy;
   1896	int		s_jquota_fmt;	/* Format of quota to use */
   1897#ifdef CONFIG_EXT4_DEBUG
   1898	int s_fc_debug_max_replay;
   1899#endif
   1900	unsigned short	qname_spec;
   1901	unsigned long	vals_s_flags;	/* Bits to set in s_flags */
   1902	unsigned long	mask_s_flags;	/* Bits changed in s_flags */
   1903	unsigned long	journal_devnum;
   1904	unsigned long	s_commit_interval;
   1905	unsigned long	s_stripe;
   1906	unsigned int	s_inode_readahead_blks;
   1907	unsigned int	s_want_extra_isize;
   1908	unsigned int	s_li_wait_mult;
   1909	unsigned int	s_max_dir_size_kb;
   1910	unsigned int	journal_ioprio;
   1911	unsigned int	vals_s_mount_opt;
   1912	unsigned int	mask_s_mount_opt;
   1913	unsigned int	vals_s_mount_opt2;
   1914	unsigned int	mask_s_mount_opt2;
   1915	unsigned long	vals_s_mount_flags;
   1916	unsigned long	mask_s_mount_flags;
   1917	unsigned int	opt_flags;	/* MOPT flags */
   1918	unsigned int	spec;
   1919	u32		s_max_batch_time;
   1920	u32		s_min_batch_time;
   1921	kuid_t		s_resuid;
   1922	kgid_t		s_resgid;
   1923	ext4_fsblk_t	s_sb_block;
   1924};
   1925
   1926static void ext4_fc_free(struct fs_context *fc)
   1927{
   1928	struct ext4_fs_context *ctx = fc->fs_private;
   1929	int i;
   1930
   1931	if (!ctx)
   1932		return;
   1933
   1934	for (i = 0; i < EXT4_MAXQUOTAS; i++)
   1935		kfree(ctx->s_qf_names[i]);
   1936
   1937	fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
   1938	kfree(ctx);
   1939}
   1940
   1941int ext4_init_fs_context(struct fs_context *fc)
   1942{
   1943	struct ext4_fs_context *ctx;
   1944
   1945	ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
   1946	if (!ctx)
   1947		return -ENOMEM;
   1948
   1949	fc->fs_private = ctx;
   1950	fc->ops = &ext4_context_ops;
   1951
   1952	return 0;
   1953}
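
        /*
         * Rough lifecycle of the context allocated here (a sketch; the
         * exact call chain is driven by the VFS fs_context code):
         *
         *	ext4_init_fs_context()          allocate ctx, install fc->ops
         *	ext4_parse_param() per option   fill ctx (vals, masks, spec)
         *	ext4_check_opt_consistency()    validate ctx against the sb
         *	ext4_apply_options()            commit ctx into the sb
         *	ext4_fc_free()                  tear the context down
         */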
   1954
   1955#ifdef CONFIG_QUOTA
   1956/*
   1957 * Note the name of the specified quota file.
   1958 */
   1959static int note_qf_name(struct fs_context *fc, int qtype,
   1960		       struct fs_parameter *param)
   1961{
   1962	struct ext4_fs_context *ctx = fc->fs_private;
   1963	char *qname;
   1964
   1965	if (param->size < 1) {
   1966		ext4_msg(NULL, KERN_ERR, "Missing quota name");
   1967		return -EINVAL;
   1968	}
   1969	if (strchr(param->string, '/')) {
   1970		ext4_msg(NULL, KERN_ERR,
   1971			 "quotafile must be on filesystem root");
   1972		return -EINVAL;
   1973	}
   1974	if (ctx->s_qf_names[qtype]) {
   1975		if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
   1976			ext4_msg(NULL, KERN_ERR,
   1977				 "%s quota file already specified",
   1978				 QTYPE2NAME(qtype));
   1979			return -EINVAL;
   1980		}
   1981		return 0;
   1982	}
   1983
   1984	qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
   1985	if (!qname) {
   1986		ext4_msg(NULL, KERN_ERR,
   1987			 "Not enough memory for storing quotafile name");
   1988		return -ENOMEM;
   1989	}
   1990	ctx->s_qf_names[qtype] = qname;
   1991	ctx->qname_spec |= 1 << qtype;
   1992	ctx->spec |= EXT4_SPEC_JQUOTA;
   1993	return 0;
   1994}
   1995
   1996/*
   1997 * Clear the name of the specified quota file.
   1998 */
   1999static int unnote_qf_name(struct fs_context *fc, int qtype)
   2000{
   2001	struct ext4_fs_context *ctx = fc->fs_private;
   2002
    2003	kfree(ctx->s_qf_names[qtype]);
    2004	ctx->s_qf_names[qtype] = NULL;
   2007	ctx->qname_spec |= 1 << qtype;
   2008	ctx->spec |= EXT4_SPEC_JQUOTA;
   2009	return 0;
   2010}
   2011#endif
   2012
   2013static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
   2014					    struct ext4_fs_context *ctx)
   2015{
   2016	int err;
   2017
   2018	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
   2019		ext4_msg(NULL, KERN_WARNING,
   2020			 "test_dummy_encryption option not supported");
   2021		return -EINVAL;
   2022	}
   2023	err = fscrypt_parse_test_dummy_encryption(param,
   2024						  &ctx->dummy_enc_policy);
   2025	if (err == -EINVAL) {
   2026		ext4_msg(NULL, KERN_WARNING,
   2027			 "Value of option \"%s\" is unrecognized", param->key);
   2028	} else if (err == -EEXIST) {
   2029		ext4_msg(NULL, KERN_WARNING,
   2030			 "Conflicting test_dummy_encryption options");
   2031		return -EINVAL;
   2032	}
   2033	return err;
   2034}
   2035
   2036#define EXT4_SET_CTX(name)						\
   2037static inline void ctx_set_##name(struct ext4_fs_context *ctx,		\
   2038				  unsigned long flag)			\
   2039{									\
   2040	ctx->mask_s_##name |= flag;					\
   2041	ctx->vals_s_##name |= flag;					\
   2042}
   2043
   2044#define EXT4_CLEAR_CTX(name)						\
   2045static inline void ctx_clear_##name(struct ext4_fs_context *ctx,	\
   2046				    unsigned long flag)			\
   2047{									\
   2048	ctx->mask_s_##name |= flag;					\
   2049	ctx->vals_s_##name &= ~flag;					\
   2050}
   2051
   2052#define EXT4_TEST_CTX(name)						\
   2053static inline unsigned long						\
   2054ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
   2055{									\
   2056	return (ctx->vals_s_##name & flag);				\
   2057}
   2058
   2059EXT4_SET_CTX(flags); /* set only */
   2060EXT4_SET_CTX(mount_opt);
   2061EXT4_CLEAR_CTX(mount_opt);
   2062EXT4_TEST_CTX(mount_opt);
   2063EXT4_SET_CTX(mount_opt2);
   2064EXT4_CLEAR_CTX(mount_opt2);
   2065EXT4_TEST_CTX(mount_opt2);
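
        /*
         * For reference, EXT4_SET_CTX(mount_opt) above expands to:
         *
         *	static inline void ctx_set_mount_opt(struct ext4_fs_context *ctx,
         *					     unsigned long flag)
         *	{
         *		ctx->mask_s_mount_opt |= flag;
         *		ctx->vals_s_mount_opt |= flag;
         *	}
         *
         * "mask" remembers which bits an option touched and "vals" the
         * value they were set to, so that only explicitly changed bits
         * are later applied to the superblock.
         */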
   2066
   2067static inline void ctx_set_mount_flag(struct ext4_fs_context *ctx, int bit)
   2068{
   2069	set_bit(bit, &ctx->mask_s_mount_flags);
   2070	set_bit(bit, &ctx->vals_s_mount_flags);
   2071}
   2072
   2073static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
   2074{
   2075	struct ext4_fs_context *ctx = fc->fs_private;
   2076	struct fs_parse_result result;
   2077	const struct mount_opts *m;
   2078	int is_remount;
   2079	kuid_t uid;
   2080	kgid_t gid;
   2081	int token;
   2082
   2083	token = fs_parse(fc, ext4_param_specs, param, &result);
   2084	if (token < 0)
   2085		return token;
   2086	is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
   2087
   2088	for (m = ext4_mount_opts; m->token != Opt_err; m++)
   2089		if (token == m->token)
   2090			break;
   2091
   2092	ctx->opt_flags |= m->flags;
   2093
   2094	if (m->flags & MOPT_EXPLICIT) {
   2095		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
   2096			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC);
   2097		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
   2098			ctx_set_mount_opt2(ctx,
   2099				       EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM);
   2100		} else
   2101			return -EINVAL;
   2102	}
   2103
   2104	if (m->flags & MOPT_NOSUPPORT) {
   2105		ext4_msg(NULL, KERN_ERR, "%s option not supported",
   2106			 param->key);
   2107		return 0;
   2108	}
   2109
   2110	switch (token) {
   2111#ifdef CONFIG_QUOTA
   2112	case Opt_usrjquota:
   2113		if (!*param->string)
   2114			return unnote_qf_name(fc, USRQUOTA);
   2115		else
   2116			return note_qf_name(fc, USRQUOTA, param);
   2117	case Opt_grpjquota:
   2118		if (!*param->string)
   2119			return unnote_qf_name(fc, GRPQUOTA);
   2120		else
   2121			return note_qf_name(fc, GRPQUOTA, param);
   2122#endif
   2123	case Opt_noacl:
   2124	case Opt_nouser_xattr:
   2125		ext4_msg(NULL, KERN_WARNING, deprecated_msg, param->key, "3.5");
   2126		break;
   2127	case Opt_sb:
   2128		if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
   2129			ext4_msg(NULL, KERN_WARNING,
   2130				 "Ignoring %s option on remount", param->key);
   2131		} else {
   2132			ctx->s_sb_block = result.uint_32;
   2133			ctx->spec |= EXT4_SPEC_s_sb_block;
   2134		}
   2135		return 0;
   2136	case Opt_removed:
   2137		ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option",
   2138			 param->key);
   2139		return 0;
   2140	case Opt_abort:
   2141		ctx_set_mount_flag(ctx, EXT4_MF_FS_ABORTED);
   2142		return 0;
   2143	case Opt_i_version:
   2144		ext4_msg(NULL, KERN_WARNING, deprecated_msg, param->key, "5.20");
    2145		ext4_msg(NULL, KERN_WARNING, "Use iversion instead");
   2146		ctx_set_flags(ctx, SB_I_VERSION);
   2147		return 0;
   2148	case Opt_inlinecrypt:
   2149#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
   2150		ctx_set_flags(ctx, SB_INLINECRYPT);
   2151#else
   2152		ext4_msg(NULL, KERN_ERR, "inline encryption not supported");
   2153#endif
   2154		return 0;
   2155	case Opt_errors:
   2156		ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK);
   2157		ctx_set_mount_opt(ctx, result.uint_32);
   2158		return 0;
   2159#ifdef CONFIG_QUOTA
   2160	case Opt_jqfmt:
   2161		ctx->s_jquota_fmt = result.uint_32;
   2162		ctx->spec |= EXT4_SPEC_JQFMT;
   2163		return 0;
   2164#endif
   2165	case Opt_data:
   2166		ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
   2167		ctx_set_mount_opt(ctx, result.uint_32);
   2168		ctx->spec |= EXT4_SPEC_DATAJ;
   2169		return 0;
   2170	case Opt_commit:
    2171		if (result.uint_32 == 0)
    2172			result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
   2173		else if (result.uint_32 > INT_MAX / HZ) {
   2174			ext4_msg(NULL, KERN_ERR,
   2175				 "Invalid commit interval %d, "
   2176				 "must be smaller than %d",
   2177				 result.uint_32, INT_MAX / HZ);
   2178			return -EINVAL;
   2179		}
   2180		ctx->s_commit_interval = HZ * result.uint_32;
   2181		ctx->spec |= EXT4_SPEC_s_commit_interval;
   2182		return 0;
   2183	case Opt_debug_want_extra_isize:
   2184		if ((result.uint_32 & 1) || (result.uint_32 < 4)) {
   2185			ext4_msg(NULL, KERN_ERR,
   2186				 "Invalid want_extra_isize %d", result.uint_32);
   2187			return -EINVAL;
   2188		}
   2189		ctx->s_want_extra_isize = result.uint_32;
   2190		ctx->spec |= EXT4_SPEC_s_want_extra_isize;
   2191		return 0;
   2192	case Opt_max_batch_time:
   2193		ctx->s_max_batch_time = result.uint_32;
   2194		ctx->spec |= EXT4_SPEC_s_max_batch_time;
   2195		return 0;
   2196	case Opt_min_batch_time:
   2197		ctx->s_min_batch_time = result.uint_32;
   2198		ctx->spec |= EXT4_SPEC_s_min_batch_time;
   2199		return 0;
   2200	case Opt_inode_readahead_blks:
   2201		if (result.uint_32 &&
   2202		    (result.uint_32 > (1 << 30) ||
   2203		     !is_power_of_2(result.uint_32))) {
   2204			ext4_msg(NULL, KERN_ERR,
    2205				 "inode_readahead_blks must be "
   2206				 "0 or a power of 2 smaller than 2^31");
   2207			return -EINVAL;
   2208		}
   2209		ctx->s_inode_readahead_blks = result.uint_32;
   2210		ctx->spec |= EXT4_SPEC_s_inode_readahead_blks;
   2211		return 0;
   2212	case Opt_init_itable:
   2213		ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE);
   2214		ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
   2215		if (param->type == fs_value_is_string)
   2216			ctx->s_li_wait_mult = result.uint_32;
   2217		ctx->spec |= EXT4_SPEC_s_li_wait_mult;
   2218		return 0;
   2219	case Opt_max_dir_size_kb:
   2220		ctx->s_max_dir_size_kb = result.uint_32;
   2221		ctx->spec |= EXT4_SPEC_s_max_dir_size_kb;
   2222		return 0;
   2223#ifdef CONFIG_EXT4_DEBUG
   2224	case Opt_fc_debug_max_replay:
   2225		ctx->s_fc_debug_max_replay = result.uint_32;
   2226		ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay;
   2227		return 0;
   2228#endif
   2229	case Opt_stripe:
   2230		ctx->s_stripe = result.uint_32;
   2231		ctx->spec |= EXT4_SPEC_s_stripe;
   2232		return 0;
   2233	case Opt_resuid:
   2234		uid = make_kuid(current_user_ns(), result.uint_32);
   2235		if (!uid_valid(uid)) {
   2236			ext4_msg(NULL, KERN_ERR, "Invalid uid value %d",
   2237				 result.uint_32);
   2238			return -EINVAL;
   2239		}
   2240		ctx->s_resuid = uid;
   2241		ctx->spec |= EXT4_SPEC_s_resuid;
   2242		return 0;
   2243	case Opt_resgid:
   2244		gid = make_kgid(current_user_ns(), result.uint_32);
   2245		if (!gid_valid(gid)) {
   2246			ext4_msg(NULL, KERN_ERR, "Invalid gid value %d",
   2247				 result.uint_32);
   2248			return -EINVAL;
   2249		}
   2250		ctx->s_resgid = gid;
   2251		ctx->spec |= EXT4_SPEC_s_resgid;
   2252		return 0;
   2253	case Opt_journal_dev:
   2254		if (is_remount) {
   2255			ext4_msg(NULL, KERN_ERR,
   2256				 "Cannot specify journal on remount");
   2257			return -EINVAL;
   2258		}
   2259		ctx->journal_devnum = result.uint_32;
   2260		ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
   2261		return 0;
   2262	case Opt_journal_path:
   2263	{
   2264		struct inode *journal_inode;
   2265		struct path path;
   2266		int error;
   2267
   2268		if (is_remount) {
   2269			ext4_msg(NULL, KERN_ERR,
   2270				 "Cannot specify journal on remount");
   2271			return -EINVAL;
   2272		}
   2273
   2274		error = fs_lookup_param(fc, param, 1, &path);
   2275		if (error) {
   2276			ext4_msg(NULL, KERN_ERR, "error: could not find "
   2277				 "journal device path");
   2278			return -EINVAL;
   2279		}
   2280
   2281		journal_inode = d_inode(path.dentry);
   2282		ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev);
   2283		ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
   2284		path_put(&path);
   2285		return 0;
   2286	}
   2287	case Opt_journal_ioprio:
   2288		if (result.uint_32 > 7) {
   2289			ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority"
   2290				 " (must be 0-7)");
   2291			return -EINVAL;
   2292		}
   2293		ctx->journal_ioprio =
   2294			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32);
   2295		ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
   2296		return 0;
   2297	case Opt_test_dummy_encryption:
   2298		return ext4_parse_test_dummy_encryption(param, ctx);
   2299	case Opt_dax:
   2300	case Opt_dax_type:
   2301#ifdef CONFIG_FS_DAX
   2302	{
   2303		int type = (token == Opt_dax) ?
   2304			   Opt_dax : result.uint_32;
   2305
   2306		switch (type) {
   2307		case Opt_dax:
   2308		case Opt_dax_always:
   2309			ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
   2310			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
   2311			break;
   2312		case Opt_dax_never:
   2313			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
   2314			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
   2315			break;
   2316		case Opt_dax_inode:
   2317			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
   2318			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
   2319			/* Strictly for printing options */
   2320			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE);
   2321			break;
   2322		}
   2323		return 0;
   2324	}
   2325#else
   2326		ext4_msg(NULL, KERN_INFO, "dax option not supported");
   2327		return -EINVAL;
   2328#endif
   2329	case Opt_data_err:
   2330		if (result.uint_32 == Opt_data_err_abort)
   2331			ctx_set_mount_opt(ctx, m->mount_opt);
   2332		else if (result.uint_32 == Opt_data_err_ignore)
   2333			ctx_clear_mount_opt(ctx, m->mount_opt);
   2334		return 0;
   2335	case Opt_mb_optimize_scan:
   2336		if (result.int_32 == 1) {
   2337			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
   2338			ctx->spec |= EXT4_SPEC_mb_optimize_scan;
   2339		} else if (result.int_32 == 0) {
   2340			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
   2341			ctx->spec |= EXT4_SPEC_mb_optimize_scan;
   2342		} else {
   2343			ext4_msg(NULL, KERN_WARNING,
   2344				 "mb_optimize_scan should be set to 0 or 1.");
   2345			return -EINVAL;
   2346		}
   2347		return 0;
   2348	}
   2349
   2350	/*
    2351	 * At this point we should only be getting options requiring MOPT_SET
    2352	 * or MOPT_CLEAR. Anything else is a bug.
   2353	 */
   2354	if (m->token == Opt_err) {
   2355		ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
   2356			 param->key);
   2357		WARN_ON(1);
   2358		return -EINVAL;
    2359	} else {
   2362		unsigned int set = 0;
   2363
   2364		if ((param->type == fs_value_is_flag) ||
   2365		    result.uint_32 > 0)
   2366			set = 1;
   2367
   2368		if (m->flags & MOPT_CLEAR)
   2369			set = !set;
   2370		else if (unlikely(!(m->flags & MOPT_SET))) {
   2371			ext4_msg(NULL, KERN_WARNING,
   2372				 "buggy handling of option %s",
   2373				 param->key);
   2374			WARN_ON(1);
   2375			return -EINVAL;
   2376		}
   2377		if (m->flags & MOPT_2) {
   2378			if (set != 0)
   2379				ctx_set_mount_opt2(ctx, m->mount_opt);
   2380			else
   2381				ctx_clear_mount_opt2(ctx, m->mount_opt);
   2382		} else {
   2383			if (set != 0)
   2384				ctx_set_mount_opt(ctx, m->mount_opt);
   2385			else
   2386				ctx_clear_mount_opt(ctx, m->mount_opt);
   2387		}
   2388	}
   2389
   2390	return 0;
   2391}
   2392
   2393static int parse_options(struct fs_context *fc, char *options)
   2394{
   2395	struct fs_parameter param;
   2396	int ret;
   2397	char *key;
   2398
   2399	if (!options)
   2400		return 0;
   2401
   2402	while ((key = strsep(&options, ",")) != NULL) {
   2403		if (*key) {
   2404			size_t v_len = 0;
   2405			char *value = strchr(key, '=');
   2406
   2407			param.type = fs_value_is_flag;
   2408			param.string = NULL;
   2409
   2410			if (value) {
   2411				if (value == key)
   2412					continue;
   2413
   2414				*value++ = 0;
   2415				v_len = strlen(value);
   2416				param.string = kmemdup_nul(value, v_len,
   2417							   GFP_KERNEL);
   2418				if (!param.string)
   2419					return -ENOMEM;
   2420				param.type = fs_value_is_string;
   2421			}
   2422
   2423			param.key = key;
   2424			param.size = v_len;
   2425
   2426			ret = ext4_parse_param(fc, &param);
    2427			kfree(param.string);
   2429			if (ret < 0)
   2430				return ret;
   2431		}
   2432	}
   2433
   2434	ret = ext4_validate_options(fc);
   2435	if (ret < 0)
   2436		return ret;
   2437
   2438	return 0;
   2439}
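
        /*
         * Example (illustrative): options = "commit=5,discard" makes the
         * loop above emit two fs_parameter values,
         *
         *	{ .key = "commit",  .type = fs_value_is_string, .string = "5" }
         *	{ .key = "discard", .type = fs_value_is_flag,   .string = NULL }
         *
         * each of which is handed to ext4_parse_param().
         */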
   2440
   2441static int parse_apply_sb_mount_options(struct super_block *sb,
   2442					struct ext4_fs_context *m_ctx)
   2443{
   2444	struct ext4_sb_info *sbi = EXT4_SB(sb);
   2445	char *s_mount_opts = NULL;
   2446	struct ext4_fs_context *s_ctx = NULL;
   2447	struct fs_context *fc = NULL;
   2448	int ret = -ENOMEM;
   2449
   2450	if (!sbi->s_es->s_mount_opts[0])
   2451		return 0;
   2452
   2453	s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
   2454				sizeof(sbi->s_es->s_mount_opts),
   2455				GFP_KERNEL);
   2456	if (!s_mount_opts)
   2457		return ret;
   2458
   2459	fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
   2460	if (!fc)
   2461		goto out_free;
   2462
   2463	s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
   2464	if (!s_ctx)
   2465		goto out_free;
   2466
   2467	fc->fs_private = s_ctx;
   2468	fc->s_fs_info = sbi;
   2469
   2470	ret = parse_options(fc, s_mount_opts);
   2471	if (ret < 0)
   2472		goto parse_failed;
   2473
   2474	ret = ext4_check_opt_consistency(fc, sb);
   2475	if (ret < 0) {
   2476parse_failed:
   2477		ext4_msg(sb, KERN_WARNING,
   2478			 "failed to parse options in superblock: %s",
   2479			 s_mount_opts);
   2480		ret = 0;
   2481		goto out_free;
   2482	}
   2483
   2484	if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
   2485		m_ctx->journal_devnum = s_ctx->journal_devnum;
   2486	if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
   2487		m_ctx->journal_ioprio = s_ctx->journal_ioprio;
   2488
   2489	ext4_apply_options(fc, sb);
   2490	ret = 0;
   2491
   2492out_free:
   2493	if (fc) {
   2494		ext4_fc_free(fc);
   2495		kfree(fc);
   2496	}
   2497	kfree(s_mount_opts);
   2498	return ret;
   2499}
   2500
   2501static void ext4_apply_quota_options(struct fs_context *fc,
   2502				     struct super_block *sb)
   2503{
   2504#ifdef CONFIG_QUOTA
   2505	bool quota_feature = ext4_has_feature_quota(sb);
   2506	struct ext4_fs_context *ctx = fc->fs_private;
   2507	struct ext4_sb_info *sbi = EXT4_SB(sb);
   2508	char *qname;
   2509	int i;
   2510
   2511	if (quota_feature)
   2512		return;
   2513
   2514	if (ctx->spec & EXT4_SPEC_JQUOTA) {
   2515		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
   2516			if (!(ctx->qname_spec & (1 << i)))
   2517				continue;
   2518
   2519			qname = ctx->s_qf_names[i]; /* May be NULL */
   2520			if (qname)
   2521				set_opt(sb, QUOTA);
   2522			ctx->s_qf_names[i] = NULL;
   2523			qname = rcu_replace_pointer(sbi->s_qf_names[i], qname,
   2524						lockdep_is_held(&sb->s_umount));
   2525			if (qname)
   2526				kfree_rcu(qname);
   2527		}
   2528	}
   2529
   2530	if (ctx->spec & EXT4_SPEC_JQFMT)
   2531		sbi->s_jquota_fmt = ctx->s_jquota_fmt;
   2532#endif
   2533}
   2534
   2535/*
   2536 * Check quota settings consistency.
   2537 */
   2538static int ext4_check_quota_consistency(struct fs_context *fc,
   2539					struct super_block *sb)
   2540{
   2541#ifdef CONFIG_QUOTA
   2542	struct ext4_fs_context *ctx = fc->fs_private;
   2543	struct ext4_sb_info *sbi = EXT4_SB(sb);
   2544	bool quota_feature = ext4_has_feature_quota(sb);
   2545	bool quota_loaded = sb_any_quota_loaded(sb);
   2546	bool usr_qf_name, grp_qf_name, usrquota, grpquota;
   2547	int quota_flags, i;
   2548
   2549	/*
   2550	 * We do the test below only for project quotas. 'usrquota' and
   2551	 * 'grpquota' mount options are allowed even without quota feature
   2552	 * to support legacy quotas in quota files.
   2553	 */
   2554	if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) &&
   2555	    !ext4_has_feature_project(sb)) {
   2556		ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. "
   2557			 "Cannot enable project quota enforcement.");
   2558		return -EINVAL;
   2559	}
   2560
   2561	quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
   2562		      EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA;
   2563	if (quota_loaded &&
   2564	    ctx->mask_s_mount_opt & quota_flags &&
   2565	    !ctx_test_mount_opt(ctx, quota_flags))
   2566		goto err_quota_change;
   2567
   2568	if (ctx->spec & EXT4_SPEC_JQUOTA) {
   2570		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
   2571			if (!(ctx->qname_spec & (1 << i)))
   2572				continue;
   2573
   2574			if (quota_loaded &&
   2575			    !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i])
   2576				goto err_jquota_change;
   2577
   2578			if (sbi->s_qf_names[i] && ctx->s_qf_names[i] &&
   2579			    strcmp(get_qf_name(sb, sbi, i),
   2580				   ctx->s_qf_names[i]) != 0)
   2581				goto err_jquota_specified;
   2582		}
   2583
   2584		if (quota_feature) {
   2585			ext4_msg(NULL, KERN_INFO,
   2586				 "Journaled quota options ignored when "
   2587				 "QUOTA feature is enabled");
   2588			return 0;
   2589		}
   2590	}
   2591
   2592	if (ctx->spec & EXT4_SPEC_JQFMT) {
   2593		if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded)
   2594			goto err_jquota_change;
   2595		if (quota_feature) {
   2596			ext4_msg(NULL, KERN_INFO, "Quota format mount options "
   2597				 "ignored when QUOTA feature is enabled");
   2598			return 0;
   2599		}
   2600	}
   2601
   2602	/* Make sure we don't mix old and new quota format */
   2603	usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) ||
   2604		       ctx->s_qf_names[USRQUOTA]);
   2605	grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) ||
   2606		       ctx->s_qf_names[GRPQUOTA]);
   2607
   2608	usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
   2609		    test_opt(sb, USRQUOTA));
   2610
   2611	grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) ||
   2612		    test_opt(sb, GRPQUOTA));
   2613
   2614	if (usr_qf_name) {
   2615		ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
   2616		usrquota = false;
   2617	}
   2618	if (grp_qf_name) {
   2619		ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
   2620		grpquota = false;
   2621	}
   2622
   2623	if (usr_qf_name || grp_qf_name) {
   2624		if (usrquota || grpquota) {
   2625			ext4_msg(NULL, KERN_ERR, "old and new quota "
   2626				 "format mixing");
   2627			return -EINVAL;
   2628		}
   2629
   2630		if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) {
   2631			ext4_msg(NULL, KERN_ERR, "journaled quota format "
   2632				 "not specified");
   2633			return -EINVAL;
   2634		}
   2635	}
   2636
   2637	return 0;
   2638
   2639err_quota_change:
   2640	ext4_msg(NULL, KERN_ERR,
   2641		 "Cannot change quota options when quota turned on");
   2642	return -EINVAL;
   2643err_jquota_change:
   2644	ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota "
   2645		 "options when quota turned on");
   2646	return -EINVAL;
   2647err_jquota_specified:
   2648	ext4_msg(NULL, KERN_ERR, "%s quota file already specified",
   2649		 QTYPE2NAME(i));
   2650	return -EINVAL;
   2651#else
   2652	return 0;
   2653#endif
   2654}
   2655
   2656static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
   2657					    struct super_block *sb)
   2658{
   2659	const struct ext4_fs_context *ctx = fc->fs_private;
   2660	const struct ext4_sb_info *sbi = EXT4_SB(sb);
   2661	int err;
   2662
   2663	if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
   2664		return 0;
   2665
   2666	if (!ext4_has_feature_encrypt(sb)) {
   2667		ext4_msg(NULL, KERN_WARNING,
   2668			 "test_dummy_encryption requires encrypt feature");
   2669		return -EINVAL;
   2670	}
   2671	/*
   2672	 * This mount option is just for testing, and it's not worthwhile to
   2673	 * implement the extra complexity (e.g. RCU protection) that would be
   2674	 * needed to allow it to be set or changed during remount.  We do allow
   2675	 * it to be specified during remount, but only if there is no change.
   2676	 */
   2677	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
   2678		if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
   2679						 &ctx->dummy_enc_policy))
   2680			return 0;
   2681		ext4_msg(NULL, KERN_WARNING,
   2682			 "Can't set or change test_dummy_encryption on remount");
   2683		return -EINVAL;
   2684	}
   2685	/* Also make sure s_mount_opts didn't contain a conflicting value. */
   2686	if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
   2687		if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
   2688						 &ctx->dummy_enc_policy))
   2689			return 0;
   2690		ext4_msg(NULL, KERN_WARNING,
   2691			 "Conflicting test_dummy_encryption options");
   2692		return -EINVAL;
   2693	}
   2694	/*
   2695	 * fscrypt_add_test_dummy_key() technically changes the super_block, so
   2696	 * technically it should be delayed until ext4_apply_options() like the
   2697	 * other changes.  But since we never get here for remounts (see above),
   2698	 * and this is the last chance to report errors, we do it here.
   2699	 */
   2700	err = fscrypt_add_test_dummy_key(sb, &ctx->dummy_enc_policy);
   2701	if (err)
   2702		ext4_msg(NULL, KERN_WARNING,
   2703			 "Error adding test dummy encryption key [%d]", err);
   2704	return err;
   2705}
   2706
   2707static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
   2708					     struct super_block *sb)
   2709{
   2710	if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
   2711	    /* if already set, it was already verified to be the same */
   2712	    fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
   2713		return;
   2714	EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
   2715	memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
   2716	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
   2717}
   2718
   2719static int ext4_check_opt_consistency(struct fs_context *fc,
   2720				      struct super_block *sb)
   2721{
   2722	struct ext4_fs_context *ctx = fc->fs_private;
   2723	struct ext4_sb_info *sbi = fc->s_fs_info;
   2724	int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
   2725	int err;
   2726
   2727	if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
   2728		ext4_msg(NULL, KERN_ERR,
   2729			 "Mount option(s) incompatible with ext2");
   2730		return -EINVAL;
   2731	}
   2732	if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
   2733		ext4_msg(NULL, KERN_ERR,
   2734			 "Mount option(s) incompatible with ext3");
   2735		return -EINVAL;
   2736	}
   2737
   2738	if (ctx->s_want_extra_isize >
   2739	    (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) {
   2740		ext4_msg(NULL, KERN_ERR,
   2741			 "Invalid want_extra_isize %d",
   2742			 ctx->s_want_extra_isize);
   2743		return -EINVAL;
   2744	}
   2745
   2746	if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DIOREAD_NOLOCK)) {
   2747		int blocksize =
   2748			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
   2749		if (blocksize < PAGE_SIZE)
   2750			ext4_msg(NULL, KERN_WARNING, "Warning: mounting with an "
   2751				 "experimental mount option 'dioread_nolock' "
   2752				 "for blocksize < PAGE_SIZE");
   2753	}
   2754
   2755	err = ext4_check_test_dummy_encryption(fc, sb);
   2756	if (err)
   2757		return err;
   2758
   2759	if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) {
   2760		if (!sbi->s_journal) {
   2761			ext4_msg(NULL, KERN_WARNING,
   2762				 "Remounting file system with no journal "
   2763				 "so ignoring journalled data option");
   2764			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
   2765		} else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) !=
   2766			   test_opt(sb, DATA_FLAGS)) {
   2767			ext4_msg(NULL, KERN_ERR, "Cannot change data mode "
   2768				 "on remount");
   2769			return -EINVAL;
   2770		}
   2771	}
   2772
   2773	if (is_remount) {
   2774		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
   2775		    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
   2776			ext4_msg(NULL, KERN_ERR, "can't mount with "
   2777				 "both data=journal and dax");
   2778			return -EINVAL;
   2779		}
   2780
   2781		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
   2782		    (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
   2783		     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
   2784fail_dax_change_remount:
   2785			ext4_msg(NULL, KERN_ERR, "can't change "
   2786				 "dax mount option while remounting");
   2787			return -EINVAL;
   2788		} else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) &&
   2789			 (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
   2790			  (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) {
   2791			goto fail_dax_change_remount;
   2792		} else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) &&
   2793			   ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
   2794			    (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
   2795			    !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) {
   2796			goto fail_dax_change_remount;
   2797		}
   2798	}
   2799
   2800	return ext4_check_quota_consistency(fc, sb);
   2801}
   2802
   2803static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
   2804{
   2805	struct ext4_fs_context *ctx = fc->fs_private;
   2806	struct ext4_sb_info *sbi = fc->s_fs_info;
   2807
   2808	sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
   2809	sbi->s_mount_opt |= ctx->vals_s_mount_opt;
   2810	sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2;
   2811	sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2;
   2812	sbi->s_mount_flags &= ~ctx->mask_s_mount_flags;
   2813	sbi->s_mount_flags |= ctx->vals_s_mount_flags;
   2814	sb->s_flags &= ~ctx->mask_s_flags;
   2815	sb->s_flags |= ctx->vals_s_flags;
   2816
   2817	/*
    2818	 * ext4's "i_version" differs from the common "iversion" mount
    2819	 * option, so we have to let the VFS know that it was set;
    2820	 * otherwise it would get cleared on remount.
   2821	 */
   2822	if (ctx->mask_s_flags & SB_I_VERSION)
   2823		fc->sb_flags |= SB_I_VERSION;
   2824
   2825#define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; })
   2826	APPLY(s_commit_interval);
   2827	APPLY(s_stripe);
   2828	APPLY(s_max_batch_time);
   2829	APPLY(s_min_batch_time);
   2830	APPLY(s_want_extra_isize);
   2831	APPLY(s_inode_readahead_blks);
   2832	APPLY(s_max_dir_size_kb);
   2833	APPLY(s_li_wait_mult);
   2834	APPLY(s_resgid);
   2835	APPLY(s_resuid);
   2836
   2837#ifdef CONFIG_EXT4_DEBUG
   2838	APPLY(s_fc_debug_max_replay);
   2839#endif
   2840
   2841	ext4_apply_quota_options(fc, sb);
   2842	ext4_apply_test_dummy_encryption(ctx, sb);
   2843}
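
        /*
         * Worked example (illustrative): if only "nodelalloc" was parsed,
         * then ctx->mask_s_mount_opt == EXT4_MOUNT_DELALLOC and
         * ctx->vals_s_mount_opt == 0, so the s_mount_opt assignments above
         * clear DELALLOC and leave every other bit untouched.
         */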
   2844
   2846static int ext4_validate_options(struct fs_context *fc)
   2847{
   2848#ifdef CONFIG_QUOTA
   2849	struct ext4_fs_context *ctx = fc->fs_private;
   2850	char *usr_qf_name, *grp_qf_name;
   2851
   2852	usr_qf_name = ctx->s_qf_names[USRQUOTA];
   2853	grp_qf_name = ctx->s_qf_names[GRPQUOTA];
   2854
   2855	if (usr_qf_name || grp_qf_name) {
   2856		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name)
   2857			ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
   2858
   2859		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name)
   2860			ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
   2861
   2862		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
   2863		    ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) {
   2864			ext4_msg(NULL, KERN_ERR, "old and new quota "
   2865				 "format mixing");
   2866			return -EINVAL;
   2867		}
   2868	}
   2869#endif
   2870	return 1;
   2871}
   2872
   2873static inline void ext4_show_quota_options(struct seq_file *seq,
   2874					   struct super_block *sb)
   2875{
   2876#if defined(CONFIG_QUOTA)
   2877	struct ext4_sb_info *sbi = EXT4_SB(sb);
   2878	char *usr_qf_name, *grp_qf_name;
   2879
   2880	if (sbi->s_jquota_fmt) {
   2881		char *fmtname = "";
   2882
   2883		switch (sbi->s_jquota_fmt) {
   2884		case QFMT_VFS_OLD:
   2885			fmtname = "vfsold";
   2886			break;
   2887		case QFMT_VFS_V0:
   2888			fmtname = "vfsv0";
   2889			break;
   2890		case QFMT_VFS_V1:
   2891			fmtname = "vfsv1";
   2892			break;
   2893		}
   2894		seq_printf(seq, ",jqfmt=%s", fmtname);
   2895	}
   2896
   2897	rcu_read_lock();
   2898	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
   2899	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
   2900	if (usr_qf_name)
   2901		seq_show_option(seq, "usrjquota", usr_qf_name);
   2902	if (grp_qf_name)
   2903		seq_show_option(seq, "grpjquota", grp_qf_name);
   2904	rcu_read_unlock();
   2905#endif
   2906}
   2907
   2908static const char *token2str(int token)
   2909{
   2910	const struct fs_parameter_spec *spec;
   2911
   2912	for (spec = ext4_param_specs; spec->name != NULL; spec++)
   2913		if (spec->opt == token && !spec->type)
   2914			break;
   2915	return spec->name;
   2916}
   2917
   2918/*
   2919 * Show an option if
    2920 *  - it's set to a non-default value, OR
    2921 *  - the per-sb default differs from the global default
   2922 */
   2923static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
   2924			      int nodefs)
   2925{
   2926	struct ext4_sb_info *sbi = EXT4_SB(sb);
   2927	struct ext4_super_block *es = sbi->s_es;
   2928	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
   2929	const struct mount_opts *m;
   2930	char sep = nodefs ? '\n' : ',';
   2931
   2932#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
   2933#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
   2934
   2935	if (sbi->s_sb_block != 1)
   2936		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
   2937
   2938	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
   2939		int want_set = m->flags & MOPT_SET;
   2940		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
   2941		    m->flags & MOPT_SKIP)
   2942			continue;
   2943		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
   2944			continue; /* skip if same as the default */
   2945		if ((want_set &&
   2946		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
   2947		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
   2948			continue; /* select Opt_noFoo vs Opt_Foo */
   2949		SEQ_OPTS_PRINT("%s", token2str(m->token));
   2950	}
   2951
   2952	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
   2953	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
   2954		SEQ_OPTS_PRINT("resuid=%u",
   2955				from_kuid_munged(&init_user_ns, sbi->s_resuid));
   2956	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
   2957	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
   2958		SEQ_OPTS_PRINT("resgid=%u",
   2959				from_kgid_munged(&init_user_ns, sbi->s_resgid));
   2960	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
   2961	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
   2962		SEQ_OPTS_PUTS("errors=remount-ro");
   2963	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
   2964		SEQ_OPTS_PUTS("errors=continue");
   2965	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
   2966		SEQ_OPTS_PUTS("errors=panic");
   2967	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
   2968		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
   2969	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
   2970		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
   2971	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
   2972		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
   2973	if (sb->s_flags & SB_I_VERSION)
   2974		SEQ_OPTS_PUTS("i_version");
   2975	if (nodefs || sbi->s_stripe)
   2976		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
   2977	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
   2978			(sbi->s_mount_opt ^ def_mount_opt)) {
   2979		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
   2980			SEQ_OPTS_PUTS("data=journal");
   2981		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
   2982			SEQ_OPTS_PUTS("data=ordered");
   2983		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
   2984			SEQ_OPTS_PUTS("data=writeback");
   2985	}
   2986	if (nodefs ||
   2987	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
   2988		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
   2989			       sbi->s_inode_readahead_blks);
   2990
   2991	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
   2992		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
   2993		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
   2994	if (nodefs || sbi->s_max_dir_size_kb)
   2995		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
   2996	if (test_opt(sb, DATA_ERR_ABORT))
   2997		SEQ_OPTS_PUTS("data_err=abort");
   2998
   2999	fscrypt_show_test_dummy_encryption(seq, sep, sb);
   3000
   3001	if (sb->s_flags & SB_INLINECRYPT)
   3002		SEQ_OPTS_PUTS("inlinecrypt");
   3003
   3004	if (test_opt(sb, DAX_ALWAYS)) {
   3005		if (IS_EXT2_SB(sb))
   3006			SEQ_OPTS_PUTS("dax");
   3007		else
   3008			SEQ_OPTS_PUTS("dax=always");
   3009	} else if (test_opt2(sb, DAX_NEVER)) {
   3010		SEQ_OPTS_PUTS("dax=never");
   3011	} else if (test_opt2(sb, DAX_INODE)) {
   3012		SEQ_OPTS_PUTS("dax=inode");
   3013	}
   3014	ext4_show_quota_options(seq, sb);
   3015	return 0;
   3016}
   3017
   3018static int ext4_show_options(struct seq_file *seq, struct dentry *root)
   3019{
   3020	return _ext4_show_options(seq, root->d_sb, 0);
   3021}
   3022
   3023int ext4_seq_options_show(struct seq_file *seq, void *offset)
   3024{
   3025	struct super_block *sb = seq->private;
   3026	int rc;
   3027
   3028	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
   3029	rc = _ext4_show_options(seq, sb, 1);
   3030	seq_puts(seq, "\n");
   3031	return rc;
   3032}
   3033
   3034static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
   3035			    int read_only)
   3036{
   3037	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3038	int err = 0;
   3039
   3040	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
   3041		ext4_msg(sb, KERN_ERR, "revision level too high, "
   3042			 "forcing read-only mode");
   3043		err = -EROFS;
   3044		goto done;
   3045	}
   3046	if (read_only)
   3047		goto done;
   3048	if (!(sbi->s_mount_state & EXT4_VALID_FS))
   3049		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
   3050			 "running e2fsck is recommended");
   3051	else if (sbi->s_mount_state & EXT4_ERROR_FS)
   3052		ext4_msg(sb, KERN_WARNING,
   3053			 "warning: mounting fs with errors, "
   3054			 "running e2fsck is recommended");
   3055	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
   3056		 le16_to_cpu(es->s_mnt_count) >=
   3057		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
   3058		ext4_msg(sb, KERN_WARNING,
   3059			 "warning: maximal mount count reached, "
   3060			 "running e2fsck is recommended");
   3061	else if (le32_to_cpu(es->s_checkinterval) &&
   3062		 (ext4_get_tstamp(es, s_lastcheck) +
   3063		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
   3064		ext4_msg(sb, KERN_WARNING,
   3065			 "warning: checktime reached, "
   3066			 "running e2fsck is recommended");
   3067	if (!sbi->s_journal)
   3068		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
   3069	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
   3070		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
   3071	le16_add_cpu(&es->s_mnt_count, 1);
   3072	ext4_update_tstamp(es, s_mtime);
   3073	if (sbi->s_journal) {
   3074		ext4_set_feature_journal_needs_recovery(sb);
   3075		if (ext4_has_feature_orphan_file(sb))
   3076			ext4_set_feature_orphan_present(sb);
   3077	}
   3078
   3079	err = ext4_commit_super(sb);
   3080done:
   3081	if (test_opt(sb, DEBUG))
   3082		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
   3083				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
   3084			sb->s_blocksize,
   3085			sbi->s_groups_count,
   3086			EXT4_BLOCKS_PER_GROUP(sb),
   3087			EXT4_INODES_PER_GROUP(sb),
   3088			sbi->s_mount_opt, sbi->s_mount_opt2);
   3089	return err;
   3090}
   3091
   3092int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
   3093{
   3094	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3095	struct flex_groups **old_groups, **new_groups;
   3096	int size, i, j;
   3097
   3098	if (!sbi->s_log_groups_per_flex)
   3099		return 0;
   3100
   3101	size = ext4_flex_group(sbi, ngroup - 1) + 1;
   3102	if (size <= sbi->s_flex_groups_allocated)
   3103		return 0;
   3104
   3105	new_groups = kvzalloc(roundup_pow_of_two(size *
   3106			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
   3107	if (!new_groups) {
   3108		ext4_msg(sb, KERN_ERR,
   3109			 "not enough memory for %d flex group pointers", size);
   3110		return -ENOMEM;
   3111	}
   3112	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
   3113		new_groups[i] = kvzalloc(roundup_pow_of_two(
   3114					 sizeof(struct flex_groups)),
   3115					 GFP_KERNEL);
   3116		if (!new_groups[i]) {
   3117			for (j = sbi->s_flex_groups_allocated; j < i; j++)
   3118				kvfree(new_groups[j]);
   3119			kvfree(new_groups);
   3120			ext4_msg(sb, KERN_ERR,
   3121				 "not enough memory for %d flex groups", size);
   3122			return -ENOMEM;
   3123		}
   3124	}
   3125	rcu_read_lock();
   3126	old_groups = rcu_dereference(sbi->s_flex_groups);
   3127	if (old_groups)
   3128		memcpy(new_groups, old_groups,
   3129		       (sbi->s_flex_groups_allocated *
   3130			sizeof(struct flex_groups *)));
   3131	rcu_read_unlock();
   3132	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
   3133	sbi->s_flex_groups_allocated = size;
   3134	if (old_groups)
   3135		ext4_kvfree_array_rcu(old_groups);
   3136	return 0;
   3137}
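
        /*
         * The resize above follows the copy-and-publish RCU pattern:
         * build the larger array, copy the old pointers under
         * rcu_read_lock(), publish with rcu_assign_pointer(), and free
         * the old array only after a grace period via
         * ext4_kvfree_array_rcu().
         */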
   3138
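        /* Returns 1 on success (including flex_bg disabled), 0 on failure. */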
   3139static int ext4_fill_flex_info(struct super_block *sb)
   3140{
   3141	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3142	struct ext4_group_desc *gdp = NULL;
   3143	struct flex_groups *fg;
   3144	ext4_group_t flex_group;
   3145	int i, err;
   3146
   3147	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
   3148	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
   3149		sbi->s_log_groups_per_flex = 0;
   3150		return 1;
   3151	}
   3152
   3153	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
   3154	if (err)
   3155		goto failed;
   3156
   3157	for (i = 0; i < sbi->s_groups_count; i++) {
   3158		gdp = ext4_get_group_desc(sb, i, NULL);
   3159
   3160		flex_group = ext4_flex_group(sbi, i);
   3161		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
   3162		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
   3163		atomic64_add(ext4_free_group_clusters(sb, gdp),
   3164			     &fg->free_clusters);
   3165		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
   3166	}
   3167
   3168	return 1;
   3169failed:
   3170	return 0;
   3171}
   3172
   3173static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
   3174				   struct ext4_group_desc *gdp)
   3175{
   3176	int offset = offsetof(struct ext4_group_desc, bg_checksum);
   3177	__u16 crc = 0;
   3178	__le32 le_group = cpu_to_le32(block_group);
   3179	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3180
   3181	if (ext4_has_metadata_csum(sbi->s_sb)) {
   3182		/* Use new metadata_csum algorithm */
   3183		__u32 csum32;
   3184		__u16 dummy_csum = 0;
   3185
   3186		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
   3187				     sizeof(le_group));
   3188		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
   3189		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
   3190				     sizeof(dummy_csum));
   3191		offset += sizeof(dummy_csum);
   3192		if (offset < sbi->s_desc_size)
   3193			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
   3194					     sbi->s_desc_size - offset);
   3195
   3196		crc = csum32 & 0xFFFF;
   3197		goto out;
   3198	}
   3199
   3200	/* old crc16 code */
   3201	if (!ext4_has_feature_gdt_csum(sb))
   3202		return 0;
   3203
   3204	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
   3205	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
   3206	crc = crc16(crc, (__u8 *)gdp, offset);
   3207	offset += sizeof(gdp->bg_checksum); /* skip checksum */
   3208	/* for checksum of struct ext4_group_desc do the rest...*/
   3209	if (ext4_has_feature_64bit(sb) &&
   3210	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
   3211		crc = crc16(crc, (__u8 *)gdp + offset,
   3212			    le16_to_cpu(sbi->s_es->s_desc_size) -
   3213				offset);
   3214
   3215out:
   3216	return cpu_to_le16(crc);
   3217}
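/*
 * In short, two schemes coexist above: with metadata_csum the
 * descriptor checksum is crc32c seeded with s_csum_seed over the group
 * number and the descriptor (with bg_checksum treated as zero),
 * truncated to 16 bits; with the older gdt_csum feature it is a crc16
 * over the filesystem UUID, the group number and the descriptor,
 * skipping the bg_checksum field itself.
 */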
   3218
   3219int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
   3220				struct ext4_group_desc *gdp)
   3221{
   3222	if (ext4_has_group_desc_csum(sb) &&
   3223	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
   3224		return 0;
   3225
   3226	return 1;
   3227}
   3228
   3229void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
   3230			      struct ext4_group_desc *gdp)
   3231{
   3232	if (!ext4_has_group_desc_csum(sb))
   3233		return;
   3234	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
   3235}
   3236
   3237/* Called at mount-time, super-block is locked */
   3238static int ext4_check_descriptors(struct super_block *sb,
   3239				  ext4_fsblk_t sb_block,
   3240				  ext4_group_t *first_not_zeroed)
   3241{
   3242	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3243	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
   3244	ext4_fsblk_t last_block;
   3245	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
   3246	ext4_fsblk_t block_bitmap;
   3247	ext4_fsblk_t inode_bitmap;
   3248	ext4_fsblk_t inode_table;
   3249	int flexbg_flag = 0;
   3250	ext4_group_t i, grp = sbi->s_groups_count;
   3251
   3252	if (ext4_has_feature_flex_bg(sb))
   3253		flexbg_flag = 1;
   3254
   3255	ext4_debug("Checking group descriptors");
   3256
   3257	for (i = 0; i < sbi->s_groups_count; i++) {
   3258		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
   3259
   3260		if (i == sbi->s_groups_count - 1 || flexbg_flag)
   3261			last_block = ext4_blocks_count(sbi->s_es) - 1;
   3262		else
   3263			last_block = first_block +
   3264				(EXT4_BLOCKS_PER_GROUP(sb) - 1);
   3265
   3266		if ((grp == sbi->s_groups_count) &&
   3267		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
   3268			grp = i;
   3269
   3270		block_bitmap = ext4_block_bitmap(sb, gdp);
   3271		if (block_bitmap == sb_block) {
   3272			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3273				 "Block bitmap for group %u overlaps "
   3274				 "superblock", i);
   3275			if (!sb_rdonly(sb))
   3276				return 0;
   3277		}
   3278		if (block_bitmap >= sb_block + 1 &&
   3279		    block_bitmap <= last_bg_block) {
   3280			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3281				 "Block bitmap for group %u overlaps "
   3282				 "block group descriptors", i);
   3283			if (!sb_rdonly(sb))
   3284				return 0;
   3285		}
   3286		if (block_bitmap < first_block || block_bitmap > last_block) {
   3287			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3288			       "Block bitmap for group %u not in group "
   3289			       "(block %llu)!", i, block_bitmap);
   3290			return 0;
   3291		}
   3292		inode_bitmap = ext4_inode_bitmap(sb, gdp);
   3293		if (inode_bitmap == sb_block) {
   3294			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3295				 "Inode bitmap for group %u overlaps "
   3296				 "superblock", i);
   3297			if (!sb_rdonly(sb))
   3298				return 0;
   3299		}
   3300		if (inode_bitmap >= sb_block + 1 &&
   3301		    inode_bitmap <= last_bg_block) {
   3302			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3303				 "Inode bitmap for group %u overlaps "
   3304				 "block group descriptors", i);
   3305			if (!sb_rdonly(sb))
   3306				return 0;
   3307		}
   3308		if (inode_bitmap < first_block || inode_bitmap > last_block) {
   3309			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3310			       "Inode bitmap for group %u not in group "
   3311			       "(block %llu)!", i, inode_bitmap);
   3312			return 0;
   3313		}
   3314		inode_table = ext4_inode_table(sb, gdp);
   3315		if (inode_table == sb_block) {
   3316			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3317				 "Inode table for group %u overlaps "
   3318				 "superblock", i);
   3319			if (!sb_rdonly(sb))
   3320				return 0;
   3321		}
   3322		if (inode_table >= sb_block + 1 &&
   3323		    inode_table <= last_bg_block) {
   3324			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3325				 "Inode table for group %u overlaps "
   3326				 "block group descriptors", i);
   3327			if (!sb_rdonly(sb))
   3328				return 0;
   3329		}
   3330		if (inode_table < first_block ||
   3331		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
   3332			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3333			       "Inode table for group %u not in group "
   3334			       "(block %llu)!", i, inode_table);
   3335			return 0;
   3336		}
   3337		ext4_lock_group(sb, i);
   3338		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
   3339			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
   3340				 "Checksum for group %u failed (%u!=%u)",
   3341				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
   3342				     gdp)), le16_to_cpu(gdp->bg_checksum));
   3343			if (!sb_rdonly(sb)) {
   3344				ext4_unlock_group(sb, i);
   3345				return 0;
   3346			}
   3347		}
   3348		ext4_unlock_group(sb, i);
   3349		if (!flexbg_flag)
   3350			first_block += EXT4_BLOCKS_PER_GROUP(sb);
   3351	}
   3352	if (NULL != first_not_zeroed)
   3353		*first_not_zeroed = grp;
   3354	return 1;
   3355}
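/*
 * Note the asymmetry in the checks above: overlaps with the superblock
 * or descriptor blocks and checksum failures abort the mount only when
 * it is read-write, so a read-only mount can still reach the damaged
 * filesystem for e2fsck; a bitmap or inode table that lies outside its
 * group fails the mount unconditionally.
 */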
   3356
   3357/*
   3358 * Maximal extent format file size.
   3359 * Resulting logical blkno at s_maxbytes must fit in our on-disk
   3360 * extent format containers, within a sector_t, and within i_blocks
   3361 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
   3362 * so that won't be a limiting factor.
   3363 *
    3364 * However, there is another limiting factor. We store extents in the form
    3365 * of starting block and length, hence the resulting length of the extent
    3366 * covering the maximum file size must fit into the on-disk format
    3367 * containers as well. Given that the length is always one unit bigger than
    3368 * the maximum block number (because we count block 0 too), we lower s_maxbytes by one fs block.
   3369 *
   3370 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
   3371 */
   3372static loff_t ext4_max_size(int blkbits, int has_huge_files)
   3373{
   3374	loff_t res;
   3375	loff_t upper_limit = MAX_LFS_FILESIZE;
   3376
   3377	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
   3378
   3379	if (!has_huge_files) {
   3380		upper_limit = (1LL << 32) - 1;
   3381
   3382		/* total blocks in file system block size */
   3383		upper_limit >>= (blkbits - 9);
   3384		upper_limit <<= blkbits;
   3385	}
   3386
   3387	/*
   3388	 * 32-bit extent-start container, ee_block. We lower the maxbytes
   3389	 * by one fs block, so ee_len can cover the extent of maximum file
   3390	 * size
   3391	 */
   3392	res = (1LL << 32) - 1;
   3393	res <<= blkbits;
   3394
   3395	/* Sanity check against vm- & vfs- imposed limits */
   3396	if (res > upper_limit)
   3397		res = upper_limit;
   3398
   3399	return res;
   3400}
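/*
 * Worked example with 4KiB blocks (blkbits == 12): the 32-bit ee_block
 * container gives res = (2^32 - 1) << 12, i.e. 16 TiB minus one block.
 * Without the huge_file feature, i_blocks limits the file to 2^32 - 1
 * 512-byte sectors, so upper_limit caps the result at roughly 2 TiB
 * instead.
 */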
   3401
   3402/*
   3403 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
   3404 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
   3405 * We need to be 1 filesystem block less than the 2^48 sector limit.
   3406 */
   3407static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
   3408{
   3409	loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
   3410	int meta_blocks;
   3411	unsigned int ppb = 1 << (bits - 2);
   3412
   3413	/*
   3414	 * This is calculated to be the largest file size for a dense, block
   3415	 * mapped file such that the file's total number of 512-byte sectors,
   3416	 * including data and all indirect blocks, does not exceed (2^48 - 1).
   3417	 *
   3418	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
   3419	 * number of 512-byte sectors of the file.
   3420	 */
   3421	if (!has_huge_files) {
   3422		/*
    3423		 * !has_huge_files implies that the inode i_block field
   3424		 * represents total file blocks in 2^32 512-byte sectors ==
   3425		 * size of vfs inode i_blocks * 8
   3426		 */
   3427		upper_limit = (1LL << 32) - 1;
   3428
   3429		/* total blocks in file system block size */
   3430		upper_limit >>= (bits - 9);
   3431
   3432	} else {
   3433		/*
   3434		 * We use 48 bit ext4_inode i_blocks
   3435		 * With EXT4_HUGE_FILE_FL set the i_blocks
   3436		 * represent total number of blocks in
   3437		 * file system block size
   3438		 */
   3439		upper_limit = (1LL << 48) - 1;
   3440
   3441	}
   3442
   3443	/* Compute how many blocks we can address by block tree */
   3444	res += ppb;
   3445	res += ppb * ppb;
   3446	res += ((loff_t)ppb) * ppb * ppb;
   3447	/* Compute how many metadata blocks are needed */
   3448	meta_blocks = 1;
   3449	meta_blocks += 1 + ppb;
   3450	meta_blocks += 1 + ppb + ppb * ppb;
   3451	/* Does block tree limit file size? */
   3452	if (res + meta_blocks <= upper_limit)
   3453		goto check_lfs;
   3454
   3455	res = upper_limit;
   3456	/* How many metadata blocks are needed for addressing upper_limit? */
   3457	upper_limit -= EXT4_NDIR_BLOCKS;
   3458	/* indirect blocks */
   3459	meta_blocks = 1;
   3460	upper_limit -= ppb;
   3461	/* double indirect blocks */
   3462	if (upper_limit < ppb * ppb) {
   3463		meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
   3464		res -= meta_blocks;
   3465		goto check_lfs;
   3466	}
   3467	meta_blocks += 1 + ppb;
   3468	upper_limit -= ppb * ppb;
    3469	/* triple indirect blocks for the rest */
   3470	meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
   3471		DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
   3472	res -= meta_blocks;
   3473check_lfs:
   3474	res <<= bits;
   3475	if (res > MAX_LFS_FILESIZE)
   3476		res = MAX_LFS_FILESIZE;
   3477
   3478	return res;
   3479}
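/*
 * Worked example with 4KiB blocks (bits == 12): ppb == 1 << 10 == 1024,
 * so the block tree addresses 12 + 1024 + 1024^2 + 1024^3 data blocks,
 * about 2^30 blocks or 4 TiB.  With huge_file, upper_limit is 2^48 - 1
 * blocks and the tree itself is the limit; without it, upper_limit is
 * about 2^29 blocks, and the code above deducts the indirect blocks
 * needed to address that many data blocks before converting to bytes.
 */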
   3480
   3481static ext4_fsblk_t descriptor_loc(struct super_block *sb,
   3482				   ext4_fsblk_t logical_sb_block, int nr)
   3483{
   3484	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3485	ext4_group_t bg, first_meta_bg;
   3486	int has_super = 0;
   3487
   3488	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
   3489
   3490	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
   3491		return logical_sb_block + nr + 1;
   3492	bg = sbi->s_desc_per_block * nr;
   3493	if (ext4_bg_has_super(sb, bg))
   3494		has_super = 1;
   3495
   3496	/*
   3497	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
   3498	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
   3499	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
   3500	 * compensate.
   3501	 */
   3502	if (sb->s_blocksize == 1024 && nr == 0 &&
   3503	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
   3504		has_super++;
   3505
   3506	return (has_super + ext4_group_first_block_no(sb, bg));
   3507}
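/*
 * Example: without meta_bg (or for descriptor blocks that precede
 * s_first_meta_bg), GDT block nr simply follows the primary superblock
 * at logical_sb_block + nr + 1.  With meta_bg, GDT block nr instead
 * lives at the start of block group s_desc_per_block * nr, right after
 * that group's backup superblock if it has one.
 */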
   3508
   3509/**
   3510 * ext4_get_stripe_size: Get the stripe size.
   3511 * @sbi: In memory super block info
   3512 *
    3513 * If the stripe size was specified via a mount option, then
    3514 * use the mount option value. If the value specified at mount time is
    3515 * greater than the blocks per group, use the super block value.
    3516 * If the super block value is also greater than blocks per group, return 0.
    3517 * The allocator needs it to be less than blocks per group.
   3518 *
   3519 */
   3520static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
   3521{
   3522	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
   3523	unsigned long stripe_width =
   3524			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
   3525	int ret;
   3526
   3527	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
   3528		ret = sbi->s_stripe;
   3529	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
   3530		ret = stripe_width;
   3531	else if (stride && stride <= sbi->s_blocks_per_group)
   3532		ret = stride;
   3533	else
   3534		ret = 0;
   3535
   3536	/*
    3537	 * A resulting stripe size of 0 or 1 makes no sense, so
    3538	 * we set it to 0 to turn off the stripe handling code.
   3539	 */
   3540	if (ret <= 1)
   3541		ret = 0;
   3542
   3543	return ret;
   3544}
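/*
 * Example: a RAID-5 volume advertising s_raid_stride == 16 and
 * s_raid_stripe_width == 64 (both in filesystem blocks) yields a
 * stripe size of 64 unless -o stripe= overrides it; candidates larger
 * than s_blocks_per_group are skipped, and a final result of 0 or 1
 * disables striped allocation entirely.
 */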
   3545
   3546/*
   3547 * Check whether this filesystem can be mounted based on
   3548 * the features present and the RDONLY/RDWR mount requested.
   3549 * Returns 1 if this filesystem can be mounted as requested,
   3550 * 0 if it cannot be.
   3551 */
   3552int ext4_feature_set_ok(struct super_block *sb, int readonly)
   3553{
   3554	if (ext4_has_unknown_ext4_incompat_features(sb)) {
   3555		ext4_msg(sb, KERN_ERR,
   3556			"Couldn't mount because of "
   3557			"unsupported optional features (%x)",
   3558			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
   3559			~EXT4_FEATURE_INCOMPAT_SUPP));
   3560		return 0;
   3561	}
   3562
   3563#if !IS_ENABLED(CONFIG_UNICODE)
   3564	if (ext4_has_feature_casefold(sb)) {
   3565		ext4_msg(sb, KERN_ERR,
   3566			 "Filesystem with casefold feature cannot be "
   3567			 "mounted without CONFIG_UNICODE");
   3568		return 0;
   3569	}
   3570#endif
   3571
   3572	if (readonly)
   3573		return 1;
   3574
   3575	if (ext4_has_feature_readonly(sb)) {
   3576		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
   3577		sb->s_flags |= SB_RDONLY;
   3578		return 1;
   3579	}
   3580
   3581	/* Check that feature set is OK for a read-write mount */
   3582	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
   3583		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
   3584			 "unsupported optional features (%x)",
   3585			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
   3586				~EXT4_FEATURE_RO_COMPAT_SUPP));
   3587		return 0;
   3588	}
   3589	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
   3590		ext4_msg(sb, KERN_ERR,
   3591			 "Can't support bigalloc feature without "
   3592			 "extents feature\n");
   3593		return 0;
   3594	}
   3595
   3596#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
   3597	if (!readonly && (ext4_has_feature_quota(sb) ||
   3598			  ext4_has_feature_project(sb))) {
   3599		ext4_msg(sb, KERN_ERR,
   3600			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
   3601		return 0;
   3602	}
   3603#endif  /* CONFIG_QUOTA */
   3604	return 1;
   3605}
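/*
 * The feature classes checked above follow the usual ext4 contract:
 * unknown incompat features make the filesystem unmountable, unknown
 * ro_compat features merely forbid a read-write mount, and unknown
 * compat features are always safe to ignore.
 */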
   3606
   3607/*
   3608 * This function is called once a day if we have errors logged
   3609 * on the file system
   3610 */
   3611static void print_daily_error_info(struct timer_list *t)
   3612{
   3613	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
   3614	struct super_block *sb = sbi->s_sb;
   3615	struct ext4_super_block *es = sbi->s_es;
   3616
   3617	if (es->s_error_count)
   3618		/* fsck newer than v1.41.13 is needed to clean this condition. */
   3619		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
   3620			 le32_to_cpu(es->s_error_count));
   3621	if (es->s_first_error_time) {
   3622		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
   3623		       sb->s_id,
   3624		       ext4_get_tstamp(es, s_first_error_time),
   3625		       (int) sizeof(es->s_first_error_func),
   3626		       es->s_first_error_func,
   3627		       le32_to_cpu(es->s_first_error_line));
   3628		if (es->s_first_error_ino)
   3629			printk(KERN_CONT ": inode %u",
   3630			       le32_to_cpu(es->s_first_error_ino));
   3631		if (es->s_first_error_block)
   3632			printk(KERN_CONT ": block %llu", (unsigned long long)
   3633			       le64_to_cpu(es->s_first_error_block));
   3634		printk(KERN_CONT "\n");
   3635	}
   3636	if (es->s_last_error_time) {
   3637		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
   3638		       sb->s_id,
   3639		       ext4_get_tstamp(es, s_last_error_time),
   3640		       (int) sizeof(es->s_last_error_func),
   3641		       es->s_last_error_func,
   3642		       le32_to_cpu(es->s_last_error_line));
   3643		if (es->s_last_error_ino)
   3644			printk(KERN_CONT ": inode %u",
   3645			       le32_to_cpu(es->s_last_error_ino));
   3646		if (es->s_last_error_block)
   3647			printk(KERN_CONT ": block %llu", (unsigned long long)
   3648			       le64_to_cpu(es->s_last_error_block));
   3649		printk(KERN_CONT "\n");
   3650	}
   3651	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
   3652}
   3653
   3654/* Find next suitable group and run ext4_init_inode_table */
   3655static int ext4_run_li_request(struct ext4_li_request *elr)
   3656{
   3657	struct ext4_group_desc *gdp = NULL;
   3658	struct super_block *sb = elr->lr_super;
   3659	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
   3660	ext4_group_t group = elr->lr_next_group;
   3661	unsigned int prefetch_ios = 0;
   3662	int ret = 0;
   3663	u64 start_time;
   3664
   3665	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
   3666		elr->lr_next_group = ext4_mb_prefetch(sb, group,
   3667				EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
   3668		if (prefetch_ios)
   3669			ext4_mb_prefetch_fini(sb, elr->lr_next_group,
   3670					      prefetch_ios);
   3671		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
   3672					    prefetch_ios);
   3673		if (group >= elr->lr_next_group) {
   3674			ret = 1;
   3675			if (elr->lr_first_not_zeroed != ngroups &&
   3676			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
   3677				elr->lr_next_group = elr->lr_first_not_zeroed;
   3678				elr->lr_mode = EXT4_LI_MODE_ITABLE;
   3679				ret = 0;
   3680			}
   3681		}
   3682		return ret;
   3683	}
   3684
   3685	for (; group < ngroups; group++) {
   3686		gdp = ext4_get_group_desc(sb, group, NULL);
   3687		if (!gdp) {
   3688			ret = 1;
   3689			break;
   3690		}
   3691
   3692		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
   3693			break;
   3694	}
   3695
   3696	if (group >= ngroups)
   3697		ret = 1;
   3698
   3699	if (!ret) {
   3700		start_time = ktime_get_real_ns();
   3701		ret = ext4_init_inode_table(sb, group,
   3702					    elr->lr_timeout ? 0 : 1);
   3703		trace_ext4_lazy_itable_init(sb, group);
   3704		if (elr->lr_timeout == 0) {
   3705			elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) *
   3706				EXT4_SB(elr->lr_super)->s_li_wait_mult);
   3707		}
   3708		elr->lr_next_sched = jiffies + elr->lr_timeout;
   3709		elr->lr_next_group = group + 1;
   3710	}
   3711	return ret;
   3712}
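/*
 * Scheduling sketch for the itable pass above: the first
 * ext4_init_inode_table() call is timed and lr_timeout becomes that
 * elapsed time multiplied by s_li_wait_mult (EXT4_DEF_LI_WAIT_MULT,
 * i.e. 10, unless overridden by the init_itable=n mount option), so a
 * group that took 20ms to zero reschedules the request roughly 200ms
 * later.
 */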
   3713
   3714/*
    3715 * Remove lr_request from the request list and free the
    3716 * request structure. Should be called with li_list_mtx held.
   3717 */
   3718static void ext4_remove_li_request(struct ext4_li_request *elr)
   3719{
   3720	if (!elr)
   3721		return;
   3722
   3723	list_del(&elr->lr_request);
   3724	EXT4_SB(elr->lr_super)->s_li_request = NULL;
   3725	kfree(elr);
   3726}
   3727
   3728static void ext4_unregister_li_request(struct super_block *sb)
   3729{
   3730	mutex_lock(&ext4_li_mtx);
   3731	if (!ext4_li_info) {
   3732		mutex_unlock(&ext4_li_mtx);
   3733		return;
   3734	}
   3735
   3736	mutex_lock(&ext4_li_info->li_list_mtx);
   3737	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
   3738	mutex_unlock(&ext4_li_info->li_list_mtx);
   3739	mutex_unlock(&ext4_li_mtx);
   3740}
   3741
   3742static struct task_struct *ext4_lazyinit_task;
   3743
   3744/*
    3745 * This is the function where the ext4lazyinit thread lives. It walks
    3746 * through the request list searching for the next scheduled filesystem.
    3747 * When such a fs is found, it runs the lazy initialization request
    3748 * (ext4_run_li_request) and keeps track of the time spent in this
    3749 * function. Based on that time we compute the next schedule time of
    3750 * the request. When the walk through the list is complete, it computes
    3751 * the next wakeup time and puts itself to sleep.
   3752 */
   3753static int ext4_lazyinit_thread(void *arg)
   3754{
   3755	struct ext4_lazy_init *eli = arg;
   3756	struct list_head *pos, *n;
   3757	struct ext4_li_request *elr;
   3758	unsigned long next_wakeup, cur;
   3759
   3760	BUG_ON(NULL == eli);
   3761
   3762cont_thread:
   3763	while (true) {
   3764		next_wakeup = MAX_JIFFY_OFFSET;
   3765
   3766		mutex_lock(&eli->li_list_mtx);
   3767		if (list_empty(&eli->li_request_list)) {
   3768			mutex_unlock(&eli->li_list_mtx);
   3769			goto exit_thread;
   3770		}
   3771		list_for_each_safe(pos, n, &eli->li_request_list) {
   3772			int err = 0;
   3773			int progress = 0;
   3774			elr = list_entry(pos, struct ext4_li_request,
   3775					 lr_request);
   3776
   3777			if (time_before(jiffies, elr->lr_next_sched)) {
   3778				if (time_before(elr->lr_next_sched, next_wakeup))
   3779					next_wakeup = elr->lr_next_sched;
   3780				continue;
   3781			}
   3782			if (down_read_trylock(&elr->lr_super->s_umount)) {
   3783				if (sb_start_write_trylock(elr->lr_super)) {
   3784					progress = 1;
   3785					/*
   3786					 * We hold sb->s_umount, sb can not
   3787					 * be removed from the list, it is
   3788					 * now safe to drop li_list_mtx
   3789					 */
   3790					mutex_unlock(&eli->li_list_mtx);
   3791					err = ext4_run_li_request(elr);
   3792					sb_end_write(elr->lr_super);
   3793					mutex_lock(&eli->li_list_mtx);
   3794					n = pos->next;
   3795				}
   3796				up_read((&elr->lr_super->s_umount));
   3797			}
   3798			/* error, remove the lazy_init job */
   3799			if (err) {
   3800				ext4_remove_li_request(elr);
   3801				continue;
   3802			}
   3803			if (!progress) {
   3804				elr->lr_next_sched = jiffies +
   3805					(prandom_u32()
   3806					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
   3807			}
   3808			if (time_before(elr->lr_next_sched, next_wakeup))
   3809				next_wakeup = elr->lr_next_sched;
   3810		}
   3811		mutex_unlock(&eli->li_list_mtx);
   3812
   3813		try_to_freeze();
   3814
   3815		cur = jiffies;
   3816		if ((time_after_eq(cur, next_wakeup)) ||
   3817		    (MAX_JIFFY_OFFSET == next_wakeup)) {
   3818			cond_resched();
   3819			continue;
   3820		}
   3821
   3822		schedule_timeout_interruptible(next_wakeup - cur);
   3823
   3824		if (kthread_should_stop()) {
   3825			ext4_clear_request_list();
   3826			goto exit_thread;
   3827		}
   3828	}
   3829
   3830exit_thread:
   3831	/*
   3832	 * It looks like the request list is empty, but we need
   3833	 * to check it under the li_list_mtx lock, to prevent any
   3834	 * additions into it, and of course we should lock ext4_li_mtx
   3835	 * to atomically free the list and ext4_li_info, because at
    3836	 * this point another ext4 filesystem could be registering
    3837	 * a new one.
   3838	 */
   3839	mutex_lock(&ext4_li_mtx);
   3840	mutex_lock(&eli->li_list_mtx);
   3841	if (!list_empty(&eli->li_request_list)) {
   3842		mutex_unlock(&eli->li_list_mtx);
   3843		mutex_unlock(&ext4_li_mtx);
   3844		goto cont_thread;
   3845	}
   3846	mutex_unlock(&eli->li_list_mtx);
   3847	kfree(ext4_li_info);
   3848	ext4_li_info = NULL;
   3849	mutex_unlock(&ext4_li_mtx);
   3850
   3851	return 0;
   3852}
   3853
   3854static void ext4_clear_request_list(void)
   3855{
   3856	struct list_head *pos, *n;
   3857	struct ext4_li_request *elr;
   3858
   3859	mutex_lock(&ext4_li_info->li_list_mtx);
   3860	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
   3861		elr = list_entry(pos, struct ext4_li_request,
   3862				 lr_request);
   3863		ext4_remove_li_request(elr);
   3864	}
   3865	mutex_unlock(&ext4_li_info->li_list_mtx);
   3866}
   3867
   3868static int ext4_run_lazyinit_thread(void)
   3869{
   3870	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
   3871					 ext4_li_info, "ext4lazyinit");
   3872	if (IS_ERR(ext4_lazyinit_task)) {
   3873		int err = PTR_ERR(ext4_lazyinit_task);
   3874		ext4_clear_request_list();
   3875		kfree(ext4_li_info);
   3876		ext4_li_info = NULL;
   3877		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
   3878				 "initialization thread\n",
   3879				 err);
   3880		return err;
   3881	}
   3882	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
   3883	return 0;
   3884}
   3885
   3886/*
    3887 * Check whether it makes sense to run the itable init thread or not.
    3888 * If there is at least one uninitialized inode table, return the
    3889 * corresponding group number, else the loop goes through all
    3890 * groups and returns the total number of groups.
   3891 */
   3892static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
   3893{
   3894	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
   3895	struct ext4_group_desc *gdp = NULL;
   3896
   3897	if (!ext4_has_group_desc_csum(sb))
   3898		return ngroups;
   3899
   3900	for (group = 0; group < ngroups; group++) {
   3901		gdp = ext4_get_group_desc(sb, group, NULL);
   3902		if (!gdp)
   3903			continue;
   3904
   3905		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
   3906			break;
   3907	}
   3908
   3909	return group;
   3910}
   3911
   3912static int ext4_li_info_new(void)
   3913{
   3914	struct ext4_lazy_init *eli = NULL;
   3915
   3916	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
   3917	if (!eli)
   3918		return -ENOMEM;
   3919
   3920	INIT_LIST_HEAD(&eli->li_request_list);
   3921	mutex_init(&eli->li_list_mtx);
   3922
   3923	eli->li_state |= EXT4_LAZYINIT_QUIT;
   3924
   3925	ext4_li_info = eli;
   3926
   3927	return 0;
   3928}
   3929
   3930static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
   3931					    ext4_group_t start)
   3932{
   3933	struct ext4_li_request *elr;
   3934
   3935	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
   3936	if (!elr)
   3937		return NULL;
   3938
   3939	elr->lr_super = sb;
   3940	elr->lr_first_not_zeroed = start;
   3941	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
   3942		elr->lr_mode = EXT4_LI_MODE_ITABLE;
   3943		elr->lr_next_group = start;
   3944	} else {
   3945		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
   3946	}
   3947
   3948	/*
    3949	 * Randomize the first schedule time of the request to
    3950	 * better spread out the inode table initialization
    3951	 * requests.
   3952	 */
   3953	elr->lr_next_sched = jiffies + (prandom_u32() %
   3954				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
   3955	return elr;
   3956}
   3957
   3958int ext4_register_li_request(struct super_block *sb,
   3959			     ext4_group_t first_not_zeroed)
   3960{
   3961	struct ext4_sb_info *sbi = EXT4_SB(sb);
   3962	struct ext4_li_request *elr = NULL;
   3963	ext4_group_t ngroups = sbi->s_groups_count;
   3964	int ret = 0;
   3965
   3966	mutex_lock(&ext4_li_mtx);
   3967	if (sbi->s_li_request != NULL) {
   3968		/*
   3969		 * Reset timeout so it can be computed again, because
   3970		 * s_li_wait_mult might have changed.
   3971		 */
   3972		sbi->s_li_request->lr_timeout = 0;
   3973		goto out;
   3974	}
   3975
   3976	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
   3977	    (first_not_zeroed == ngroups || sb_rdonly(sb) ||
   3978	     !test_opt(sb, INIT_INODE_TABLE)))
   3979		goto out;
   3980
   3981	elr = ext4_li_request_new(sb, first_not_zeroed);
   3982	if (!elr) {
   3983		ret = -ENOMEM;
   3984		goto out;
   3985	}
   3986
   3987	if (NULL == ext4_li_info) {
   3988		ret = ext4_li_info_new();
   3989		if (ret)
   3990			goto out;
   3991	}
   3992
   3993	mutex_lock(&ext4_li_info->li_list_mtx);
   3994	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
   3995	mutex_unlock(&ext4_li_info->li_list_mtx);
   3996
   3997	sbi->s_li_request = elr;
   3998	/*
    3999	 * set elr to NULL here since it has been inserted into
    4000	 * the request_list; its removal and freeing are
    4001	 * handled by ext4_clear_request_list from now on.
   4002	 */
   4003	elr = NULL;
   4004
   4005	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
   4006		ret = ext4_run_lazyinit_thread();
   4007		if (ret)
   4008			goto out;
   4009	}
   4010out:
   4011	mutex_unlock(&ext4_li_mtx);
   4012	if (ret)
   4013		kfree(elr);
   4014	return ret;
   4015}
   4016
   4017/*
   4018 * We do not need to lock anything since this is called on
   4019 * module unload.
   4020 */
   4021static void ext4_destroy_lazyinit_thread(void)
   4022{
   4023	/*
    4024	 * If the thread exited earlier,
    4025	 * there's nothing to be done.
   4026	 */
   4027	if (!ext4_li_info || !ext4_lazyinit_task)
   4028		return;
   4029
   4030	kthread_stop(ext4_lazyinit_task);
   4031}
   4032
   4033static int set_journal_csum_feature_set(struct super_block *sb)
   4034{
   4035	int ret = 1;
   4036	int compat, incompat;
   4037	struct ext4_sb_info *sbi = EXT4_SB(sb);
   4038
   4039	if (ext4_has_metadata_csum(sb)) {
   4040		/* journal checksum v3 */
   4041		compat = 0;
   4042		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
   4043	} else {
   4044		/* journal checksum v1 */
   4045		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
   4046		incompat = 0;
   4047	}
   4048
   4049	jbd2_journal_clear_features(sbi->s_journal,
   4050			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
   4051			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
   4052			JBD2_FEATURE_INCOMPAT_CSUM_V2);
   4053	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
   4054		ret = jbd2_journal_set_features(sbi->s_journal,
   4055				compat, 0,
   4056				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
   4057				incompat);
   4058	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
   4059		ret = jbd2_journal_set_features(sbi->s_journal,
   4060				compat, 0,
   4061				incompat);
   4062		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
   4063				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
   4064	} else {
   4065		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
   4066				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
   4067	}
   4068
   4069	return ret;
   4070}
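/*
 * Net effect of the selection above: metadata_csum filesystems use
 * journal checksum v3 (JBD2_FEATURE_INCOMPAT_CSUM_V3), everything else
 * falls back to the v1 compat checksum, and ASYNC_COMMIT survives only
 * when journal_async_commit was explicitly requested, since it relies
 * on checksummed commit blocks.
 */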
   4071
   4072/*
   4073 * Note: calculating the overhead so we can be compatible with
   4074 * historical BSD practice is quite difficult in the face of
   4075 * clusters/bigalloc.  This is because multiple metadata blocks from
    4076 * different block groups can end up in the same allocation cluster.
   4077 * Calculating the exact overhead in the face of clustered allocation
   4078 * requires either O(all block bitmaps) in memory or O(number of block
   4079 * groups**2) in time.  We will still calculate the superblock for
    4080 * older file systems --- and if we come across a bigalloc file
   4081 * system with zero in s_overhead_clusters the estimate will be close to
   4082 * correct especially for very large cluster sizes --- but for newer
   4083 * file systems, it's better to calculate this figure once at mkfs
   4084 * time, and store it in the superblock.  If the superblock value is
   4085 * present (even for non-bigalloc file systems), we will use it.
   4086 */
   4087static int count_overhead(struct super_block *sb, ext4_group_t grp,
   4088			  char *buf)
   4089{
   4090	struct ext4_sb_info	*sbi = EXT4_SB(sb);
   4091	struct ext4_group_desc	*gdp;
   4092	ext4_fsblk_t		first_block, last_block, b;
   4093	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
   4094	int			s, j, count = 0;
   4095	int			has_super = ext4_bg_has_super(sb, grp);
   4096
   4097	if (!ext4_has_feature_bigalloc(sb))
   4098		return (has_super + ext4_bg_num_gdb(sb, grp) +
   4099			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
   4100			sbi->s_itb_per_group + 2);
   4101
   4102	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
   4103		(grp * EXT4_BLOCKS_PER_GROUP(sb));
   4104	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
   4105	for (i = 0; i < ngroups; i++) {
   4106		gdp = ext4_get_group_desc(sb, i, NULL);
   4107		b = ext4_block_bitmap(sb, gdp);
   4108		if (b >= first_block && b <= last_block) {
   4109			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
   4110			count++;
   4111		}
   4112		b = ext4_inode_bitmap(sb, gdp);
   4113		if (b >= first_block && b <= last_block) {
   4114			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
   4115			count++;
   4116		}
   4117		b = ext4_inode_table(sb, gdp);
   4118		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
   4119			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
   4120				int c = EXT4_B2C(sbi, b - first_block);
   4121				ext4_set_bit(c, buf);
   4122				count++;
   4123			}
   4124		if (i != grp)
   4125			continue;
   4126		s = 0;
   4127		if (ext4_bg_has_super(sb, grp)) {
   4128			ext4_set_bit(s++, buf);
   4129			count++;
   4130		}
   4131		j = ext4_bg_num_gdb(sb, grp);
   4132		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
   4133			ext4_error(sb, "Invalid number of block group "
   4134				   "descriptor blocks: %d", j);
   4135			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
   4136		}
   4137		count += j;
   4138		for (; j > 0; j--)
   4139			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
   4140	}
   4141	if (!count)
   4142		return 0;
   4143	return EXT4_CLUSTERS_PER_GROUP(sb) -
   4144		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
   4145}
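/*
 * For the common non-bigalloc case the overhead of a group is just the
 * early return above: optional superblock backup + GDT blocks +
 * reserved GDT blocks + the two bitmaps ("+ 2") + the inode table.
 * E.g. a group holding a backup superblock with 1 GDT block, 256
 * reserved GDT blocks and 512 inode table blocks contributes
 * 1 + 1 + 256 + 512 + 2 = 772 blocks.
 */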
   4146
   4147/*
   4148 * Compute the overhead and stash it in sbi->s_overhead
   4149 */
   4150int ext4_calculate_overhead(struct super_block *sb)
   4151{
   4152	struct ext4_sb_info *sbi = EXT4_SB(sb);
   4153	struct ext4_super_block *es = sbi->s_es;
   4154	struct inode *j_inode;
   4155	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
   4156	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
   4157	ext4_fsblk_t overhead = 0;
   4158	char *buf = (char *) get_zeroed_page(GFP_NOFS);
   4159
   4160	if (!buf)
   4161		return -ENOMEM;
   4162
   4163	/*
   4164	 * Compute the overhead (FS structures).  This is constant
   4165	 * for a given filesystem unless the number of block groups
    4166	 * changes, so we cache the previous value until it does.
   4167	 */
   4168
   4169	/*
   4170	 * All of the blocks before first_data_block are overhead
   4171	 */
   4172	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
   4173
   4174	/*
   4175	 * Add the overhead found in each block group
   4176	 */
   4177	for (i = 0; i < ngroups; i++) {
   4178		int blks;
   4179
   4180		blks = count_overhead(sb, i, buf);
   4181		overhead += blks;
   4182		if (blks)
   4183			memset(buf, 0, PAGE_SIZE);
   4184		cond_resched();
   4185	}
   4186
   4187	/*
   4188	 * Add the internal journal blocks whether the journal has been
   4189	 * loaded or not
   4190	 */
   4191	if (sbi->s_journal && !sbi->s_journal_bdev)
   4192		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
   4193	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
   4194		/* j_inum for internal journal is non-zero */
   4195		j_inode = ext4_get_journal_inode(sb, j_inum);
   4196		if (j_inode) {
   4197			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
   4198			overhead += EXT4_NUM_B2C(sbi, j_blocks);
   4199			iput(j_inode);
   4200		} else {
   4201			ext4_msg(sb, KERN_ERR, "can't get journal size");
   4202		}
   4203	}
   4204	sbi->s_overhead = overhead;
   4205	smp_wmb();
   4206	free_page((unsigned long) buf);
   4207	return 0;
   4208}
   4209
   4210static void ext4_set_resv_clusters(struct super_block *sb)
   4211{
   4212	ext4_fsblk_t resv_clusters;
   4213	struct ext4_sb_info *sbi = EXT4_SB(sb);
   4214
   4215	/*
   4216	 * There's no need to reserve anything when we aren't using extents.
   4217	 * The space estimates are exact, there are no unwritten extents,
   4218	 * hole punching doesn't need new metadata... This is needed especially
   4219	 * to keep ext2/3 backward compatibility.
   4220	 */
   4221	if (!ext4_has_feature_extents(sb))
   4222		return;
   4223	/*
   4224	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
    4225	 * This should cover the situations where we cannot afford to run
    4226	 * out of space, like for example punching a hole or converting
    4227	 * unwritten extents in the delalloc path. In most cases such an
    4228	 * allocation would require 1 or 2 blocks; higher numbers are
    4229	 * very rare.
   4230	 */
   4231	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
   4232			 sbi->s_cluster_bits);
   4233
   4234	do_div(resv_clusters, 50);
   4235	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
   4236
   4237	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
   4238}
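/*
 * Worked example for the reservation above: a 1 TiB filesystem with
 * 4 KiB clusters has 2^28 clusters; 2% of that (the do_div by 50) is
 * about 5.3 million, so the min_t() clamp keeps the reservation at
 * 4096 clusters, i.e. 16 MiB.
 */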
   4239
   4240static const char *ext4_quota_mode(struct super_block *sb)
   4241{
   4242#ifdef CONFIG_QUOTA
   4243	if (!ext4_quota_capable(sb))
   4244		return "none";
   4245
   4246	if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
   4247		return "journalled";
   4248	else
   4249		return "writeback";
   4250#else
   4251	return "disabled";
   4252#endif
   4253}
   4254
   4255static void ext4_setup_csum_trigger(struct super_block *sb,
   4256				    enum ext4_journal_trigger_type type,
   4257				    void (*trigger)(
   4258					struct jbd2_buffer_trigger_type *type,
   4259					struct buffer_head *bh,
   4260					void *mapped_data,
   4261					size_t size))
   4262{
   4263	struct ext4_sb_info *sbi = EXT4_SB(sb);
   4264
   4265	sbi->s_journal_triggers[type].sb = sb;
   4266	sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
   4267}
   4268
   4269static void ext4_free_sbi(struct ext4_sb_info *sbi)
   4270{
   4271	if (!sbi)
   4272		return;
   4273
   4274	kfree(sbi->s_blockgroup_lock);
   4275	fs_put_dax(sbi->s_daxdev);
   4276	kfree(sbi);
   4277}
   4278
   4279static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
   4280{
   4281	struct ext4_sb_info *sbi;
   4282
   4283	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
   4284	if (!sbi)
   4285		return NULL;
   4286
   4287	sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
   4288
   4289	sbi->s_blockgroup_lock =
   4290		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
   4291
   4292	if (!sbi->s_blockgroup_lock)
   4293		goto err_out;
   4294
   4295	sb->s_fs_info = sbi;
   4296	sbi->s_sb = sb;
   4297	return sbi;
   4298err_out:
   4299	fs_put_dax(sbi->s_daxdev);
   4300	kfree(sbi);
   4301	return NULL;
   4302}
   4303
   4304static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
   4305{
   4306	struct buffer_head *bh, **group_desc;
   4307	struct ext4_super_block *es = NULL;
   4308	struct ext4_sb_info *sbi = EXT4_SB(sb);
   4309	struct flex_groups **flex_groups;
   4310	ext4_fsblk_t block;
   4311	ext4_fsblk_t logical_sb_block;
   4312	unsigned long offset = 0;
   4313	unsigned long def_mount_opts;
   4314	struct inode *root;
   4315	int ret = -ENOMEM;
   4316	int blocksize, clustersize;
   4317	unsigned int db_count;
   4318	unsigned int i;
   4319	int needs_recovery, has_huge_files;
   4320	__u64 blocks_count;
   4321	int err = 0;
   4322	ext4_group_t first_not_zeroed;
   4323	struct ext4_fs_context *ctx = fc->fs_private;
   4324	int silent = fc->sb_flags & SB_SILENT;
   4325
   4326	/* Set defaults for the variables that will be set during parsing */
   4327	if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
   4328		ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
   4329
   4330	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
   4331	sbi->s_sectors_written_start =
   4332		part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
   4333
   4334	/* -EINVAL is default */
   4335	ret = -EINVAL;
   4336	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
   4337	if (!blocksize) {
   4338		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
   4339		goto out_fail;
   4340	}
   4341
   4342	/*
    4343	 * The ext4 superblock will not be buffer aligned for block sizes
    4344	 * other than 1kB.  We need to calculate the offset from the buffer start.
   4345	 */
   4346	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
   4347		logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
   4348		offset = do_div(logical_sb_block, blocksize);
   4349	} else {
   4350		logical_sb_block = sbi->s_sb_block;
   4351	}
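	/*
	 * E.g. with the default sb_block == 1 and a 4 KiB device block
	 * size, logical_sb_block becomes 0 and offset becomes 1024, so
	 * the superblock is read from the middle of device block 0.
	 */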
   4352
   4353	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
   4354	if (IS_ERR(bh)) {
   4355		ext4_msg(sb, KERN_ERR, "unable to read superblock");
   4356		ret = PTR_ERR(bh);
   4357		goto out_fail;
   4358	}
   4359	/*
   4360	 * Note: s_es must be initialized as soon as possible because
    4361	 *       some ext4 macros depend on its value
   4362	 */
   4363	es = (struct ext4_super_block *) (bh->b_data + offset);
   4364	sbi->s_es = es;
   4365	sb->s_magic = le16_to_cpu(es->s_magic);
   4366	if (sb->s_magic != EXT4_SUPER_MAGIC)
   4367		goto cantfind_ext4;
   4368	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
   4369
   4370	/* Warn if metadata_csum and gdt_csum are both set. */
   4371	if (ext4_has_feature_metadata_csum(sb) &&
   4372	    ext4_has_feature_gdt_csum(sb))
   4373		ext4_warning(sb, "metadata_csum and uninit_bg are "
   4374			     "redundant flags; please run fsck.");
   4375
   4376	/* Check for a known checksum algorithm */
   4377	if (!ext4_verify_csum_type(sb, es)) {
   4378		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
   4379			 "unknown checksum algorithm.");
   4380		silent = 1;
   4381		goto cantfind_ext4;
   4382	}
   4383	ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
   4384				ext4_orphan_file_block_trigger);
   4385
   4386	/* Load the checksum driver */
   4387	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
   4388	if (IS_ERR(sbi->s_chksum_driver)) {
   4389		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
   4390		ret = PTR_ERR(sbi->s_chksum_driver);
   4391		sbi->s_chksum_driver = NULL;
   4392		goto failed_mount;
   4393	}
   4394
   4395	/* Check superblock checksum */
   4396	if (!ext4_superblock_csum_verify(sb, es)) {
   4397		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
   4398			 "invalid superblock checksum.  Run e2fsck?");
   4399		silent = 1;
   4400		ret = -EFSBADCRC;
   4401		goto cantfind_ext4;
   4402	}
   4403
   4404	/* Precompute checksum seed for all metadata */
   4405	if (ext4_has_feature_csum_seed(sb))
   4406		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
   4407	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
   4408		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
   4409					       sizeof(es->s_uuid));
   4410
   4411	/* Set defaults before we parse the mount options */
   4412	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
   4413	set_opt(sb, INIT_INODE_TABLE);
   4414	if (def_mount_opts & EXT4_DEFM_DEBUG)
   4415		set_opt(sb, DEBUG);
   4416	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
   4417		set_opt(sb, GRPID);
   4418	if (def_mount_opts & EXT4_DEFM_UID16)
   4419		set_opt(sb, NO_UID32);
   4420	/* xattr user namespace & acls are now defaulted on */
   4421	set_opt(sb, XATTR_USER);
   4422#ifdef CONFIG_EXT4_FS_POSIX_ACL
   4423	set_opt(sb, POSIX_ACL);
   4424#endif
   4425	if (ext4_has_feature_fast_commit(sb))
   4426		set_opt2(sb, JOURNAL_FAST_COMMIT);
   4427	/* don't forget to enable journal_csum when metadata_csum is enabled. */
   4428	if (ext4_has_metadata_csum(sb))
   4429		set_opt(sb, JOURNAL_CHECKSUM);
   4430
   4431	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
   4432		set_opt(sb, JOURNAL_DATA);
   4433	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
   4434		set_opt(sb, ORDERED_DATA);
   4435	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
   4436		set_opt(sb, WRITEBACK_DATA);
   4437
   4438	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
   4439		set_opt(sb, ERRORS_PANIC);
   4440	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
   4441		set_opt(sb, ERRORS_CONT);
   4442	else
   4443		set_opt(sb, ERRORS_RO);
   4444	/* block_validity enabled by default; disable with noblock_validity */
   4445	set_opt(sb, BLOCK_VALIDITY);
   4446	if (def_mount_opts & EXT4_DEFM_DISCARD)
   4447		set_opt(sb, DISCARD);
   4448
   4449	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
   4450	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
   4451	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
   4452	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
   4453	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
   4454
   4455	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
   4456		set_opt(sb, BARRIER);
   4457
   4458	/*
   4459	 * enable delayed allocation by default
   4460	 * Use -o nodelalloc to turn it off
   4461	 */
   4462	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
   4463	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
   4464		set_opt(sb, DELALLOC);
   4465
   4466	/*
    4467	 * set the default s_li_wait_mult for lazyinit, in case no
    4468	 * mount option is specified.
   4469	 */
   4470	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
   4471
   4472	if (le32_to_cpu(es->s_log_block_size) >
   4473	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
   4474		ext4_msg(sb, KERN_ERR,
   4475			 "Invalid log block size: %u",
   4476			 le32_to_cpu(es->s_log_block_size));
   4477		goto failed_mount;
   4478	}
   4479	if (le32_to_cpu(es->s_log_cluster_size) >
   4480	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
   4481		ext4_msg(sb, KERN_ERR,
   4482			 "Invalid log cluster size: %u",
   4483			 le32_to_cpu(es->s_log_cluster_size));
   4484		goto failed_mount;
   4485	}
   4486
   4487	blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
   4488
   4489	if (blocksize == PAGE_SIZE)
   4490		set_opt(sb, DIOREAD_NOLOCK);
   4491
   4492	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
   4493		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
   4494		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
   4495	} else {
   4496		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
   4497		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
   4498		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
   4499			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
   4500				 sbi->s_first_ino);
   4501			goto failed_mount;
   4502		}
   4503		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
   4504		    (!is_power_of_2(sbi->s_inode_size)) ||
   4505		    (sbi->s_inode_size > blocksize)) {
   4506			ext4_msg(sb, KERN_ERR,
   4507			       "unsupported inode size: %d",
   4508			       sbi->s_inode_size);
   4509			ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
   4510			goto failed_mount;
   4511		}
   4512		/*
   4513		 * i_atime_extra is the last extra field available for
   4514		 * [acm]times in struct ext4_inode. Checking for that
   4515		 * field should suffice to ensure we have extra space
   4516		 * for all three.
   4517		 */
   4518		if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
   4519			sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
   4520			sb->s_time_gran = 1;
   4521			sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
   4522		} else {
   4523			sb->s_time_gran = NSEC_PER_SEC;
   4524			sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
   4525		}
   4526		sb->s_time_min = EXT4_TIMESTAMP_MIN;
   4527	}
   4528	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
   4529		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
   4530			EXT4_GOOD_OLD_INODE_SIZE;
   4531		if (ext4_has_feature_extra_isize(sb)) {
   4532			unsigned v, max = (sbi->s_inode_size -
   4533					   EXT4_GOOD_OLD_INODE_SIZE);
   4534
   4535			v = le16_to_cpu(es->s_want_extra_isize);
   4536			if (v > max) {
   4537				ext4_msg(sb, KERN_ERR,
   4538					 "bad s_want_extra_isize: %d", v);
   4539				goto failed_mount;
   4540			}
   4541			if (sbi->s_want_extra_isize < v)
   4542				sbi->s_want_extra_isize = v;
   4543
   4544			v = le16_to_cpu(es->s_min_extra_isize);
   4545			if (v > max) {
   4546				ext4_msg(sb, KERN_ERR,
   4547					 "bad s_min_extra_isize: %d", v);
   4548				goto failed_mount;
   4549			}
   4550			if (sbi->s_want_extra_isize < v)
   4551				sbi->s_want_extra_isize = v;
   4552		}
   4553	}
   4554
   4555	err = parse_apply_sb_mount_options(sb, ctx);
   4556	if (err < 0)
   4557		goto failed_mount;
   4558
   4559	sbi->s_def_mount_opt = sbi->s_mount_opt;
   4560
   4561	err = ext4_check_opt_consistency(fc, sb);
   4562	if (err < 0)
   4563		goto failed_mount;
   4564
   4565	ext4_apply_options(fc, sb);
   4566
   4567#if IS_ENABLED(CONFIG_UNICODE)
   4568	if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
   4569		const struct ext4_sb_encodings *encoding_info;
   4570		struct unicode_map *encoding;
   4571		__u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);
   4572
   4573		encoding_info = ext4_sb_read_encoding(es);
   4574		if (!encoding_info) {
   4575			ext4_msg(sb, KERN_ERR,
   4576				 "Encoding requested by superblock is unknown");
   4577			goto failed_mount;
   4578		}
   4579
   4580		encoding = utf8_load(encoding_info->version);
   4581		if (IS_ERR(encoding)) {
   4582			ext4_msg(sb, KERN_ERR,
   4583				 "can't mount with superblock charset: %s-%u.%u.%u "
   4584				 "not supported by the kernel. flags: 0x%x.",
   4585				 encoding_info->name,
   4586				 unicode_major(encoding_info->version),
   4587				 unicode_minor(encoding_info->version),
   4588				 unicode_rev(encoding_info->version),
   4589				 encoding_flags);
   4590			goto failed_mount;
   4591		}
   4592		ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
   4593			 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
   4594			 unicode_major(encoding_info->version),
   4595			 unicode_minor(encoding_info->version),
   4596			 unicode_rev(encoding_info->version),
   4597			 encoding_flags);
   4598
   4599		sb->s_encoding = encoding;
   4600		sb->s_encoding_flags = encoding_flags;
   4601	}
   4602#endif
   4603
   4604	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
   4605		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
   4606		/* can't mount with both data=journal and dioread_nolock. */
   4607		clear_opt(sb, DIOREAD_NOLOCK);
   4608		clear_opt2(sb, JOURNAL_FAST_COMMIT);
   4609		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
   4610			ext4_msg(sb, KERN_ERR, "can't mount with "
   4611				 "both data=journal and delalloc");
   4612			goto failed_mount;
   4613		}
   4614		if (test_opt(sb, DAX_ALWAYS)) {
   4615			ext4_msg(sb, KERN_ERR, "can't mount with "
   4616				 "both data=journal and dax");
   4617			goto failed_mount;
   4618		}
   4619		if (ext4_has_feature_encrypt(sb)) {
   4620			ext4_msg(sb, KERN_WARNING,
   4621				 "encrypted files will use data=ordered "
   4622				 "instead of data journaling mode");
   4623		}
   4624		if (test_opt(sb, DELALLOC))
   4625			clear_opt(sb, DELALLOC);
   4626	} else {
   4627		sb->s_iflags |= SB_I_CGROUPWB;
   4628	}
   4629
   4630	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
   4631		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
   4632
   4633	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
   4634	    (ext4_has_compat_features(sb) ||
   4635	     ext4_has_ro_compat_features(sb) ||
   4636	     ext4_has_incompat_features(sb)))
   4637		ext4_msg(sb, KERN_WARNING,
   4638		       "feature flags set on rev 0 fs, "
   4639		       "running e2fsck is recommended");
   4640
   4641	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
   4642		set_opt2(sb, HURD_COMPAT);
   4643		if (ext4_has_feature_64bit(sb)) {
   4644			ext4_msg(sb, KERN_ERR,
   4645				 "The Hurd can't support 64-bit file systems");
   4646			goto failed_mount;
   4647		}
   4648
   4649		/*
   4650		 * ea_inode feature uses l_i_version field which is not
   4651		 * available in HURD_COMPAT mode.
   4652		 */
   4653		if (ext4_has_feature_ea_inode(sb)) {
   4654			ext4_msg(sb, KERN_ERR,
   4655				 "ea_inode feature is not supported for Hurd");
   4656			goto failed_mount;
   4657		}
   4658	}
   4659
   4660	if (IS_EXT2_SB(sb)) {
   4661		if (ext2_feature_set_ok(sb))
   4662			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
   4663				 "using the ext4 subsystem");
   4664		else {
   4665			/*
    4666			 * If we're probing, be silent if this looks like
    4667			 * it's actually an ext[34] filesystem.
   4668			 */
   4669			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
   4670				goto failed_mount;
   4671			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
   4672				 "to feature incompatibilities");
   4673			goto failed_mount;
   4674		}
   4675	}
   4676
   4677	if (IS_EXT3_SB(sb)) {
   4678		if (ext3_feature_set_ok(sb))
   4679			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
   4680				 "using the ext4 subsystem");
   4681		else {
   4682			/*
    4683			 * If we're probing, be silent if this looks like
    4684			 * it's actually an ext4 filesystem.
   4685			 */
   4686			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
   4687				goto failed_mount;
   4688			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
   4689				 "to feature incompatibilities");
   4690			goto failed_mount;
   4691		}
   4692	}
   4693
   4694	/*
   4695	 * Check feature flags regardless of the revision level, since we
   4696	 * previously didn't change the revision level when setting the flags,
   4697	 * so there is a chance incompat flags are set on a rev 0 filesystem.
   4698	 */
   4699	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
   4700		goto failed_mount;
   4701
   4702	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
   4703		ext4_msg(sb, KERN_ERR,
   4704			 "Number of reserved GDT blocks insanely large: %d",
   4705			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
   4706		goto failed_mount;
   4707	}
   4708
   4709	if (sbi->s_daxdev) {
   4710		if (blocksize == PAGE_SIZE)
   4711			set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
   4712		else
   4713			ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
   4714	}
   4715
   4716	if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
   4717		if (ext4_has_feature_inline_data(sb)) {
   4718			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
   4719					" that may contain inline data");
   4720			goto failed_mount;
   4721		}
   4722		if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
   4723			ext4_msg(sb, KERN_ERR,
   4724				"DAX unsupported by block device.");
   4725			goto failed_mount;
   4726		}
   4727	}
   4728
   4729	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
   4730		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
   4731			 es->s_encryption_level);
   4732		goto failed_mount;
   4733	}
   4734
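        	/*
        	 * If the on-disk block size differs from the one used for the
        	 * initial superblock read, switch the device over to it and
        	 * re-read the superblock from its new location.
        	 */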
   4735	if (sb->s_blocksize != blocksize) {
   4736		/*
   4737		 * bh must be released before kill_bdev(), otherwise
    4738		 * neither it nor its page will be freed. kill_bdev()
   4739		 * is called by sb_set_blocksize().
   4740		 */
   4741		brelse(bh);
   4742		/* Validate the filesystem blocksize */
   4743		if (!sb_set_blocksize(sb, blocksize)) {
   4744			ext4_msg(sb, KERN_ERR, "bad block size %d",
   4745					blocksize);
   4746			bh = NULL;
   4747			goto failed_mount;
   4748		}
   4749
   4750		logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
   4751		offset = do_div(logical_sb_block, blocksize);
   4752		bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
   4753		if (IS_ERR(bh)) {
   4754			ext4_msg(sb, KERN_ERR,
   4755			       "Can't read superblock on 2nd try");
   4756			ret = PTR_ERR(bh);
   4757			bh = NULL;
   4758			goto failed_mount;
   4759		}
   4760		es = (struct ext4_super_block *)(bh->b_data + offset);
   4761		sbi->s_es = es;
   4762		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
   4763			ext4_msg(sb, KERN_ERR,
   4764			       "Magic mismatch, very weird!");
   4765			goto failed_mount;
   4766		}
   4767	}
   4768
   4769	has_huge_files = ext4_has_feature_huge_file(sb);
   4770	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
   4771						      has_huge_files);
   4772	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
   4773
   4774	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
   4775	if (ext4_has_feature_64bit(sb)) {
   4776		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
   4777		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
   4778		    !is_power_of_2(sbi->s_desc_size)) {
   4779			ext4_msg(sb, KERN_ERR,
   4780			       "unsupported descriptor size %lu",
   4781			       sbi->s_desc_size);
   4782			goto failed_mount;
   4783		}
   4784	} else
   4785		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
   4786
   4787	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
   4788	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
   4789
   4790	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
   4791	if (sbi->s_inodes_per_block == 0)
   4792		goto cantfind_ext4;
   4793	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
   4794	    sbi->s_inodes_per_group > blocksize * 8) {
    4795		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
   4796			 sbi->s_inodes_per_group);
   4797		goto failed_mount;
   4798	}
   4799	sbi->s_itb_per_group = sbi->s_inodes_per_group /
   4800					sbi->s_inodes_per_block;
   4801	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
   4802	sbi->s_sbh = bh;
   4803	sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
   4804	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
   4805	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
   4806
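        	/*
        	 * Seed the htree hash and work out which dx hash variant
        	 * (signed vs. unsigned char) applies; legacy superblocks that
        	 * set neither hash flag are stamped according to this
        	 * platform's char signedness.
        	 */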
   4807	for (i = 0; i < 4; i++)
   4808		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
   4809	sbi->s_def_hash_version = es->s_def_hash_version;
   4810	if (ext4_has_feature_dir_index(sb)) {
   4811		i = le32_to_cpu(es->s_flags);
   4812		if (i & EXT2_FLAGS_UNSIGNED_HASH)
   4813			sbi->s_hash_unsigned = 3;
   4814		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
   4815#ifdef __CHAR_UNSIGNED__
   4816			if (!sb_rdonly(sb))
   4817				es->s_flags |=
   4818					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
   4819			sbi->s_hash_unsigned = 3;
   4820#else
   4821			if (!sb_rdonly(sb))
   4822				es->s_flags |=
   4823					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
   4824#endif
   4825		}
   4826	}
   4827
   4828	/* Handle clustersize */
   4829	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
   4830	if (ext4_has_feature_bigalloc(sb)) {
   4831		if (clustersize < blocksize) {
   4832			ext4_msg(sb, KERN_ERR,
   4833				 "cluster size (%d) smaller than "
   4834				 "block size (%d)", clustersize, blocksize);
   4835			goto failed_mount;
   4836		}
   4837		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
   4838			le32_to_cpu(es->s_log_block_size);
   4839		sbi->s_clusters_per_group =
   4840			le32_to_cpu(es->s_clusters_per_group);
   4841		if (sbi->s_clusters_per_group > blocksize * 8) {
   4842			ext4_msg(sb, KERN_ERR,
   4843				 "#clusters per group too big: %lu",
   4844				 sbi->s_clusters_per_group);
   4845			goto failed_mount;
   4846		}
   4847		if (sbi->s_blocks_per_group !=
   4848		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
   4849			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
   4850				 "clusters per group (%lu) inconsistent",
   4851				 sbi->s_blocks_per_group,
   4852				 sbi->s_clusters_per_group);
   4853			goto failed_mount;
   4854		}
   4855	} else {
   4856		if (clustersize != blocksize) {
   4857			ext4_msg(sb, KERN_ERR,
   4858				 "fragment/cluster size (%d) != "
   4859				 "block size (%d)", clustersize, blocksize);
   4860			goto failed_mount;
   4861		}
   4862		if (sbi->s_blocks_per_group > blocksize * 8) {
   4863			ext4_msg(sb, KERN_ERR,
   4864				 "#blocks per group too big: %lu",
   4865				 sbi->s_blocks_per_group);
   4866			goto failed_mount;
   4867		}
   4868		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
   4869		sbi->s_cluster_bits = 0;
   4870	}
   4871	sbi->s_cluster_ratio = clustersize / blocksize;
   4872
    4873	/* Do we have the standard group size of clustersize * 8 blocks? */
   4874	if (sbi->s_blocks_per_group == clustersize << 3)
   4875		set_opt2(sb, STD_GROUP_SIZE);
   4876
   4877	/*
   4878	 * Test whether we have more sectors than will fit in sector_t,
   4879	 * and whether the max offset is addressable by the page cache.
   4880	 */
   4881	err = generic_check_addressable(sb->s_blocksize_bits,
   4882					ext4_blocks_count(es));
   4883	if (err) {
   4884		ext4_msg(sb, KERN_ERR, "filesystem"
   4885			 " too large to mount safely on this system");
   4886		goto failed_mount;
   4887	}
   4888
   4889	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
   4890		goto cantfind_ext4;
   4891
   4892	/* check blocks count against device size */
   4893	blocks_count = sb_bdev_nr_blocks(sb);
   4894	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
   4895		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
   4896		       "exceeds size of device (%llu blocks)",
   4897		       ext4_blocks_count(es), blocks_count);
   4898		goto failed_mount;
   4899	}
   4900
   4901	/*
   4902	 * It makes no sense for the first data block to be beyond the end
   4903	 * of the filesystem.
   4904	 */
   4905	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
   4906		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
   4907			 "block %u is beyond end of filesystem (%llu)",
   4908			 le32_to_cpu(es->s_first_data_block),
   4909			 ext4_blocks_count(es));
   4910		goto failed_mount;
   4911	}
   4912	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
   4913	    (sbi->s_cluster_ratio == 1)) {
   4914		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
   4915			 "block is 0 with a 1k block and cluster size");
   4916		goto failed_mount;
   4917	}
   4918
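        	/*
        	 * Compute the number of block groups, rounding up:
        	 * groups = (blocks - first_data_block + BPG - 1) / BPG
        	 */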
   4919	blocks_count = (ext4_blocks_count(es) -
   4920			le32_to_cpu(es->s_first_data_block) +
   4921			EXT4_BLOCKS_PER_GROUP(sb) - 1);
   4922	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
   4923	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
   4924		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
   4925		       "(block count %llu, first data block %u, "
   4926		       "blocks per group %lu)", blocks_count,
   4927		       ext4_blocks_count(es),
   4928		       le32_to_cpu(es->s_first_data_block),
   4929		       EXT4_BLOCKS_PER_GROUP(sb));
   4930		goto failed_mount;
   4931	}
   4932	sbi->s_groups_count = blocks_count;
   4933	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
   4934			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
   4935	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
   4936	    le32_to_cpu(es->s_inodes_count)) {
   4937		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
   4938			 le32_to_cpu(es->s_inodes_count),
   4939			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
   4940		ret = -EINVAL;
   4941		goto failed_mount;
   4942	}
   4943	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
   4944		   EXT4_DESC_PER_BLOCK(sb);
   4945	if (ext4_has_feature_meta_bg(sb)) {
   4946		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
   4947			ext4_msg(sb, KERN_WARNING,
   4948				 "first meta block group too large: %u "
   4949				 "(group descriptor block count %u)",
   4950				 le32_to_cpu(es->s_first_meta_bg), db_count);
   4951			goto failed_mount;
   4952		}
   4953	}
   4954	rcu_assign_pointer(sbi->s_group_desc,
   4955			   kvmalloc_array(db_count,
   4956					  sizeof(struct buffer_head *),
   4957					  GFP_KERNEL));
   4958	if (sbi->s_group_desc == NULL) {
   4959		ext4_msg(sb, KERN_ERR, "not enough memory");
   4960		ret = -ENOMEM;
   4961		goto failed_mount;
   4962	}
   4963
   4964	bgl_lock_init(sbi->s_blockgroup_lock);
   4965
   4966	/* Pre-read the descriptors into the buffer cache */
   4967	for (i = 0; i < db_count; i++) {
   4968		block = descriptor_loc(sb, logical_sb_block, i);
   4969		ext4_sb_breadahead_unmovable(sb, block);
   4970	}
   4971
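        	/*
        	 * Now read each descriptor block synchronously; thanks to the
        	 * readahead above these should mostly be buffer cache hits.
        	 */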
   4972	for (i = 0; i < db_count; i++) {
   4973		struct buffer_head *bh;
   4974
   4975		block = descriptor_loc(sb, logical_sb_block, i);
   4976		bh = ext4_sb_bread_unmovable(sb, block);
   4977		if (IS_ERR(bh)) {
   4978			ext4_msg(sb, KERN_ERR,
   4979			       "can't read group descriptor %d", i);
   4980			db_count = i;
   4981			ret = PTR_ERR(bh);
   4982			goto failed_mount2;
   4983		}
   4984		rcu_read_lock();
   4985		rcu_dereference(sbi->s_group_desc)[i] = bh;
   4986		rcu_read_unlock();
   4987	}
   4988	sbi->s_gdb_count = db_count;
   4989	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
   4990		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
   4991		ret = -EFSCORRUPTED;
   4992		goto failed_mount2;
   4993	}
   4994
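        	/* Set up periodic error reporting and deferred error handling */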
   4995	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
   4996	spin_lock_init(&sbi->s_error_lock);
   4997	INIT_WORK(&sbi->s_error_work, flush_stashed_error_work);
   4998
   4999	/* Register extent status tree shrinker */
   5000	if (ext4_es_register_shrinker(sbi))
   5001		goto failed_mount3;
   5002
   5003	sbi->s_stripe = ext4_get_stripe_size(sbi);
   5004	sbi->s_extent_max_zeroout_kb = 32;
   5005
   5006	/*
   5007	 * set up enough so that it can read an inode
   5008	 */
   5009	sb->s_op = &ext4_sops;
   5010	sb->s_export_op = &ext4_export_ops;
   5011	sb->s_xattr = ext4_xattr_handlers;
   5012#ifdef CONFIG_FS_ENCRYPTION
   5013	sb->s_cop = &ext4_cryptops;
   5014#endif
   5015#ifdef CONFIG_FS_VERITY
   5016	sb->s_vop = &ext4_verityops;
   5017#endif
   5018#ifdef CONFIG_QUOTA
   5019	sb->dq_op = &ext4_quota_operations;
   5020	if (ext4_has_feature_quota(sb))
   5021		sb->s_qcop = &dquot_quotactl_sysfile_ops;
   5022	else
   5023		sb->s_qcop = &ext4_qctl_operations;
   5024	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
   5025#endif
   5026	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
   5027
   5028	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
   5029	mutex_init(&sbi->s_orphan_lock);
   5030
   5031	/* Initialize fast commit stuff */
   5032	atomic_set(&sbi->s_fc_subtid, 0);
   5033	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
   5034	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
   5035	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
   5036	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
   5037	sbi->s_fc_bytes = 0;
   5038	ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
   5039	sbi->s_fc_ineligible_tid = 0;
   5040	spin_lock_init(&sbi->s_fc_lock);
   5041	memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
   5042	sbi->s_fc_replay_state.fc_regions = NULL;
   5043	sbi->s_fc_replay_state.fc_regions_size = 0;
   5044	sbi->s_fc_replay_state.fc_regions_used = 0;
   5045	sbi->s_fc_replay_state.fc_regions_valid = 0;
   5046	sbi->s_fc_replay_state.fc_modified_inodes = NULL;
   5047	sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
   5048	sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
   5049
   5050	sb->s_root = NULL;
   5051
   5052	needs_recovery = (es->s_last_orphan != 0 ||
   5053			  ext4_has_feature_orphan_present(sb) ||
   5054			  ext4_has_feature_journal_needs_recovery(sb));
   5055
   5056	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
   5057		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
   5058			goto failed_mount3a;
   5059
   5060	/*
   5061	 * The first inode we look at is the journal inode.  Don't try
   5062	 * root first: it may be modified in the journal!
   5063	 */
   5064	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
   5065		err = ext4_load_journal(sb, es, ctx->journal_devnum);
   5066		if (err)
   5067			goto failed_mount3a;
   5068	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
   5069		   ext4_has_feature_journal_needs_recovery(sb)) {
   5070		ext4_msg(sb, KERN_ERR, "required journal recovery "
   5071		       "suppressed and not mounted read-only");
   5072		goto failed_mount_wq;
   5073	} else {
   5074		/* Nojournal mode, all journal mount options are illegal */
   5075		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
   5076			ext4_msg(sb, KERN_ERR, "can't mount with "
   5077				 "journal_checksum, fs mounted w/o journal");
   5078			goto failed_mount_wq;
   5079		}
   5080		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
   5081			ext4_msg(sb, KERN_ERR, "can't mount with "
   5082				 "journal_async_commit, fs mounted w/o journal");
   5083			goto failed_mount_wq;
   5084		}
   5085		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
   5086			ext4_msg(sb, KERN_ERR, "can't mount with "
   5087				 "commit=%lu, fs mounted w/o journal",
   5088				 sbi->s_commit_interval / HZ);
   5089			goto failed_mount_wq;
   5090		}
   5091		if (EXT4_MOUNT_DATA_FLAGS &
   5092		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
   5093			ext4_msg(sb, KERN_ERR, "can't mount with "
   5094				 "data=, fs mounted w/o journal");
   5095			goto failed_mount_wq;
   5096		}
   5097		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
   5098		clear_opt(sb, JOURNAL_CHECKSUM);
   5099		clear_opt(sb, DATA_FLAGS);
   5100		clear_opt2(sb, JOURNAL_FAST_COMMIT);
   5101		sbi->s_journal = NULL;
   5102		needs_recovery = 0;
   5103		goto no_journal;
   5104	}
   5105
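        	/*
        	 * The journal is loaded; make sure its feature flags match
        	 * what the filesystem and the mount options require.
        	 */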
   5106	if (ext4_has_feature_64bit(sb) &&
   5107	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
   5108				       JBD2_FEATURE_INCOMPAT_64BIT)) {
   5109		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
   5110		goto failed_mount_wq;
   5111	}
   5112
   5113	if (!set_journal_csum_feature_set(sb)) {
   5114		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
   5115			 "feature set");
   5116		goto failed_mount_wq;
   5117	}
   5118
   5119	if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
   5120		!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
   5121					  JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
   5122		ext4_msg(sb, KERN_ERR,
   5123			"Failed to set fast commit journal feature");
   5124		goto failed_mount_wq;
   5125	}
   5126
   5127	/* We have now updated the journal if required, so we can
   5128	 * validate the data journaling mode. */
   5129	switch (test_opt(sb, DATA_FLAGS)) {
   5130	case 0:
   5131		/* No mode set, assume a default based on the journal
   5132		 * capabilities: ORDERED_DATA if the journal can
   5133		 * cope, else JOURNAL_DATA
   5134		 */
   5135		if (jbd2_journal_check_available_features
   5136		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
   5137			set_opt(sb, ORDERED_DATA);
   5138			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
   5139		} else {
   5140			set_opt(sb, JOURNAL_DATA);
   5141			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
   5142		}
   5143		break;
   5144
   5145	case EXT4_MOUNT_ORDERED_DATA:
   5146	case EXT4_MOUNT_WRITEBACK_DATA:
   5147		if (!jbd2_journal_check_available_features
   5148		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
   5149			ext4_msg(sb, KERN_ERR, "Journal does not support "
   5150			       "requested data journaling mode");
   5151			goto failed_mount_wq;
   5152		}
   5153		break;
   5154	default:
   5155		break;
   5156	}
   5157
   5158	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
   5159	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
   5160		ext4_msg(sb, KERN_ERR, "can't mount with "
   5161			"journal_async_commit in data=ordered mode");
   5162		goto failed_mount_wq;
   5163	}
   5164
   5165	set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
   5166
   5167	sbi->s_journal->j_submit_inode_data_buffers =
   5168		ext4_journal_submit_inode_data_buffers;
   5169	sbi->s_journal->j_finish_inode_data_buffers =
   5170		ext4_journal_finish_inode_data_buffers;
   5171
   5172no_journal:
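        	/* From here on the journal and no-journal paths are shared */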
   5173	if (!test_opt(sb, NO_MBCACHE)) {
   5174		sbi->s_ea_block_cache = ext4_xattr_create_cache();
   5175		if (!sbi->s_ea_block_cache) {
   5176			ext4_msg(sb, KERN_ERR,
   5177				 "Failed to create ea_block_cache");
   5178			goto failed_mount_wq;
   5179		}
   5180
   5181		if (ext4_has_feature_ea_inode(sb)) {
   5182			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
   5183			if (!sbi->s_ea_inode_cache) {
   5184				ext4_msg(sb, KERN_ERR,
   5185					 "Failed to create ea_inode_cache");
   5186				goto failed_mount_wq;
   5187			}
   5188		}
   5189	}
   5190
   5191	if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
   5192		ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
   5193		goto failed_mount_wq;
   5194	}
   5195
   5196	/*
   5197	 * Get the # of file system overhead blocks from the
   5198	 * superblock if present.
   5199	 */
   5200	sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
   5201	/* ignore the precalculated value if it is ridiculous */
   5202	if (sbi->s_overhead > ext4_blocks_count(es))
   5203		sbi->s_overhead = 0;
   5204	/*
   5205	 * If the bigalloc feature is not enabled recalculating the
   5206	 * overhead doesn't take long, so we might as well just redo
   5207	 * it to make sure we are using the correct value.
   5208	 */
   5209	if (!ext4_has_feature_bigalloc(sb))
   5210		sbi->s_overhead = 0;
   5211	if (sbi->s_overhead == 0) {
   5212		err = ext4_calculate_overhead(sb);
   5213		if (err)
   5214			goto failed_mount_wq;
   5215	}
   5216
   5217	/*
   5218	 * The maximum number of concurrent works can be high and
   5219	 * concurrency isn't really necessary.  Limit it to 1.
   5220	 */
   5221	EXT4_SB(sb)->rsv_conversion_wq =
   5222		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
   5223	if (!EXT4_SB(sb)->rsv_conversion_wq) {
   5224		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
   5225		ret = -ENOMEM;
   5226		goto failed_mount4;
   5227	}
   5228
   5229	/*
   5230	 * The jbd2_journal_load will have done any necessary log recovery,
   5231	 * so we can safely mount the rest of the filesystem now.
   5232	 */
   5233
   5234	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
   5235	if (IS_ERR(root)) {
   5236		ext4_msg(sb, KERN_ERR, "get root inode failed");
   5237		ret = PTR_ERR(root);
   5238		root = NULL;
   5239		goto failed_mount4;
   5240	}
   5241	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
   5242		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
   5243		iput(root);
   5244		goto failed_mount4;
   5245	}
   5246
   5247	sb->s_root = d_make_root(root);
   5248	if (!sb->s_root) {
   5249		ext4_msg(sb, KERN_ERR, "get root dentry failed");
   5250		ret = -ENOMEM;
   5251		goto failed_mount4;
   5252	}
   5253
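        	/*
        	 * If ext4_setup_super() asks for a read-only mount (-EROFS),
        	 * downgrade the mount in place instead of failing it.
        	 */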
   5254	ret = ext4_setup_super(sb, es, sb_rdonly(sb));
   5255	if (ret == -EROFS) {
   5256		sb->s_flags |= SB_RDONLY;
   5257		ret = 0;
   5258	} else if (ret)
   5259		goto failed_mount4a;
   5260
   5261	ext4_set_resv_clusters(sb);
   5262
   5263	if (test_opt(sb, BLOCK_VALIDITY)) {
   5264		err = ext4_setup_system_zone(sb);
   5265		if (err) {
   5266			ext4_msg(sb, KERN_ERR, "failed to initialize system "
   5267				 "zone (%d)", err);
   5268			goto failed_mount4a;
   5269		}
   5270	}
   5271	ext4_fc_replay_cleanup(sb);
   5272
   5273	ext4_ext_init(sb);
   5274
   5275	/*
    5276	 * Enable optimize_scan if the number of groups is >= threshold. This
    5277	 * can be turned off by passing "mb_optimize_scan=0". This can also be
    5278	 * turned on forcefully by passing "mb_optimize_scan=1".
   5279	 */
   5280	if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
   5281		if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
   5282			set_opt2(sb, MB_OPTIMIZE_SCAN);
   5283		else
   5284			clear_opt2(sb, MB_OPTIMIZE_SCAN);
   5285	}
   5286
   5287	err = ext4_mb_init(sb);
   5288	if (err) {
   5289		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
   5290			 err);
   5291		goto failed_mount5;
   5292	}
   5293
   5294	/*
   5295	 * We can only set up the journal commit callback once
   5296	 * mballoc is initialized
   5297	 */
   5298	if (sbi->s_journal)
   5299		sbi->s_journal->j_commit_callback =
   5300			ext4_journal_commit_callback;
   5301
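        	/*
        	 * Seed the percpu free-cluster/inode/dir counters from an
        	 * on-disk scan; the superblock copies are refreshed from
        	 * these counters when the superblock is written out.
        	 */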
   5302	block = ext4_count_free_clusters(sb);
   5303	ext4_free_blocks_count_set(sbi->s_es,
   5304				   EXT4_C2B(sbi, block));
   5305	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
   5306				  GFP_KERNEL);
   5307	if (!err) {
   5308		unsigned long freei = ext4_count_free_inodes(sb);
   5309		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
   5310		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
   5311					  GFP_KERNEL);
   5312	}
   5313	if (!err)
   5314		err = percpu_counter_init(&sbi->s_dirs_counter,
   5315					  ext4_count_dirs(sb), GFP_KERNEL);
   5316	if (!err)
   5317		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
   5318					  GFP_KERNEL);
   5319	if (!err)
   5320		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
   5321					  GFP_KERNEL);
   5322	if (!err)
   5323		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
   5324
   5325	if (err) {
   5326		ext4_msg(sb, KERN_ERR, "insufficient memory");
   5327		goto failed_mount6;
   5328	}
   5329
   5330	if (ext4_has_feature_flex_bg(sb))
   5331		if (!ext4_fill_flex_info(sb)) {
   5332			ext4_msg(sb, KERN_ERR,
   5333			       "unable to initialize "
   5334			       "flex_bg meta info!");
   5335			ret = -ENOMEM;
   5336			goto failed_mount6;
   5337		}
   5338
   5339	err = ext4_register_li_request(sb, first_not_zeroed);
   5340	if (err)
   5341		goto failed_mount6;
   5342
   5343	err = ext4_register_sysfs(sb);
   5344	if (err)
   5345		goto failed_mount7;
   5346
   5347	err = ext4_init_orphan_info(sb);
   5348	if (err)
   5349		goto failed_mount8;
   5350#ifdef CONFIG_QUOTA
   5351	/* Enable quota usage during mount. */
   5352	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
   5353		err = ext4_enable_quotas(sb);
   5354		if (err)
   5355			goto failed_mount9;
   5356	}
   5357#endif  /* CONFIG_QUOTA */
   5358
   5359	/*
    5360	 * Save the original bdev mapping's wb_err value, which can be
    5361	 * used to detect metadata async write errors.
   5362	 */
   5363	spin_lock_init(&sbi->s_bdev_wb_lock);
   5364	errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
   5365				 &sbi->s_bdev_wb_err);
   5366	sb->s_bdev->bd_super = sb;
   5367	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
   5368	ext4_orphan_cleanup(sb, es);
   5369	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
   5370	/*
   5371	 * Update the checksum after updating free space/inode counters and
   5372	 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
    5373	 * checksum in the buffer cache until it is written out, and
    5374	 * e2fsprogs programs trying to open the file system immediately
    5375	 * after it is mounted can fail.
   5376	 */
   5377	ext4_superblock_csum_set(sb);
   5378	if (needs_recovery) {
   5379		ext4_msg(sb, KERN_INFO, "recovery complete");
   5380		err = ext4_mark_recovery_complete(sb, es);
   5381		if (err)
   5382			goto failed_mount9;
   5383	}
   5384
   5385	if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
   5386		ext4_msg(sb, KERN_WARNING,
   5387			 "mounting with \"discard\" option, but the device does not support discard");
   5388
   5389	if (es->s_error_count)
   5390		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
   5391
   5392	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
   5393	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
   5394	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
   5395	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
   5396	atomic_set(&sbi->s_warning_count, 0);
   5397	atomic_set(&sbi->s_msg_count, 0);
   5398
   5399	return 0;
   5400
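        /*
         * Error exits: each failed_mount* label below unwinds the state
         * set up before the corresponding failure point.
         */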
   5401cantfind_ext4:
   5402	if (!silent)
   5403		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
   5404	goto failed_mount;
   5405
   5406failed_mount9:
   5407	ext4_release_orphan_info(sb);
   5408failed_mount8:
   5409	ext4_unregister_sysfs(sb);
   5410	kobject_put(&sbi->s_kobj);
   5411failed_mount7:
   5412	ext4_unregister_li_request(sb);
   5413failed_mount6:
   5414	ext4_mb_release(sb);
   5415	rcu_read_lock();
   5416	flex_groups = rcu_dereference(sbi->s_flex_groups);
   5417	if (flex_groups) {
   5418		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
   5419			kvfree(flex_groups[i]);
   5420		kvfree(flex_groups);
   5421	}
   5422	rcu_read_unlock();
   5423	percpu_counter_destroy(&sbi->s_freeclusters_counter);
   5424	percpu_counter_destroy(&sbi->s_freeinodes_counter);
   5425	percpu_counter_destroy(&sbi->s_dirs_counter);
   5426	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
   5427	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
   5428	percpu_free_rwsem(&sbi->s_writepages_rwsem);
   5429failed_mount5:
   5430	ext4_ext_release(sb);
   5431	ext4_release_system_zone(sb);
   5432failed_mount4a:
   5433	dput(sb->s_root);
   5434	sb->s_root = NULL;
   5435failed_mount4:
   5436	ext4_msg(sb, KERN_ERR, "mount failed");
   5437	if (EXT4_SB(sb)->rsv_conversion_wq)
   5438		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
   5439failed_mount_wq:
   5440	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
   5441	sbi->s_ea_inode_cache = NULL;
   5442
   5443	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
   5444	sbi->s_ea_block_cache = NULL;
   5445
   5446	if (sbi->s_journal) {
   5447		/* flush s_error_work before journal destroy. */
   5448		flush_work(&sbi->s_error_work);
   5449		jbd2_journal_destroy(sbi->s_journal);
   5450		sbi->s_journal = NULL;
   5451	}
   5452failed_mount3a:
   5453	ext4_es_unregister_shrinker(sbi);
   5454failed_mount3:
   5455	/* flush s_error_work before sbi destroy */
   5456	flush_work(&sbi->s_error_work);
   5457	del_timer_sync(&sbi->s_err_report);
   5458	ext4_stop_mmpd(sbi);
   5459failed_mount2:
   5460	rcu_read_lock();
   5461	group_desc = rcu_dereference(sbi->s_group_desc);
   5462	for (i = 0; i < db_count; i++)
   5463		brelse(group_desc[i]);
   5464	kvfree(group_desc);
   5465	rcu_read_unlock();
   5466failed_mount:
   5467	if (sbi->s_chksum_driver)
   5468		crypto_free_shash(sbi->s_chksum_driver);
   5469
   5470#if IS_ENABLED(CONFIG_UNICODE)
   5471	utf8_unload(sb->s_encoding);
   5472#endif
   5473
   5474#ifdef CONFIG_QUOTA
   5475	for (i = 0; i < EXT4_MAXQUOTAS; i++)
   5476		kfree(get_qf_name(sb, sbi, i));
   5477#endif
   5478	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
   5479	/* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
   5480	brelse(bh);
   5481	ext4_blkdev_remove(sbi);
   5482out_fail:
   5483	sb->s_fs_info = NULL;
   5484	return err ? err : ret;
   5485}
   5486
   5487static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
   5488{
   5489	struct ext4_fs_context *ctx = fc->fs_private;
   5490	struct ext4_sb_info *sbi;
   5491	const char *descr;
   5492	int ret;
   5493
   5494	sbi = ext4_alloc_sbi(sb);
   5495	if (!sbi)
   5496		return -ENOMEM;
   5497
   5498	fc->s_fs_info = sbi;
   5499
   5500	/* Cleanup superblock name */
   5501	strreplace(sb->s_id, '/', '!');
   5502
   5503	sbi->s_sb_block = 1;	/* Default super block location */
   5504	if (ctx->spec & EXT4_SPEC_s_sb_block)
   5505		sbi->s_sb_block = ctx->s_sb_block;
   5506
   5507	ret = __ext4_fill_super(fc, sb);
   5508	if (ret < 0)
   5509		goto free_sbi;
   5510
   5511	if (sbi->s_journal) {
   5512		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
   5513			descr = " journalled data mode";
   5514		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
   5515			descr = " ordered data mode";
   5516		else
   5517			descr = " writeback data mode";
   5518	} else
   5519		descr = "out journal";
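        		/* "with%s" + "out journal" prints "without journal" */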
   5520
   5521	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
   5522		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
   5523			 "Quota mode: %s.", descr, ext4_quota_mode(sb));
   5524
   5525	/* Update the s_overhead_clusters if necessary */
   5526	ext4_update_overhead(sb);
   5527	return 0;
   5528
   5529free_sbi:
   5530	ext4_free_sbi(sbi);
   5531	fc->s_fs_info = NULL;
   5532	return ret;
   5533}
   5534
   5535static int ext4_get_tree(struct fs_context *fc)
   5536{
   5537	return get_tree_bdev(fc, ext4_fill_super);
   5538}
   5539
   5540/*
    5541 * Set up any per-fs journal parameters now.  We'll do this both on
   5542 * initial mount, once the journal has been initialised but before we've
   5543 * done any recovery; and again on any subsequent remount.
   5544 */
   5545static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
   5546{
   5547	struct ext4_sb_info *sbi = EXT4_SB(sb);
   5548
   5549	journal->j_commit_interval = sbi->s_commit_interval;
   5550	journal->j_min_batch_time = sbi->s_min_batch_time;
   5551	journal->j_max_batch_time = sbi->s_max_batch_time;
   5552	ext4_fc_init(sb, journal);
   5553
   5554	write_lock(&journal->j_state_lock);
   5555	if (test_opt(sb, BARRIER))
   5556		journal->j_flags |= JBD2_BARRIER;
   5557	else
   5558		journal->j_flags &= ~JBD2_BARRIER;
   5559	if (test_opt(sb, DATA_ERR_ABORT))
   5560		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
   5561	else
   5562		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
   5563	write_unlock(&journal->j_state_lock);
   5564}
   5565
   5566static struct inode *ext4_get_journal_inode(struct super_block *sb,
   5567					     unsigned int journal_inum)
   5568{
   5569	struct inode *journal_inode;
   5570
   5571	/*
   5572	 * Test for the existence of a valid inode on disk.  Bad things
   5573	 * happen if we iget() an unused inode, as the subsequent iput()
   5574	 * will try to delete it.
   5575	 */
   5576	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
   5577	if (IS_ERR(journal_inode)) {
   5578		ext4_msg(sb, KERN_ERR, "no journal found");
   5579		return NULL;
   5580	}
   5581	if (!journal_inode->i_nlink) {
   5582		make_bad_inode(journal_inode);
   5583		iput(journal_inode);
   5584		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
   5585		return NULL;
   5586	}
   5587
   5588	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
   5589		  journal_inode, journal_inode->i_size);
   5590	if (!S_ISREG(journal_inode->i_mode)) {
   5591		ext4_msg(sb, KERN_ERR, "invalid journal inode");
   5592		iput(journal_inode);
   5593		return NULL;
   5594	}
   5595	return journal_inode;
   5596}
   5597
   5598static journal_t *ext4_get_journal(struct super_block *sb,
   5599				   unsigned int journal_inum)
   5600{
   5601	struct inode *journal_inode;
   5602	journal_t *journal;
   5603
   5604	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
   5605		return NULL;
   5606
   5607	journal_inode = ext4_get_journal_inode(sb, journal_inum);
   5608	if (!journal_inode)
   5609		return NULL;
   5610
   5611	journal = jbd2_journal_init_inode(journal_inode);
   5612	if (!journal) {
   5613		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
   5614		iput(journal_inode);
   5615		return NULL;
   5616	}
   5617	journal->j_private = sb;
   5618	ext4_init_journal_params(sb, journal);
   5619	return journal;
   5620}
   5621
   5622static journal_t *ext4_get_dev_journal(struct super_block *sb,
   5623				       dev_t j_dev)
   5624{
   5625	struct buffer_head *bh;
   5626	journal_t *journal;
   5627	ext4_fsblk_t start;
   5628	ext4_fsblk_t len;
   5629	int hblock, blocksize;
   5630	ext4_fsblk_t sb_block;
   5631	unsigned long offset;
   5632	struct ext4_super_block *es;
   5633	struct block_device *bdev;
   5634
   5635	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
   5636		return NULL;
   5637
   5638	bdev = ext4_blkdev_get(j_dev, sb);
   5639	if (bdev == NULL)
   5640		return NULL;
   5641
   5642	blocksize = sb->s_blocksize;
   5643	hblock = bdev_logical_block_size(bdev);
   5644	if (blocksize < hblock) {
   5645		ext4_msg(sb, KERN_ERR,
   5646			"blocksize too small for journal device");
   5647		goto out_bdev;
   5648	}
   5649
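        	/*
        	 * Like the main superblock, the external journal's superblock
        	 * lives at byte offset 1024 (EXT4_MIN_BLOCK_SIZE); translate
        	 * that into a block number and offset for this block size.
        	 */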
   5650	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
   5651	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
   5652	set_blocksize(bdev, blocksize);
   5653	if (!(bh = __bread(bdev, sb_block, blocksize))) {
   5654		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
   5655		       "external journal");
   5656		goto out_bdev;
   5657	}
   5658
   5659	es = (struct ext4_super_block *) (bh->b_data + offset);
   5660	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
   5661	    !(le32_to_cpu(es->s_feature_incompat) &
   5662	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
   5663		ext4_msg(sb, KERN_ERR, "external journal has "
   5664					"bad superblock");
   5665		brelse(bh);
   5666		goto out_bdev;
   5667	}
   5668
   5669	if ((le32_to_cpu(es->s_feature_ro_compat) &
   5670	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
   5671	    es->s_checksum != ext4_superblock_csum(sb, es)) {
   5672		ext4_msg(sb, KERN_ERR, "external journal has "
   5673				       "corrupt superblock");
   5674		brelse(bh);
   5675		goto out_bdev;
   5676	}
   5677
   5678	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
   5679		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
   5680		brelse(bh);
   5681		goto out_bdev;
   5682	}
   5683
   5684	len = ext4_blocks_count(es);
   5685	start = sb_block + 1;
   5686	brelse(bh);	/* we're done with the superblock */
   5687
   5688	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
   5689					start, len, blocksize);
   5690	if (!journal) {
   5691		ext4_msg(sb, KERN_ERR, "failed to create device journal");
   5692		goto out_bdev;
   5693	}
   5694	journal->j_private = sb;
   5695	if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
   5696		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
   5697		goto out_journal;
   5698	}
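        	/* An external journal must not be shared by multiple filesystems */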
   5699	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
   5700		ext4_msg(sb, KERN_ERR, "External journal has more than one "
   5701					"user (unsupported) - %d",
   5702			be32_to_cpu(journal->j_superblock->s_nr_users));
   5703		goto out_journal;
   5704	}
   5705	EXT4_SB(sb)->s_journal_bdev = bdev;
   5706	ext4_init_journal_params(sb, journal);
   5707	return journal;
   5708
   5709out_journal:
   5710	jbd2_journal_destroy(journal);
   5711out_bdev:
   5712	ext4_blkdev_put(bdev);
   5713	return NULL;
   5714}
   5715
   5716static int ext4_load_journal(struct super_block *sb,
   5717			     struct ext4_super_block *es,
   5718			     unsigned long journal_devnum)
   5719{
   5720	journal_t *journal;
   5721	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
   5722	dev_t journal_dev;
   5723	int err = 0;
   5724	int really_read_only;
   5725	int journal_dev_ro;
   5726
   5727	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
   5728		return -EFSCORRUPTED;
   5729
   5730	if (journal_devnum &&
   5731	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
   5732		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
   5733			"numbers have changed");
   5734		journal_dev = new_decode_dev(journal_devnum);
   5735	} else
   5736		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
   5737
   5738	if (journal_inum && journal_dev) {
   5739		ext4_msg(sb, KERN_ERR,
   5740			 "filesystem has both journal inode and journal device!");
   5741		return -EINVAL;
   5742	}
   5743
   5744	if (journal_inum) {
   5745		journal = ext4_get_journal(sb, journal_inum);
   5746		if (!journal)
   5747			return -EINVAL;
   5748	} else {
   5749		journal = ext4_get_dev_journal(sb, journal_dev);
   5750		if (!journal)
   5751			return -EINVAL;
   5752	}
   5753
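        	/* A read-only journal device is only usable for a read-only mount */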
   5754	journal_dev_ro = bdev_read_only(journal->j_dev);
   5755	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
   5756
   5757	if (journal_dev_ro && !sb_rdonly(sb)) {
   5758		ext4_msg(sb, KERN_ERR,
   5759			 "journal device read-only, try mounting with '-o ro'");
   5760		err = -EROFS;
   5761		goto err_out;
   5762	}
   5763
   5764	/*
   5765	 * Are we loading a blank journal or performing recovery after a
   5766	 * crash?  For recovery, we need to check in advance whether we
   5767	 * can get read-write access to the device.
   5768	 */
   5769	if (ext4_has_feature_journal_needs_recovery(sb)) {
   5770		if (sb_rdonly(sb)) {
    5771			ext4_msg(sb, KERN_INFO, "recovery "
   5772					"required on readonly filesystem");
   5773			if (really_read_only) {
   5774				ext4_msg(sb, KERN_ERR, "write access "
   5775					"unavailable, cannot proceed "
   5776					"(try mounting with noload)");
   5777				err = -EROFS;
   5778				goto err_out;
   5779			}
   5780			ext4_msg(sb, KERN_INFO, "write access will "
   5781			       "be enabled during recovery");
   5782		}
   5783	}
   5784
   5785	if (!(journal->j_flags & JBD2_BARRIER))
   5786		ext4_msg(sb, KERN_INFO, "barriers disabled");
   5787
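        	/*
        	 * Wipe stale journal records if no recovery is needed.  Save
        	 * the error info stored in the ext4 superblock across
        	 * jbd2_journal_load(), since replaying the journal may
        	 * overwrite that area with an older superblock copy.
        	 */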
   5788	if (!ext4_has_feature_journal_needs_recovery(sb))
   5789		err = jbd2_journal_wipe(journal, !really_read_only);
   5790	if (!err) {
   5791		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
   5792		if (save)
   5793			memcpy(save, ((char *) es) +
   5794			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
   5795		err = jbd2_journal_load(journal);
   5796		if (save)
   5797			memcpy(((char *) es) + EXT4_S_ERR_START,
   5798			       save, EXT4_S_ERR_LEN);
   5799		kfree(save);
   5800	}
   5801
   5802	if (err) {
   5803		ext4_msg(sb, KERN_ERR, "error loading journal");
   5804		goto err_out;
   5805	}
   5806
   5807	EXT4_SB(sb)->s_journal = journal;
   5808	err = ext4_clear_journal_err(sb, es);
   5809	if (err) {
   5810		EXT4_SB(sb)->s_journal = NULL;
   5811		jbd2_journal_destroy(journal);
   5812		return err;
   5813	}
   5814
   5815	if (!really_read_only && journal_devnum &&
   5816	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
   5817		es->s_journal_dev = cpu_to_le32(journal_devnum);
   5818
    5819		/* Make sure we flush the updated journal device number to disk. */
   5820		ext4_commit_super(sb);
   5821	}
   5822
   5823	return 0;
   5824
   5825err_out:
   5826	jbd2_journal_destroy(journal);
   5827	return err;
   5828}
   5829
   5830/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
   5831static void ext4_update_super(struct super_block *sb)
   5832{
   5833	struct ext4_sb_info *sbi = EXT4_SB(sb);
   5834	struct ext4_super_block *es = sbi->s_es;
   5835	struct buffer_head *sbh = sbi->s_sbh;
   5836
   5837	lock_buffer(sbh);
   5838	/*
   5839	 * If the file system is mounted read-only, don't update the
   5840	 * superblock write time.  This avoids updating the superblock
   5841	 * write time when we are mounting the root file system
   5842	 * read/only but we need to replay the journal; at that point,
   5843	 * for people who are east of GMT and who make their clock
   5844	 * tick in localtime for Windows bug-for-bug compatibility,
   5845	 * the clock is set in the future, and this will cause e2fsck
   5846	 * to complain and force a full file system check.
   5847	 */
   5848	if (!(sb->s_flags & SB_RDONLY))
   5849		ext4_update_tstamp(es, s_wtime);
   5850	es->s_kbytes_written =
   5851		cpu_to_le64(sbi->s_kbytes_written +
   5852		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
   5853		      sbi->s_sectors_written_start) >> 1));
   5854	if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
   5855		ext4_free_blocks_count_set(es,
   5856			EXT4_C2B(sbi, percpu_counter_sum_positive(
   5857				&sbi->s_freeclusters_counter)));
   5858	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
   5859		es->s_free_inodes_count =
   5860			cpu_to_le32(percpu_counter_sum_positive(
   5861				&sbi->s_freeinodes_counter));
   5862	/* Copy error information to the on-disk superblock */
   5863	spin_lock(&sbi->s_error_lock);
   5864	if (sbi->s_add_error_count > 0) {
   5865		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
   5866		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
   5867			__ext4_update_tstamp(&es->s_first_error_time,
   5868					     &es->s_first_error_time_hi,
   5869					     sbi->s_first_error_time);
   5870			strncpy(es->s_first_error_func, sbi->s_first_error_func,
   5871				sizeof(es->s_first_error_func));
   5872			es->s_first_error_line =
   5873				cpu_to_le32(sbi->s_first_error_line);
   5874			es->s_first_error_ino =
   5875				cpu_to_le32(sbi->s_first_error_ino);
   5876			es->s_first_error_block =
   5877				cpu_to_le64(sbi->s_first_error_block);
   5878			es->s_first_error_errcode =
   5879				ext4_errno_to_code(sbi->s_first_error_code);
   5880		}
   5881		__ext4_update_tstamp(&es->s_last_error_time,
   5882				     &es->s_last_error_time_hi,
   5883				     sbi->s_last_error_time);
   5884		strncpy(es->s_last_error_func, sbi->s_last_error_func,
   5885			sizeof(es->s_last_error_func));
   5886		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
   5887		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
   5888		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
   5889		es->s_last_error_errcode =
   5890				ext4_errno_to_code(sbi->s_last_error_code);
   5891		/*
   5892		 * Start the daily error reporting function if it hasn't been
   5893		 * started already
   5894		 */
   5895		if (!es->s_error_count)
   5896			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
   5897		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
   5898		sbi->s_add_error_count = 0;
   5899	}
   5900	spin_unlock(&sbi->s_error_lock);
   5901
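        	/* Recompute the superblock checksum over the updates above */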
   5902	ext4_superblock_csum_set(sb);
   5903	unlock_buffer(sbh);
   5904}
   5905
   5906static int ext4_commit_super(struct super_block *sb)
   5907{
   5908	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
   5909
   5910	if (!sbh)
   5911		return -EINVAL;
   5912	if (block_device_ejected(sb))
   5913		return -ENODEV;
   5914
   5915	ext4_update_super(sb);
   5916
   5917	lock_buffer(sbh);
   5918	/* Buffer got discarded which means block device got invalidated */
   5919	if (!buffer_mapped(sbh)) {
   5920		unlock_buffer(sbh);
   5921		return -EIO;
   5922	}
   5923
   5924	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
   5925		/*
   5926		 * Oh, dear.  A previous attempt to write the
   5927		 * superblock failed.  This could happen because the
   5928		 * USB device was yanked out.  Or it could happen to
   5929		 * be a transient write error and maybe the block will
    5930		 * be remapped.  Nothing we can do but retry the
   5931		 * write and hope for the best.
   5932		 */
   5933		ext4_msg(sb, KERN_ERR, "previous I/O error to "
   5934		       "superblock detected");
   5935		clear_buffer_write_io_error(sbh);
   5936		set_buffer_uptodate(sbh);
   5937	}
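        	/*
        	 * Write the superblock buffer synchronously, with FUA when
        	 * barriers are enabled so the update reaches stable storage.
        	 */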
   5938	get_bh(sbh);
    5939	/* Clear the potential dirty bit in case this was a journalled update */
   5940	clear_buffer_dirty(sbh);
   5941	sbh->b_end_io = end_buffer_write_sync;
   5942	submit_bh(REQ_OP_WRITE,
   5943		  REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
   5944	wait_on_buffer(sbh);
   5945	if (buffer_write_io_error(sbh)) {
   5946		ext4_msg(sb, KERN_ERR, "I/O error while writing "
   5947		       "superblock");
   5948		clear_buffer_write_io_error(sbh);
   5949		set_buffer_uptodate(sbh);
   5950		return -EIO;
   5951	}
   5952	return 0;
   5953}
   5954
   5955/*
   5956 * Have we just finished recovery?  If so, and if we are mounting (or
   5957 * remounting) the filesystem readonly, then we will end up with a
   5958 * consistent fs on disk.  Record that fact.
   5959 */
   5960static int ext4_mark_recovery_complete(struct super_block *sb,
   5961				       struct ext4_super_block *es)
   5962{
   5963	int err;
   5964	journal_t *journal = EXT4_SB(sb)->s_journal;
   5965
   5966	if (!ext4_has_feature_journal(sb)) {
   5967		if (journal != NULL) {
   5968			ext4_error(sb, "Journal got removed while the fs was "
   5969				   "mounted!");
   5970			return -EFSCORRUPTED;
   5971		}
   5972		return 0;
   5973	}
   5974	jbd2_journal_lock_updates(journal);
   5975	err = jbd2_journal_flush(journal, 0);
   5976	if (err < 0)
   5977		goto out;
   5978
   5979	if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
   5980	    ext4_has_feature_orphan_present(sb))) {
   5981		if (!ext4_orphan_file_empty(sb)) {
   5982			ext4_error(sb, "Orphan file not empty on read-only fs.");
   5983			err = -EFSCORRUPTED;
   5984			goto out;
   5985		}
   5986		ext4_clear_feature_journal_needs_recovery(sb);
   5987		ext4_clear_feature_orphan_present(sb);
   5988		ext4_commit_super(sb);
   5989	}
   5990out:
   5991	jbd2_journal_unlock_updates(journal);
   5992	return err;
   5993}
   5994
   5995/*
   5996 * If we are mounting (or read-write remounting) a filesystem whose journal
   5997 * has recorded an error from a previous lifetime, move that error to the
   5998 * main filesystem now.
   5999 */
   6000static int ext4_clear_journal_err(struct super_block *sb,
   6001				   struct ext4_super_block *es)
   6002{
   6003	journal_t *journal;
   6004	int j_errno;
   6005	const char *errstr;
   6006
   6007	if (!ext4_has_feature_journal(sb)) {
   6008		ext4_error(sb, "Journal got removed while the fs was mounted!");
   6009		return -EFSCORRUPTED;
   6010	}
   6011
   6012	journal = EXT4_SB(sb)->s_journal;
   6013
   6014	/*
   6015	 * Now check for any error status which may have been recorded in the
   6016	 * journal by a prior ext4_error() or ext4_abort()
   6017	 */
   6018
   6019	j_errno = jbd2_journal_errno(journal);
   6020	if (j_errno) {
   6021		char nbuf[16];
   6022
   6023		errstr = ext4_decode_error(sb, j_errno, nbuf);
   6024		ext4_warning(sb, "Filesystem error recorded "
   6025			     "from previous mount: %s", errstr);
   6026		ext4_warning(sb, "Marking fs in need of filesystem check.");
   6027
   6028		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
   6029		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
   6030		ext4_commit_super(sb);
   6031
   6032		jbd2_journal_clear_err(journal);
   6033		jbd2_journal_update_sb_errno(journal);
   6034	}
   6035	return 0;
   6036}
   6037
   6038/*
   6039 * Force the running and committing transactions to commit,
   6040 * and wait on the commit.
   6041 */
   6042int ext4_force_commit(struct super_block *sb)
   6043{
   6044	journal_t *journal;
   6045
   6046	if (sb_rdonly(sb))
   6047		return 0;
   6048
   6049	journal = EXT4_SB(sb)->s_journal;
   6050	return ext4_journal_force_commit(journal);
   6051}
   6052
   6053static int ext4_sync_fs(struct super_block *sb, int wait)
   6054{
   6055	int ret = 0;
   6056	tid_t target;
   6057	bool needs_barrier = false;
   6058	struct ext4_sb_info *sbi = EXT4_SB(sb);
   6059
   6060	if (unlikely(ext4_forced_shutdown(sbi)))
   6061		return 0;
   6062
   6063	trace_ext4_sync_fs(sb, wait);
   6064	flush_workqueue(sbi->rsv_conversion_wq);
   6065	/*
    6066	 * Write back quota in the non-journalled quota case - journalled
    6067	 * quota has no dirty dquots
   6068	 */
   6069	dquot_writeback_dquots(sb, -1);
   6070	/*
    6071	 * Data writeback is possible w/o journal transaction, so the barrier
    6072	 * must be sent at the end of the function. But we can skip it if
   6073	 * transaction_commit will do it for us.
   6074	 */
   6075	if (sbi->s_journal) {
   6076		target = jbd2_get_latest_transaction(sbi->s_journal);
   6077		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
   6078		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
   6079			needs_barrier = true;
   6080
   6081		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
   6082			if (wait)
   6083				ret = jbd2_log_wait_commit(sbi->s_journal,
   6084							   target);
   6085		}
   6086	} else if (wait && test_opt(sb, BARRIER))
   6087		needs_barrier = true;
   6088	if (needs_barrier) {
   6089		int err;
   6090		err = blkdev_issue_flush(sb->s_bdev);
   6091		if (!ret)
   6092			ret = err;
   6093	}
   6094
   6095	return ret;
   6096}
   6097
   6098/*
   6099 * LVM calls this function before a (read-only) snapshot is created.  This
   6100 * gives us a chance to flush the journal completely and mark the fs clean.
   6101 *
    6102 * Note that this function alone cannot bring the filesystem into a
    6103 * clean state; it relies on the upper layers to stop all data &
    6104 * metadata modifications.
   6105 */
   6106static int ext4_freeze(struct super_block *sb)
   6107{
   6108	int error = 0;
   6109	journal_t *journal;
   6110
   6111	if (sb_rdonly(sb))
   6112		return 0;
   6113
   6114	journal = EXT4_SB(sb)->s_journal;
   6115
   6116	if (journal) {
   6117		/* Now we set up the journal barrier. */
   6118		jbd2_journal_lock_updates(journal);
   6119
   6120		/*
   6121		 * Don't clear the needs_recovery flag if we failed to
   6122		 * flush the journal.
   6123		 */
   6124		error = jbd2_journal_flush(journal, 0);
   6125		if (error < 0)
   6126			goto out;
   6127
   6128		/* Journal blocked and flushed, clear needs_recovery flag. */
   6129		ext4_clear_feature_journal_needs_recovery(sb);
   6130		if (ext4_orphan_file_empty(sb))
   6131			ext4_clear_feature_orphan_present(sb);
   6132	}
   6133
   6134	error = ext4_commit_super(sb);
   6135out:
   6136	if (journal)
   6137		/* we rely on upper layer to stop further updates */
   6138		jbd2_journal_unlock_updates(journal);
   6139	return error;
   6140}
   6141
   6142/*
   6143 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
   6144 * flag here, even though the filesystem is not technically dirty yet.
   6145 */
   6146static int ext4_unfreeze(struct super_block *sb)
   6147{
   6148	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
   6149		return 0;
   6150
   6151	if (EXT4_SB(sb)->s_journal) {
   6152		/* Reset the needs_recovery flag before the fs is unlocked. */
   6153		ext4_set_feature_journal_needs_recovery(sb);
   6154		if (ext4_has_feature_orphan_file(sb))
   6155			ext4_set_feature_orphan_present(sb);
   6156	}
   6157
   6158	ext4_commit_super(sb);
   6159	return 0;
   6160}
   6161
   6162/*
   6163 * Structure to save mount options for ext4_remount's benefit
   6164 */
   6165struct ext4_mount_options {
   6166	unsigned long s_mount_opt;
   6167	unsigned long s_mount_opt2;
   6168	kuid_t s_resuid;
   6169	kgid_t s_resgid;
   6170	unsigned long s_commit_interval;
   6171	u32 s_min_batch_time, s_max_batch_time;
   6172#ifdef CONFIG_QUOTA
   6173	int s_jquota_fmt;
   6174	char *s_qf_names[EXT4_MAXQUOTAS];
   6175#endif
   6176};
   6177
   6178static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
   6179{
   6180	struct ext4_fs_context *ctx = fc->fs_private;
   6181	struct ext4_super_block *es;
   6182	struct ext4_sb_info *sbi = EXT4_SB(sb);
   6183	unsigned long old_sb_flags;
   6184	struct ext4_mount_options old_opts;
   6185	ext4_group_t g;
   6186	int err = 0;
   6187#ifdef CONFIG_QUOTA
   6188	int enable_quota = 0;
   6189	int i, j;
   6190	char *to_free[EXT4_MAXQUOTAS];
   6191#endif
   6192
   6193
   6194	/* Store the original options */
   6195	old_sb_flags = sb->s_flags;
   6196	old_opts.s_mount_opt = sbi->s_mount_opt;
   6197	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
   6198	old_opts.s_resuid = sbi->s_resuid;
   6199	old_opts.s_resgid = sbi->s_resgid;
   6200	old_opts.s_commit_interval = sbi->s_commit_interval;
   6201	old_opts.s_min_batch_time = sbi->s_min_batch_time;
   6202	old_opts.s_max_batch_time = sbi->s_max_batch_time;
   6203#ifdef CONFIG_QUOTA
   6204	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
   6205	for (i = 0; i < EXT4_MAXQUOTAS; i++)
   6206		if (sbi->s_qf_names[i]) {
   6207			char *qf_name = get_qf_name(sb, sbi, i);
   6208
   6209			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
   6210			if (!old_opts.s_qf_names[i]) {
   6211				for (j = 0; j < i; j++)
   6212					kfree(old_opts.s_qf_names[j]);
   6213				return -ENOMEM;
   6214			}
   6215		} else
   6216			old_opts.s_qf_names[i] = NULL;
   6217#endif
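        	/*
        	 * If the remount does not specify a journal I/O priority,
        	 * keep the running journal thread's priority (or the default
        	 * when there is no io_context yet).
        	 */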
   6218	if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) {
   6219		if (sbi->s_journal && sbi->s_journal->j_task->io_context)
   6220			ctx->journal_ioprio =
   6221				sbi->s_journal->j_task->io_context->ioprio;
   6222		else
   6223			ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
   6224
   6225	}
   6226
   6227	ext4_apply_options(fc, sb);
   6228
   6229	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
   6230	    test_opt(sb, JOURNAL_CHECKSUM)) {
   6231		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
   6232			 "during remount not supported; ignoring");
   6233		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
   6234	}
   6235
   6236	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
   6237		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
   6238			ext4_msg(sb, KERN_ERR, "can't mount with "
   6239				 "both data=journal and delalloc");
   6240			err = -EINVAL;
   6241			goto restore_opts;
   6242		}
   6243		if (test_opt(sb, DIOREAD_NOLOCK)) {
   6244			ext4_msg(sb, KERN_ERR, "can't mount with "
   6245				 "both data=journal and dioread_nolock");
   6246			err = -EINVAL;
   6247			goto restore_opts;
   6248		}
   6249	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
   6250		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
   6251			ext4_msg(sb, KERN_ERR, "can't mount with "
   6252				"journal_async_commit in data=ordered mode");
   6253			err = -EINVAL;
   6254			goto restore_opts;
   6255		}
   6256	}
   6257
   6258	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
   6259		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
   6260		err = -EINVAL;
   6261		goto restore_opts;
   6262	}
   6263
   6264	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
   6265		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
   6266
   6267	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
   6268		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
   6269
   6270	es = sbi->s_es;
   6271
   6272	if (sbi->s_journal) {
   6273		ext4_init_journal_params(sb, sbi->s_journal);
   6274		set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
   6275	}
   6276
   6277	/* Flush outstanding errors before changing fs state */
   6278	flush_work(&sbi->s_error_work);
   6279
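        	/*
        	 * Handle a read-only <-> read-write transition: going read-only
        	 * quiesces the fs and marks it clean; going read-write first
        	 * revalidates features, group descriptor checksums and the
        	 * orphan list.
        	 */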
   6280	if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
   6281		if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
   6282			err = -EROFS;
   6283			goto restore_opts;
   6284		}
   6285
   6286		if (fc->sb_flags & SB_RDONLY) {
   6287			err = sync_filesystem(sb);
   6288			if (err < 0)
   6289				goto restore_opts;
   6290			err = dquot_suspend(sb, -1);
   6291			if (err < 0)
   6292				goto restore_opts;
   6293
   6294			/*
   6295			 * First of all, the unconditional stuff we have to do
   6296			 * to disable replay of the journal when we next remount
   6297			 */
   6298			sb->s_flags |= SB_RDONLY;
   6299
   6300			/*
   6301			 * OK, test if we are remounting a valid rw partition
   6302			 * readonly, and if so set the rdonly flag and then
   6303			 * mark the partition as valid again.
   6304			 */
   6305			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
   6306			    (sbi->s_mount_state & EXT4_VALID_FS))
   6307				es->s_state = cpu_to_le16(sbi->s_mount_state);
   6308
   6309			if (sbi->s_journal) {
   6310				/*
   6311				 * We let remount-ro finish even if marking fs
   6312				 * as clean failed...
   6313				 */
   6314				ext4_mark_recovery_complete(sb, es);
   6315			}
   6316		} else {
   6317			/* Make sure we can mount this feature set readwrite */
   6318			if (ext4_has_feature_readonly(sb) ||
   6319			    !ext4_feature_set_ok(sb, 0)) {
   6320				err = -EROFS;
   6321				goto restore_opts;
   6322			}
   6323			/*
   6324			 * Make sure the group descriptor checksums
   6325			 * are sane.  If they aren't, refuse to remount r/w.
   6326			 */
   6327			for (g = 0; g < sbi->s_groups_count; g++) {
   6328				struct ext4_group_desc *gdp =
   6329					ext4_get_group_desc(sb, g, NULL);
   6330
   6331				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
   6332					ext4_msg(sb, KERN_ERR,
   6333	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
   6334		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
   6335					       le16_to_cpu(gdp->bg_checksum));
   6336					err = -EFSBADCRC;
   6337					goto restore_opts;
   6338				}
   6339			}
   6340
   6341			/*
   6342			 * If we have an unprocessed orphan list hanging
   6343			 * around from a previously readonly bdev mount,
   6344			 * require a full umount/remount for now.
   6345			 */
   6346			if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) {
   6347				ext4_msg(sb, KERN_WARNING, "Couldn't "
   6348				       "remount RDWR because of unprocessed "
   6349				       "orphan inode list.  Please "
   6350				       "umount/remount instead");
   6351				err = -EINVAL;
   6352				goto restore_opts;
   6353			}
   6354
   6355			/*
    6356			 * Mounting a read-only partition read-write, so reread
   6357			 * and store the current valid flag.  (It may have
   6358			 * been changed by e2fsck since we originally mounted
   6359			 * the partition.)
   6360			 */
   6361			if (sbi->s_journal) {
   6362				err = ext4_clear_journal_err(sb, es);
   6363				if (err)
   6364					goto restore_opts;
   6365			}
   6366			sbi->s_mount_state = (le16_to_cpu(es->s_state) &
   6367					      ~EXT4_FC_REPLAY);
   6368
   6369			err = ext4_setup_super(sb, es, 0);
   6370			if (err)
   6371				goto restore_opts;
   6372
   6373			sb->s_flags &= ~SB_RDONLY;
   6374			if (ext4_has_feature_mmp(sb))
   6375				if (ext4_multi_mount_protect(sb,
   6376						le64_to_cpu(es->s_mmp_block))) {
   6377					err = -EROFS;
   6378					goto restore_opts;
   6379				}
   6380#ifdef CONFIG_QUOTA
   6381			enable_quota = 1;
   6382#endif
   6383		}
   6384	}
   6385
   6386	/*
   6387	 * Reinitialize lazy itable initialization thread based on
   6388	 * current settings
   6389	 */
   6390	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
   6391		ext4_unregister_li_request(sb);
   6392	else {
   6393		ext4_group_t first_not_zeroed;
   6394		first_not_zeroed = ext4_has_uninit_itable(sb);
   6395		ext4_register_li_request(sb, first_not_zeroed);
   6396	}
   6397
   6398	/*
   6399	 * Handle creation of system zone data early because it can fail.
   6400	 * Releasing of existing data is done when we are sure remount will
   6401	 * succeed.
   6402	 */
   6403	if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
   6404		err = ext4_setup_system_zone(sb);
   6405		if (err)
   6406			goto restore_opts;
   6407	}
   6408
   6409	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
   6410		err = ext4_commit_super(sb);
   6411		if (err)
   6412			goto restore_opts;
   6413	}
   6414
   6415#ifdef CONFIG_QUOTA
   6416	/* Release old quota file names */
   6417	for (i = 0; i < EXT4_MAXQUOTAS; i++)
   6418		kfree(old_opts.s_qf_names[i]);
   6419	if (enable_quota) {
   6420		if (sb_any_quota_suspended(sb))
   6421			dquot_resume(sb, -1);
   6422		else if (ext4_has_feature_quota(sb)) {
   6423			err = ext4_enable_quotas(sb);
   6424			if (err)
   6425				goto restore_opts;
   6426		}
   6427	}
   6428#endif
   6429	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
   6430		ext4_release_system_zone(sb);
   6431
   6432	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
   6433		ext4_stop_mmpd(sbi);
   6434
   6435	return 0;
   6436
   6437restore_opts:
   6438	sb->s_flags = old_sb_flags;
   6439	sbi->s_mount_opt = old_opts.s_mount_opt;
   6440	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
   6441	sbi->s_resuid = old_opts.s_resuid;
   6442	sbi->s_resgid = old_opts.s_resgid;
   6443	sbi->s_commit_interval = old_opts.s_commit_interval;
   6444	sbi->s_min_batch_time = old_opts.s_min_batch_time;
   6445	sbi->s_max_batch_time = old_opts.s_max_batch_time;
   6446	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
   6447		ext4_release_system_zone(sb);
   6448#ifdef CONFIG_QUOTA
   6449	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
   6450	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
   6451		to_free[i] = get_qf_name(sb, sbi, i);
   6452		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
   6453	}
   6454	synchronize_rcu();
   6455	for (i = 0; i < EXT4_MAXQUOTAS; i++)
   6456		kfree(to_free[i]);
   6457#endif
   6458	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
   6459		ext4_stop_mmpd(sbi);
   6460	return err;
   6461}
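/*
 * Illustrative userspace sketch (not part of super.c): the snapshot-and-
 * rollback pattern __ext4_remount() uses above. The current tunables are
 * saved into old_opts up front, the new options are applied, and any
 * failure jumps to restore_opts to put the snapshot back, so a rejected
 * remount leaves the filesystem options untouched. All names below are
 * hypothetical.
 */
#include <stdio.h>

struct opts {
	unsigned long mount_opt;
	unsigned int commit_interval;
};

/* Stand-in for option validation; pretend the new settings are rejected. */
static int apply_new_opts(struct opts *o)
{
	o->commit_interval = 0;
	return -1;			/* simulates "goto restore_opts" */
}

int main(void)
{
	struct opts cur = { .mount_opt = 0x1, .commit_interval = 5 };
	struct opts old = cur;		/* snapshot, like old_opts */

	if (apply_new_opts(&cur) < 0) {
		cur = old;		/* restore_opts: roll everything back */
		fprintf(stderr, "remount rejected, options restored\n");
		return 1;
	}
	return 0;
}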
   6462
   6463static int ext4_reconfigure(struct fs_context *fc)
   6464{
   6465	struct super_block *sb = fc->root->d_sb;
   6466	int ret;
   6467
   6468	fc->s_fs_info = EXT4_SB(sb);
   6469
   6470	ret = ext4_check_opt_consistency(fc, sb);
   6471	if (ret < 0)
   6472		return ret;
   6473
   6474	ret = __ext4_remount(fc, sb);
   6475	if (ret < 0)
   6476		return ret;
   6477
   6478	ext4_msg(sb, KERN_INFO, "re-mounted. Quota mode: %s.",
   6479		 ext4_quota_mode(sb));
   6480
   6481	return 0;
   6482}
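/*
 * Illustrative userspace sketch (not part of super.c): a remount request
 * that reaches ext4_reconfigure() through the VFS. Source and filesystem
 * type are ignored for MS_REMOUNT; the mount point "/mnt" is a
 * placeholder.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Flip an existing ext4 mount to read-only. */
	if (mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}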
   6483
   6484#ifdef CONFIG_QUOTA
   6485static int ext4_statfs_project(struct super_block *sb,
   6486			       kprojid_t projid, struct kstatfs *buf)
   6487{
   6488	struct kqid qid;
   6489	struct dquot *dquot;
   6490	u64 limit;
   6491	u64 curblock;
   6492
   6493	qid = make_kqid_projid(projid);
   6494	dquot = dqget(sb, qid);
   6495	if (IS_ERR(dquot))
   6496		return PTR_ERR(dquot);
   6497	spin_lock(&dquot->dq_dqb_lock);
   6498
   6499	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
   6500			     dquot->dq_dqb.dqb_bhardlimit);
   6501	limit >>= sb->s_blocksize_bits;
   6502
   6503	if (limit && buf->f_blocks > limit) {
   6504		curblock = (dquot->dq_dqb.dqb_curspace +
   6505			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
   6506		buf->f_blocks = limit;
   6507		buf->f_bfree = buf->f_bavail =
   6508			(buf->f_blocks > curblock) ?
   6509			 (buf->f_blocks - curblock) : 0;
   6510	}
   6511
   6512	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
   6513			     dquot->dq_dqb.dqb_ihardlimit);
   6514	if (limit && buf->f_files > limit) {
   6515		buf->f_files = limit;
   6516		buf->f_ffree =
   6517			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
   6518			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
   6519	}
   6520
   6521	spin_unlock(&dquot->dq_dqb_lock);
   6522	dqput(dquot);
   6523	return 0;
   6524}
   6525#endif
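/*
 * Illustrative userspace sketch (not part of super.c): the clamping done
 * by ext4_statfs_project() above. min_not_zero() picks the smaller of the
 * soft and hard limits while treating zero as "no limit"; the reported
 * totals are then capped to that limit. The numbers are made up.
 */
#include <stdio.h>

static unsigned long long min_not_zero(unsigned long long a,
				       unsigned long long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned long long soft = 0, hard = 1000;	/* block limits */
	unsigned long long f_blocks = 5000, cur = 400;	/* fs total, used */
	unsigned long long limit = min_not_zero(soft, hard);

	if (limit && f_blocks > limit) {
		f_blocks = limit;	/* project sees only its quota */
		printf("f_blocks=%llu f_bavail=%llu\n", f_blocks,
		       f_blocks > cur ? f_blocks - cur : 0);
	}
	return 0;
}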
   6526
   6527static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
   6528{
   6529	struct super_block *sb = dentry->d_sb;
   6530	struct ext4_sb_info *sbi = EXT4_SB(sb);
   6531	struct ext4_super_block *es = sbi->s_es;
   6532	ext4_fsblk_t overhead = 0, resv_blocks;
   6533	s64 bfree;
   6534	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
   6535
   6536	if (!test_opt(sb, MINIX_DF))
   6537		overhead = sbi->s_overhead;
   6538
   6539	buf->f_type = EXT4_SUPER_MAGIC;
   6540	buf->f_bsize = sb->s_blocksize;
   6541	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
   6542	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
   6543		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
    6544	/* prevent underflow in case little free space is available */
   6545	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
   6546	buf->f_bavail = buf->f_bfree -
   6547			(ext4_r_blocks_count(es) + resv_blocks);
   6548	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
   6549		buf->f_bavail = 0;
   6550	buf->f_files = le32_to_cpu(es->s_inodes_count);
   6551	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
   6552	buf->f_namelen = EXT4_NAME_LEN;
   6553	buf->f_fsid = uuid_to_fsid(es->s_uuid);
   6554
   6555#ifdef CONFIG_QUOTA
   6556	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
   6557	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
   6558		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
   6559#endif
   6560	return 0;
   6561}
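/*
 * Illustrative userspace sketch (not part of super.c): statfs(2) is the
 * syscall that ends up in ext4_statfs() above; the fields printed here
 * are the ones the function fills in. The mount point "/mnt" is a
 * placeholder.
 */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs st;

	if (statfs("/mnt", &st) != 0) {
		perror("statfs");
		return 1;
	}
	printf("bsize=%ld blocks=%llu bfree=%llu bavail=%llu files=%llu\n",
	       (long)st.f_bsize,
	       (unsigned long long)st.f_blocks,
	       (unsigned long long)st.f_bfree,
	       (unsigned long long)st.f_bavail,
	       (unsigned long long)st.f_files);
	return 0;
}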
   6562
   6563
   6564#ifdef CONFIG_QUOTA
   6565
   6566/*
   6567 * Helper functions so that transaction is started before we acquire dqio_sem
   6568 * to keep correct lock ordering of transaction > dqio_sem
   6569 */
   6570static inline struct inode *dquot_to_inode(struct dquot *dquot)
   6571{
   6572	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
   6573}
   6574
   6575static int ext4_write_dquot(struct dquot *dquot)
   6576{
   6577	int ret, err;
   6578	handle_t *handle;
   6579	struct inode *inode;
   6580
   6581	inode = dquot_to_inode(dquot);
   6582	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
   6583				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
   6584	if (IS_ERR(handle))
   6585		return PTR_ERR(handle);
   6586	ret = dquot_commit(dquot);
   6587	err = ext4_journal_stop(handle);
   6588	if (!ret)
   6589		ret = err;
   6590	return ret;
   6591}
   6592
   6593static int ext4_acquire_dquot(struct dquot *dquot)
   6594{
   6595	int ret, err;
   6596	handle_t *handle;
   6597
   6598	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
   6599				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
   6600	if (IS_ERR(handle))
   6601		return PTR_ERR(handle);
   6602	ret = dquot_acquire(dquot);
   6603	err = ext4_journal_stop(handle);
   6604	if (!ret)
   6605		ret = err;
   6606	return ret;
   6607}
   6608
   6609static int ext4_release_dquot(struct dquot *dquot)
   6610{
   6611	int ret, err;
   6612	handle_t *handle;
   6613
   6614	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
   6615				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
   6616	if (IS_ERR(handle)) {
   6617		/* Release dquot anyway to avoid endless cycle in dqput() */
   6618		dquot_release(dquot);
   6619		return PTR_ERR(handle);
   6620	}
   6621	ret = dquot_release(dquot);
   6622	err = ext4_journal_stop(handle);
   6623	if (!ret)
   6624		ret = err;
   6625	return ret;
   6626}
   6627
   6628static int ext4_mark_dquot_dirty(struct dquot *dquot)
   6629{
   6630	struct super_block *sb = dquot->dq_sb;
   6631
   6632	if (ext4_is_quota_journalled(sb)) {
   6633		dquot_mark_dquot_dirty(dquot);
   6634		return ext4_write_dquot(dquot);
   6635	} else {
   6636		return dquot_mark_dquot_dirty(dquot);
   6637	}
   6638}
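/*
 * Illustrative sketch (not part of super.c): the two dirtying strategies
 * chosen in ext4_mark_dquot_dirty() above. With journalled quota every
 * change is written out synchronously under the running transaction;
 * otherwise the dquot is only flagged and flushed later. All names here
 * are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

static bool journalled = true;	/* stand-in for ext4_is_quota_journalled() */

static int write_now(void)
{
	puts("commit dquot within the transaction");
	return 0;
}

static int flag_dirty(void)
{
	puts("flag dquot for later writeback");
	return 0;
}

static int mark_dquot_dirty(void)
{
	if (journalled) {
		flag_dirty();		/* still flagged... */
		return write_now();	/* ...but committed immediately */
	}
	return flag_dirty();
}

int main(void)
{
	return mark_dquot_dirty();
}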
   6639
   6640static int ext4_write_info(struct super_block *sb, int type)
   6641{
   6642	int ret, err;
   6643	handle_t *handle;
   6644
   6645	/* Data block + inode block */
   6646	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
   6647	if (IS_ERR(handle))
   6648		return PTR_ERR(handle);
   6649	ret = dquot_commit_info(sb, type);
   6650	err = ext4_journal_stop(handle);
   6651	if (!ret)
   6652		ret = err;
   6653	return ret;
   6654}
   6655
   6656static void lockdep_set_quota_inode(struct inode *inode, int subclass)
   6657{
   6658	struct ext4_inode_info *ei = EXT4_I(inode);
   6659
   6660	/* The first argument of lockdep_set_subclass has to be
   6661	 * *exactly* the same as the argument to init_rwsem() --- in
   6662	 * this case, in init_once() --- or lockdep gets unhappy
   6663	 * because the name of the lock is set using the
   6664	 * stringification of the argument to init_rwsem().
   6665	 */
   6666	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
   6667	lockdep_set_subclass(&ei->i_data_sem, subclass);
   6668}
   6669
   6670/*
   6671 * Standard function to be called on quota_on
   6672 */
   6673static int ext4_quota_on(struct super_block *sb, int type, int format_id,
   6674			 const struct path *path)
   6675{
   6676	int err;
   6677
   6678	if (!test_opt(sb, QUOTA))
   6679		return -EINVAL;
   6680
   6681	/* Quotafile not on the same filesystem? */
   6682	if (path->dentry->d_sb != sb)
   6683		return -EXDEV;
   6684
   6685	/* Quota already enabled for this file? */
   6686	if (IS_NOQUOTA(d_inode(path->dentry)))
   6687		return -EBUSY;
   6688
   6689	/* Journaling quota? */
   6690	if (EXT4_SB(sb)->s_qf_names[type]) {
   6691		/* Quotafile not in fs root? */
   6692		if (path->dentry->d_parent != sb->s_root)
   6693			ext4_msg(sb, KERN_WARNING,
   6694				"Quota file not on filesystem root. "
   6695				"Journaled quota will not work");
   6696		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
   6697	} else {
   6698		/*
   6699		 * Clear the flag just in case mount options changed since
   6700		 * last time.
   6701		 */
   6702		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
   6703	}
   6704
    6705	/*
    6706	 * When we journal data on the quota file, we have to flush the journal
    6707	 * so that all updates to the file are visible when we bypass pagecache...
    6708	 */
   6709	if (EXT4_SB(sb)->s_journal &&
   6710	    ext4_should_journal_data(d_inode(path->dentry))) {
   6711		/*
   6712		 * We don't need to lock updates but journal_flush() could
   6713		 * otherwise be livelocked...
   6714		 */
   6715		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
   6716		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
   6717		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
   6718		if (err)
   6719			return err;
   6720	}
   6721
   6722	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
   6723	err = dquot_quota_on(sb, type, format_id, path);
   6724	if (!err) {
   6725		struct inode *inode = d_inode(path->dentry);
   6726		handle_t *handle;
   6727
   6728		/*
   6729		 * Set inode flags to prevent userspace from messing with quota
   6730		 * files. If this fails, we return success anyway since quotas
   6731		 * are already enabled and this is not a hard failure.
   6732		 */
   6733		inode_lock(inode);
   6734		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
   6735		if (IS_ERR(handle))
   6736			goto unlock_inode;
   6737		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
   6738		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
   6739				S_NOATIME | S_IMMUTABLE);
   6740		err = ext4_mark_inode_dirty(handle, inode);
   6741		ext4_journal_stop(handle);
   6742	unlock_inode:
   6743		inode_unlock(inode);
   6744		if (err)
   6745			dquot_quota_off(sb, type);
   6746	}
   6747	if (err)
   6748		lockdep_set_quota_inode(path->dentry->d_inode,
   6749					     I_DATA_SEM_NORMAL);
   6750	return err;
   6751}
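/*
 * Illustrative userspace sketch (not part of super.c): turning quota on
 * with quotactl(2), which is what reaches ext4_quota_on() above for
 * old-style (visible quota file) setups. The device and quota-file paths
 * are placeholders.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

#ifndef QFMT_VFS_V1
#define QFMT_VFS_V1 4		/* from <linux/quota.h> */
#endif

int main(void)
{
	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sdb1",
		     QFMT_VFS_V1, (caddr_t)"/mnt/aquota.user") != 0) {
		perror("quotactl");
		return 1;
	}
	return 0;
}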
   6752
   6753static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
   6754			     unsigned int flags)
   6755{
   6756	int err;
   6757	struct inode *qf_inode;
   6758	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
   6759		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
   6760		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
   6761		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
   6762	};
   6763
   6764	BUG_ON(!ext4_has_feature_quota(sb));
   6765
   6766	if (!qf_inums[type])
   6767		return -EPERM;
   6768
   6769	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
   6770	if (IS_ERR(qf_inode)) {
   6771		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
   6772		return PTR_ERR(qf_inode);
   6773	}
   6774
   6775	/* Don't account quota for quota files to avoid recursion */
   6776	qf_inode->i_flags |= S_NOQUOTA;
   6777	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
   6778	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
   6779	if (err)
   6780		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
   6781	iput(qf_inode);
   6782
   6783	return err;
   6784}
   6785
   6786/* Enable usage tracking for all quota types. */
   6787int ext4_enable_quotas(struct super_block *sb)
   6788{
   6789	int type, err = 0;
   6790	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
   6791		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
   6792		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
   6793		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
   6794	};
   6795	bool quota_mopt[EXT4_MAXQUOTAS] = {
   6796		test_opt(sb, USRQUOTA),
   6797		test_opt(sb, GRPQUOTA),
   6798		test_opt(sb, PRJQUOTA),
   6799	};
   6800
   6801	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
   6802	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
   6803		if (qf_inums[type]) {
   6804			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
   6805				DQUOT_USAGE_ENABLED |
   6806				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
   6807			if (err) {
   6808				ext4_warning(sb,
   6809					"Failed to enable quota tracking "
   6810					"(type=%d, err=%d). Please run "
   6811					"e2fsck to fix.", type, err);
   6812				for (type--; type >= 0; type--) {
   6813					struct inode *inode;
   6814
   6815					inode = sb_dqopt(sb)->files[type];
   6816					if (inode)
   6817						inode = igrab(inode);
   6818					dquot_quota_off(sb, type);
   6819					if (inode) {
   6820						lockdep_set_quota_inode(inode,
   6821							I_DATA_SEM_NORMAL);
   6822						iput(inode);
   6823					}
   6824				}
   6825
   6826				return err;
   6827			}
   6828		}
   6829	}
   6830	return 0;
   6831}
   6832
   6833static int ext4_quota_off(struct super_block *sb, int type)
   6834{
   6835	struct inode *inode = sb_dqopt(sb)->files[type];
   6836	handle_t *handle;
   6837	int err;
   6838
    6839	/* Force all delayed allocation blocks to be allocated.
    6840	 * The caller already holds the s_umount sem. */
   6841	if (test_opt(sb, DELALLOC))
   6842		sync_filesystem(sb);
   6843
   6844	if (!inode || !igrab(inode))
   6845		goto out;
   6846
   6847	err = dquot_quota_off(sb, type);
   6848	if (err || ext4_has_feature_quota(sb))
   6849		goto out_put;
   6850
   6851	inode_lock(inode);
   6852	/*
   6853	 * Update modification times of quota files when userspace can
   6854	 * start looking at them. If we fail, we return success anyway since
   6855	 * this is not a hard failure and quotas are already disabled.
   6856	 */
   6857	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
   6858	if (IS_ERR(handle)) {
   6859		err = PTR_ERR(handle);
   6860		goto out_unlock;
   6861	}
   6862	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
   6863	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
   6864	inode->i_mtime = inode->i_ctime = current_time(inode);
   6865	err = ext4_mark_inode_dirty(handle, inode);
   6866	ext4_journal_stop(handle);
   6867out_unlock:
   6868	inode_unlock(inode);
   6869out_put:
   6870	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
   6871	iput(inode);
   6872	return err;
   6873out:
   6874	return dquot_quota_off(sb, type);
   6875}
   6876
    6877/* Read data from the quota file - avoid pagecache and such because we cannot
    6878 * afford acquiring the locks... As quota files are never truncated and the
    6879 * quota code itself serializes the operations (and no one else should touch
    6880 * the files), we do not have to worry about races. */
   6881static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
   6882			       size_t len, loff_t off)
   6883{
   6884	struct inode *inode = sb_dqopt(sb)->files[type];
   6885	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
   6886	int offset = off & (sb->s_blocksize - 1);
   6887	int tocopy;
   6888	size_t toread;
   6889	struct buffer_head *bh;
   6890	loff_t i_size = i_size_read(inode);
   6891
   6892	if (off > i_size)
   6893		return 0;
   6894	if (off+len > i_size)
   6895		len = i_size-off;
   6896	toread = len;
   6897	while (toread > 0) {
   6898		tocopy = sb->s_blocksize - offset < toread ?
   6899				sb->s_blocksize - offset : toread;
   6900		bh = ext4_bread(NULL, inode, blk, 0);
   6901		if (IS_ERR(bh))
   6902			return PTR_ERR(bh);
   6903		if (!bh)	/* A hole? */
   6904			memset(data, 0, tocopy);
   6905		else
   6906			memcpy(data, bh->b_data+offset, tocopy);
   6907		brelse(bh);
   6908		offset = 0;
   6909		toread -= tocopy;
   6910		data += tocopy;
   6911		blk++;
   6912	}
   6913	return len;
   6914}
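/*
 * Illustrative userspace sketch (not part of super.c): the offset
 * arithmetic of ext4_quota_read() above. An (off, len) request is split
 * into per-block chunks; only the first chunk can start at a non-zero
 * offset within its block. The request values are made up.
 */
#include <stdio.h>

#define BLKSIZE 4096

int main(void)
{
	long long off = 4090, len = 20;		/* crosses a block boundary */
	long long blk = off / BLKSIZE;
	long long offset = off % BLKSIZE;
	long long toread = len;

	while (toread > 0) {
		long long tocopy = BLKSIZE - offset < toread ?
					BLKSIZE - offset : toread;

		printf("copy %lld bytes from block %lld at offset %lld\n",
		       tocopy, blk, offset);
		offset = 0;	/* later chunks start at the block head */
		toread -= tocopy;
		blk++;
	}
	return 0;
}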
   6915
   6916/* Write to quotafile (we know the transaction is already started and has
   6917 * enough credits) */
   6918static ssize_t ext4_quota_write(struct super_block *sb, int type,
   6919				const char *data, size_t len, loff_t off)
   6920{
   6921	struct inode *inode = sb_dqopt(sb)->files[type];
   6922	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
   6923	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
   6924	int retries = 0;
   6925	struct buffer_head *bh;
   6926	handle_t *handle = journal_current_handle();
   6927
   6928	if (!handle) {
   6929		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
   6930			" cancelled because transaction is not started",
   6931			(unsigned long long)off, (unsigned long long)len);
   6932		return -EIO;
   6933	}
    6934	/*
    6935	 * Since we account for only one data block in the transaction
    6936	 * credits, the write must not cross a block boundary.
    6937	 */
   6938	if (sb->s_blocksize - offset < len) {
   6939		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
   6940			" cancelled because not block aligned",
   6941			(unsigned long long)off, (unsigned long long)len);
   6942		return -EIO;
   6943	}
   6944
   6945	do {
   6946		bh = ext4_bread(handle, inode, blk,
   6947				EXT4_GET_BLOCKS_CREATE |
   6948				EXT4_GET_BLOCKS_METADATA_NOFAIL);
   6949	} while (PTR_ERR(bh) == -ENOSPC &&
   6950		 ext4_should_retry_alloc(inode->i_sb, &retries));
   6951	if (IS_ERR(bh))
   6952		return PTR_ERR(bh);
   6953	if (!bh)
   6954		goto out;
   6955	BUFFER_TRACE(bh, "get write access");
   6956	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
   6957	if (err) {
   6958		brelse(bh);
   6959		return err;
   6960	}
   6961	lock_buffer(bh);
   6962	memcpy(bh->b_data+offset, data, len);
   6963	flush_dcache_page(bh->b_page);
   6964	unlock_buffer(bh);
   6965	err = ext4_handle_dirty_metadata(handle, NULL, bh);
   6966	brelse(bh);
   6967out:
   6968	if (inode->i_size < off + len) {
   6969		i_size_write(inode, off + len);
   6970		EXT4_I(inode)->i_disksize = inode->i_size;
   6971		err2 = ext4_mark_inode_dirty(handle, inode);
   6972		if (unlikely(err2 && !err))
   6973			err = err2;
   6974	}
   6975	return err ? err : len;
   6976}
   6977#endif
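/*
 * Illustrative userspace sketch (not part of super.c): the alignment
 * check at the top of ext4_quota_write() above. Because the transaction
 * reserved credits for a single data block, a write that would spill
 * into the next block is rejected up front.
 */
#include <stdio.h>

#define BLKSIZE 4096

static int quota_write_ok(long long off, long long len)
{
	long long offset = off % BLKSIZE;

	return BLKSIZE - offset >= len;	/* must fit in one block */
}

int main(void)
{
	printf("fits=%d crosses=%d\n",
	       quota_write_ok(0, 48),		/* 1: within one block */
	       quota_write_ok(4090, 20));	/* 0: crosses a boundary */
	return 0;
}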
   6978
   6979#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
   6980static inline void register_as_ext2(void)
   6981{
   6982	int err = register_filesystem(&ext2_fs_type);
   6983	if (err)
   6984		printk(KERN_WARNING
   6985		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
   6986}
   6987
   6988static inline void unregister_as_ext2(void)
   6989{
   6990	unregister_filesystem(&ext2_fs_type);
   6991}
   6992
   6993static inline int ext2_feature_set_ok(struct super_block *sb)
   6994{
   6995	if (ext4_has_unknown_ext2_incompat_features(sb))
   6996		return 0;
   6997	if (sb_rdonly(sb))
   6998		return 1;
   6999	if (ext4_has_unknown_ext2_ro_compat_features(sb))
   7000		return 0;
   7001	return 1;
   7002}
   7003#else
   7004static inline void register_as_ext2(void) { }
   7005static inline void unregister_as_ext2(void) { }
   7006static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
   7007#endif
   7008
   7009static inline void register_as_ext3(void)
   7010{
   7011	int err = register_filesystem(&ext3_fs_type);
   7012	if (err)
   7013		printk(KERN_WARNING
   7014		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
   7015}
   7016
   7017static inline void unregister_as_ext3(void)
   7018{
   7019	unregister_filesystem(&ext3_fs_type);
   7020}
   7021
   7022static inline int ext3_feature_set_ok(struct super_block *sb)
   7023{
   7024	if (ext4_has_unknown_ext3_incompat_features(sb))
   7025		return 0;
   7026	if (!ext4_has_feature_journal(sb))
   7027		return 0;
   7028	if (sb_rdonly(sb))
   7029		return 1;
   7030	if (ext4_has_unknown_ext3_ro_compat_features(sb))
   7031		return 0;
   7032	return 1;
   7033}
   7034
   7035static struct file_system_type ext4_fs_type = {
   7036	.owner			= THIS_MODULE,
   7037	.name			= "ext4",
   7038	.init_fs_context	= ext4_init_fs_context,
   7039	.parameters		= ext4_param_specs,
   7040	.kill_sb		= kill_block_super,
   7041	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
   7042};
   7043MODULE_ALIAS_FS("ext4");
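/*
 * Illustrative userspace sketch (not part of super.c): mounting through
 * this file_system_type. Because of register_as_ext2()/register_as_ext3()
 * above, requesting "ext2" or "ext3" can also be served by this driver.
 * Device, mount point and options are placeholders.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/sdb1", "/mnt", "ext4", 0, "errors=remount-ro") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}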
   7044
   7045/* Shared across all ext4 file systems */
   7046wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
   7047
   7048static int __init ext4_init_fs(void)
   7049{
   7050	int i, err;
   7051
   7052	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
   7053	ext4_li_info = NULL;
   7054
   7055	/* Build-time check for flags consistency */
   7056	ext4_check_flag_values();
   7057
   7058	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
   7059		init_waitqueue_head(&ext4__ioend_wq[i]);
   7060
   7061	err = ext4_init_es();
   7062	if (err)
   7063		return err;
   7064
   7065	err = ext4_init_pending();
   7066	if (err)
   7067		goto out7;
   7068
   7069	err = ext4_init_post_read_processing();
   7070	if (err)
   7071		goto out6;
   7072
   7073	err = ext4_init_pageio();
   7074	if (err)
   7075		goto out5;
   7076
   7077	err = ext4_init_system_zone();
   7078	if (err)
   7079		goto out4;
   7080
   7081	err = ext4_init_sysfs();
   7082	if (err)
   7083		goto out3;
   7084
   7085	err = ext4_init_mballoc();
   7086	if (err)
   7087		goto out2;
   7088	err = init_inodecache();
   7089	if (err)
   7090		goto out1;
   7091
   7092	err = ext4_fc_init_dentry_cache();
   7093	if (err)
   7094		goto out05;
   7095
   7096	register_as_ext3();
   7097	register_as_ext2();
   7098	err = register_filesystem(&ext4_fs_type);
   7099	if (err)
   7100		goto out;
   7101
   7102	return 0;
   7103out:
   7104	unregister_as_ext2();
   7105	unregister_as_ext3();
   7106	ext4_fc_destroy_dentry_cache();
   7107out05:
   7108	destroy_inodecache();
   7109out1:
   7110	ext4_exit_mballoc();
   7111out2:
   7112	ext4_exit_sysfs();
   7113out3:
   7114	ext4_exit_system_zone();
   7115out4:
   7116	ext4_exit_pageio();
   7117out5:
   7118	ext4_exit_post_read_processing();
   7119out6:
   7120	ext4_exit_pending();
   7121out7:
   7122	ext4_exit_es();
   7123
   7124	return err;
   7125}
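/*
 * Illustrative sketch (not part of super.c): the staged-init/goto-unwind
 * idiom used by ext4_init_fs() above. Every successfully initialized
 * stage has a matching teardown label, and a failure at stage N unwinds
 * stages N-1..1 in reverse order. The stage names are hypothetical.
 */
#include <stdio.h>

static int init_a(void) { return 0; }
static void exit_a(void) { puts("exit_a"); }
static int init_b(void) { return 0; }
static void exit_b(void) { puts("exit_b"); }
static int init_c(void) { return -1; }	/* simulate a failure */

static int init_all(void)
{
	int err;

	err = init_a();
	if (err)
		return err;
	err = init_b();
	if (err)
		goto out1;
	err = init_c();
	if (err)
		goto out2;
	return 0;
out2:
	exit_b();
out1:
	exit_a();
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}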
   7126
   7127static void __exit ext4_exit_fs(void)
   7128{
   7129	ext4_destroy_lazyinit_thread();
   7130	unregister_as_ext2();
   7131	unregister_as_ext3();
   7132	unregister_filesystem(&ext4_fs_type);
   7133	ext4_fc_destroy_dentry_cache();
   7134	destroy_inodecache();
   7135	ext4_exit_mballoc();
   7136	ext4_exit_sysfs();
   7137	ext4_exit_system_zone();
   7138	ext4_exit_pageio();
   7139	ext4_exit_post_read_processing();
   7140	ext4_exit_es();
   7141	ext4_exit_pending();
   7142}
   7143
   7144MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
   7145MODULE_DESCRIPTION("Fourth Extended Filesystem");
   7146MODULE_LICENSE("GPL");
   7147MODULE_SOFTDEP("pre: crc32c");
   7148module_init(ext4_init_fs)
   7149module_exit(ext4_exit_fs)