cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

super.c (46467B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 *  linux/fs/super.c
      4 *
      5 *  Copyright (C) 1991, 1992  Linus Torvalds
      6 *
      7 *  super.c contains code to handle: - mount structures
      8 *                                   - super-block tables
      9 *                                   - filesystem drivers list
     10 *                                   - mount system call
     11 *                                   - umount system call
     12 *                                   - ustat system call
     13 *
     14 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
     15 *
     16 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
     17 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
     18 *  Added options to /proc/mounts:
     19 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
     20 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
     21 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
     22 */
     23
     24#include <linux/export.h>
     25#include <linux/slab.h>
     26#include <linux/blkdev.h>
     27#include <linux/mount.h>
     28#include <linux/security.h>
     29#include <linux/writeback.h>		/* for the emergency remount stuff */
     30#include <linux/idr.h>
     31#include <linux/mutex.h>
     32#include <linux/backing-dev.h>
     33#include <linux/rculist_bl.h>
     34#include <linux/fscrypt.h>
     35#include <linux/fsnotify.h>
     36#include <linux/lockdep.h>
     37#include <linux/user_namespace.h>
     38#include <linux/fs_context.h>
     39#include <uapi/linux/mount.h>
     40#include "internal.h"
     41
     42static int thaw_super_locked(struct super_block *sb);
     43
     44static LIST_HEAD(super_blocks);
     45static DEFINE_SPINLOCK(sb_lock);
     46
     47static char *sb_writers_name[SB_FREEZE_LEVELS] = {
     48	"sb_writers",
     49	"sb_pagefaults",
     50	"sb_internal",
     51};
     52
     53/*
     54 * One thing we have to be careful of with a per-sb shrinker is that we don't
     55 * drop the last active reference to the superblock from within the shrinker.
     56 * If that happens we could trigger unregistering the shrinker from within the
     57 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
     58 * take a passive reference to the superblock to avoid this from occurring.
     59 */
     60static unsigned long super_cache_scan(struct shrinker *shrink,
     61				      struct shrink_control *sc)
     62{
     63	struct super_block *sb;
     64	long	fs_objects = 0;
     65	long	total_objects;
     66	long	freed = 0;
     67	long	dentries;
     68	long	inodes;
     69
     70	sb = container_of(shrink, struct super_block, s_shrink);
     71
     72	/*
     73	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
     74	 * to recurse into the FS that called us in clear_inode() and friends..
     75	 */
     76	if (!(sc->gfp_mask & __GFP_FS))
     77		return SHRINK_STOP;
     78
     79	if (!trylock_super(sb))
     80		return SHRINK_STOP;
     81
     82	if (sb->s_op->nr_cached_objects)
     83		fs_objects = sb->s_op->nr_cached_objects(sb, sc);
     84
     85	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
     86	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
     87	total_objects = dentries + inodes + fs_objects + 1;
     88	if (!total_objects)
     89		total_objects = 1;
     90
     91	/* proportion the scan between the caches */
     92	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
     93	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
     94	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
     95
     96	/*
     97	 * prune the dcache first as the icache is pinned by it, then
     98	 * prune the icache, followed by the filesystem specific caches
     99	 *
    100	 * Ensure that we always scan at least one object - memcg kmem
    101	 * accounting uses this to fully empty the caches.
    102	 */
    103	sc->nr_to_scan = dentries + 1;
    104	freed = prune_dcache_sb(sb, sc);
    105	sc->nr_to_scan = inodes + 1;
    106	freed += prune_icache_sb(sb, sc);
    107
    108	if (fs_objects) {
    109		sc->nr_to_scan = fs_objects + 1;
    110		freed += sb->s_op->free_cached_objects(sb, sc);
    111	}
    112
    113	up_read(&sb->s_umount);
    114	return freed;
    115}
    116
    117static unsigned long super_cache_count(struct shrinker *shrink,
    118				       struct shrink_control *sc)
    119{
    120	struct super_block *sb;
    121	long	total_objects = 0;
    122
    123	sb = container_of(shrink, struct super_block, s_shrink);
    124
    125	/*
    126	 * We don't call trylock_super() here as it is a scalability bottleneck,
    127	 * so we're exposed to partial setup state. The shrinker rwsem does not
    128	 * protect filesystem operations backing list_lru_shrink_count() or
    129	 * s_op->nr_cached_objects(). Counts can change between
    130	 * super_cache_count and super_cache_scan, so we really don't need locks
    131	 * here.
    132	 *
    133	 * However, if we are currently mounting the superblock, the underlying
    134	 * filesystem might be in a state of partial construction and hence it
    135	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
    136	 * avoid this situation, so do the same here. The memory barrier is
     137	 * matched with the one in vfs_get_tree() as we don't hold locks here.
    138	 */
    139	if (!(sb->s_flags & SB_BORN))
    140		return 0;
    141	smp_rmb();
    142
    143	if (sb->s_op && sb->s_op->nr_cached_objects)
    144		total_objects = sb->s_op->nr_cached_objects(sb, sc);
    145
    146	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
    147	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
    148
    149	if (!total_objects)
    150		return SHRINK_EMPTY;
    151
    152	total_objects = vfs_pressure_ratio(total_objects);
    153	return total_objects;
    154}
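
A minimal sketch (not part of super.c): how a filesystem might plug a private
cache into the per-sb shrinker driven by the two functions above. The myfs_*
names and helpers are hypothetical; only the super_operations hooks are taken
from the real interface.

static long myfs_nr_cached_objects(struct super_block *sb,
				   struct shrink_control *sc)
{
	struct myfs_sb_info *sbi = sb->s_fs_info;	/* hypothetical fs-private info */

	/* Report how many private objects could currently be reclaimed. */
	return READ_ONCE(sbi->nr_cached_extents);
}

static long myfs_free_cached_objects(struct super_block *sb,
				     struct shrink_control *sc)
{
	struct myfs_sb_info *sbi = sb->s_fs_info;

	/* Reclaim up to sc->nr_to_scan objects and return how many were freed. */
	return myfs_shrink_extent_cache(sbi, sc->nr_to_scan);
}

static const struct super_operations myfs_sops = {
	.nr_cached_objects	= myfs_nr_cached_objects,
	.free_cached_objects	= myfs_free_cached_objects,
};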
    155
    156static void destroy_super_work(struct work_struct *work)
    157{
    158	struct super_block *s = container_of(work, struct super_block,
    159							destroy_work);
    160	int i;
    161
    162	for (i = 0; i < SB_FREEZE_LEVELS; i++)
    163		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
    164	kfree(s);
    165}
    166
    167static void destroy_super_rcu(struct rcu_head *head)
    168{
    169	struct super_block *s = container_of(head, struct super_block, rcu);
    170	INIT_WORK(&s->destroy_work, destroy_super_work);
    171	schedule_work(&s->destroy_work);
    172}
    173
    174/* Free a superblock that has never been seen by anyone */
    175static void destroy_unused_super(struct super_block *s)
    176{
    177	if (!s)
    178		return;
    179	up_write(&s->s_umount);
    180	list_lru_destroy(&s->s_dentry_lru);
    181	list_lru_destroy(&s->s_inode_lru);
    182	security_sb_free(s);
    183	put_user_ns(s->s_user_ns);
    184	kfree(s->s_subtype);
    185	free_prealloced_shrinker(&s->s_shrink);
    186	/* no delays needed */
    187	destroy_super_work(&s->destroy_work);
    188}
    189
    190/**
    191 *	alloc_super	-	create new superblock
    192 *	@type:	filesystem type superblock should belong to
    193 *	@flags: the mount flags
    194 *	@user_ns: User namespace for the super_block
    195 *
    196 *	Allocates and initializes a new &struct super_block.  alloc_super()
     197 *	returns a pointer to the new superblock or %NULL if allocation failed.
    198 */
    199static struct super_block *alloc_super(struct file_system_type *type, int flags,
    200				       struct user_namespace *user_ns)
    201{
    202	struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
    203	static const struct super_operations default_op;
    204	int i;
    205
    206	if (!s)
    207		return NULL;
    208
    209	INIT_LIST_HEAD(&s->s_mounts);
    210	s->s_user_ns = get_user_ns(user_ns);
    211	init_rwsem(&s->s_umount);
    212	lockdep_set_class(&s->s_umount, &type->s_umount_key);
    213	/*
    214	 * sget() can have s_umount recursion.
    215	 *
    216	 * When it cannot find a suitable sb, it allocates a new
    217	 * one (this one), and tries again to find a suitable old
    218	 * one.
    219	 *
    220	 * In case that succeeds, it will acquire the s_umount
     221	 * lock of the old one. Since these are clearly distinct
    222	 * locks, and this object isn't exposed yet, there's no
    223	 * risk of deadlocks.
    224	 *
    225	 * Annotate this by putting this lock in a different
    226	 * subclass.
    227	 */
    228	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
    229
    230	if (security_sb_alloc(s))
    231		goto fail;
    232
    233	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
    234		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
    235					sb_writers_name[i],
    236					&type->s_writers_key[i]))
    237			goto fail;
    238	}
    239	init_waitqueue_head(&s->s_writers.wait_unfrozen);
    240	s->s_bdi = &noop_backing_dev_info;
    241	s->s_flags = flags;
    242	if (s->s_user_ns != &init_user_ns)
    243		s->s_iflags |= SB_I_NODEV;
    244	INIT_HLIST_NODE(&s->s_instances);
    245	INIT_HLIST_BL_HEAD(&s->s_roots);
    246	mutex_init(&s->s_sync_lock);
    247	INIT_LIST_HEAD(&s->s_inodes);
    248	spin_lock_init(&s->s_inode_list_lock);
    249	INIT_LIST_HEAD(&s->s_inodes_wb);
    250	spin_lock_init(&s->s_inode_wblist_lock);
    251
    252	s->s_count = 1;
    253	atomic_set(&s->s_active, 1);
    254	mutex_init(&s->s_vfs_rename_mutex);
    255	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
    256	init_rwsem(&s->s_dquot.dqio_sem);
    257	s->s_maxbytes = MAX_NON_LFS;
    258	s->s_op = &default_op;
    259	s->s_time_gran = 1000000000;
    260	s->s_time_min = TIME64_MIN;
    261	s->s_time_max = TIME64_MAX;
    262
    263	s->s_shrink.seeks = DEFAULT_SEEKS;
    264	s->s_shrink.scan_objects = super_cache_scan;
    265	s->s_shrink.count_objects = super_cache_count;
    266	s->s_shrink.batch = 1024;
    267	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
    268	if (prealloc_shrinker(&s->s_shrink))
    269		goto fail;
    270	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
    271		goto fail;
    272	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
    273		goto fail;
    274	return s;
    275
    276fail:
    277	destroy_unused_super(s);
    278	return NULL;
    279}
    280
    281/* Superblock refcounting  */
    282
    283/*
    284 * Drop a superblock's refcount.  The caller must hold sb_lock.
    285 */
    286static void __put_super(struct super_block *s)
    287{
    288	if (!--s->s_count) {
    289		list_del_init(&s->s_list);
    290		WARN_ON(s->s_dentry_lru.node);
    291		WARN_ON(s->s_inode_lru.node);
    292		WARN_ON(!list_empty(&s->s_mounts));
    293		security_sb_free(s);
    294		fscrypt_sb_free(s);
    295		put_user_ns(s->s_user_ns);
    296		kfree(s->s_subtype);
    297		call_rcu(&s->rcu, destroy_super_rcu);
    298	}
    299}
    300
    301/**
    302 *	put_super	-	drop a temporary reference to superblock
    303 *	@sb: superblock in question
    304 *
     305 *	Drops a temporary reference, frees the superblock if there are no
     306 *	references left.
    307 */
    308void put_super(struct super_block *sb)
    309{
    310	spin_lock(&sb_lock);
    311	__put_super(sb);
    312	spin_unlock(&sb_lock);
    313}
    314
    315
    316/**
    317 *	deactivate_locked_super	-	drop an active reference to superblock
    318 *	@s: superblock to deactivate
    319 *
    320 *	Drops an active reference to superblock, converting it into a temporary
     321 *	one if there are no other active references left.  In that case we
    322 *	tell fs driver to shut it down and drop the temporary reference we
    323 *	had just acquired.
    324 *
    325 *	Caller holds exclusive lock on superblock; that lock is released.
    326 */
    327void deactivate_locked_super(struct super_block *s)
    328{
    329	struct file_system_type *fs = s->s_type;
    330	if (atomic_dec_and_test(&s->s_active)) {
    331		unregister_shrinker(&s->s_shrink);
    332		fs->kill_sb(s);
    333
    334		/*
    335		 * Since list_lru_destroy() may sleep, we cannot call it from
    336		 * put_super(), where we hold the sb_lock. Therefore we destroy
    337		 * the lru lists right now.
    338		 */
    339		list_lru_destroy(&s->s_dentry_lru);
    340		list_lru_destroy(&s->s_inode_lru);
    341
    342		put_filesystem(fs);
    343		put_super(s);
    344	} else {
    345		up_write(&s->s_umount);
    346	}
    347}
    348
    349EXPORT_SYMBOL(deactivate_locked_super);
    350
    351/**
    352 *	deactivate_super	-	drop an active reference to superblock
    353 *	@s: superblock to deactivate
    354 *
    355 *	Variant of deactivate_locked_super(), except that superblock is *not*
    356 *	locked by caller.  If we are going to drop the final active reference,
    357 *	lock will be acquired prior to that.
    358 */
    359void deactivate_super(struct super_block *s)
    360{
    361	if (!atomic_add_unless(&s->s_active, -1, 1)) {
    362		down_write(&s->s_umount);
    363		deactivate_locked_super(s);
    364	}
    365}
    366
    367EXPORT_SYMBOL(deactivate_super);
    368
    369/**
    370 *	grab_super - acquire an active reference
    371 *	@s: reference we are trying to make active
    372 *
    373 *	Tries to acquire an active reference.  grab_super() is used when we
    374 * 	had just found a superblock in super_blocks or fs_type->fs_supers
    375 *	and want to turn it into a full-blown active reference.  grab_super()
    376 *	is called with sb_lock held and drops it.  Returns 1 in case of
     377 *	success, 0 if we failed (the superblock contents were already dead or
     378 *	dying when grab_super() was called).  Note that this is only
    379 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
    380 *	of their type), so increment of ->s_count is OK here.
    381 */
    382static int grab_super(struct super_block *s) __releases(sb_lock)
    383{
    384	s->s_count++;
    385	spin_unlock(&sb_lock);
    386	down_write(&s->s_umount);
    387	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
    388		put_super(s);
    389		return 1;
    390	}
    391	up_write(&s->s_umount);
    392	put_super(s);
    393	return 0;
    394}
    395
    396/*
    397 *	trylock_super - try to grab ->s_umount shared
    398 *	@sb: reference we are trying to grab
    399 *
    400 *	Try to prevent fs shutdown.  This is used in places where we
    401 *	cannot take an active reference but we need to ensure that the
    402 *	filesystem is not shut down while we are working on it. It returns
    403 *	false if we cannot acquire s_umount or if we lose the race and
    404 *	filesystem already got into shutdown, and returns true with the s_umount
    405 *	lock held in read mode in case of success. On successful return,
    406 *	the caller must drop the s_umount lock when done.
    407 *
     408 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
     409 *	The reason why it's safe is that we are OK with doing trylock instead
     410 *	of down_read().  There are a couple of places that are OK with that, but
    411 *	it's very much not a general-purpose interface.
    412 */
    413bool trylock_super(struct super_block *sb)
    414{
    415	if (down_read_trylock(&sb->s_umount)) {
    416		if (!hlist_unhashed(&sb->s_instances) &&
    417		    sb->s_root && (sb->s_flags & SB_BORN))
    418			return true;
    419		up_read(&sb->s_umount);
    420	}
    421
    422	return false;
    423}
    424
    425/**
    426 *	generic_shutdown_super	-	common helper for ->kill_sb()
    427 *	@sb: superblock to kill
    428 *
    429 *	generic_shutdown_super() does all fs-independent work on superblock
    430 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
    431 *	that need destruction out of superblock, call generic_shutdown_super()
    432 *	and release aforementioned objects.  Note: dentries and inodes _are_
    433 *	taken care of and do not need specific handling.
    434 *
    435 *	Upon calling this function, the filesystem may no longer alter or
    436 *	rearrange the set of dentries belonging to this super_block, nor may it
    437 *	change the attachments of dentries to inodes.
    438 */
    439void generic_shutdown_super(struct super_block *sb)
    440{
    441	const struct super_operations *sop = sb->s_op;
    442
    443	if (sb->s_root) {
    444		shrink_dcache_for_umount(sb);
    445		sync_filesystem(sb);
    446		sb->s_flags &= ~SB_ACTIVE;
    447
    448		cgroup_writeback_umount();
    449
    450		/* evict all inodes with zero refcount */
    451		evict_inodes(sb);
    452		/* only nonzero refcount inodes can have marks */
    453		fsnotify_sb_delete(sb);
    454		security_sb_delete(sb);
    455
    456		if (sb->s_dio_done_wq) {
    457			destroy_workqueue(sb->s_dio_done_wq);
    458			sb->s_dio_done_wq = NULL;
    459		}
    460
    461		if (sop->put_super)
    462			sop->put_super(sb);
    463
    464		if (!list_empty(&sb->s_inodes)) {
    465			printk("VFS: Busy inodes after unmount of %s. "
    466			   "Self-destruct in 5 seconds.  Have a nice day...\n",
    467			   sb->s_id);
    468		}
    469	}
    470	spin_lock(&sb_lock);
    471	/* should be initialized for __put_super_and_need_restart() */
    472	hlist_del_init(&sb->s_instances);
    473	spin_unlock(&sb_lock);
    474	up_write(&sb->s_umount);
    475	if (sb->s_bdi != &noop_backing_dev_info) {
    476		if (sb->s_iflags & SB_I_PERSB_BDI)
    477			bdi_unregister(sb->s_bdi);
    478		bdi_put(sb->s_bdi);
    479		sb->s_bdi = &noop_backing_dev_info;
    480	}
    481}
    482
    483EXPORT_SYMBOL(generic_shutdown_super);
    484
    485bool mount_capable(struct fs_context *fc)
    486{
    487	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
    488		return capable(CAP_SYS_ADMIN);
    489	else
    490		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
    491}
    492
    493/**
    494 * sget_fc - Find or create a superblock
    495 * @fc:	Filesystem context.
    496 * @test: Comparison callback
    497 * @set: Setup callback
    498 *
    499 * Find or create a superblock using the parameters stored in the filesystem
    500 * context and the two callback functions.
    501 *
    502 * If an extant superblock is matched, then that will be returned with an
    503 * elevated reference count that the caller must transfer or discard.
    504 *
    505 * If no match is made, a new superblock will be allocated and basic
    506 * initialisation will be performed (s_type, s_fs_info and s_id will be set and
    507 * the set() callback will be invoked), the superblock will be published and it
    508 * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
    509 * as yet unset.
    510 */
    511struct super_block *sget_fc(struct fs_context *fc,
    512			    int (*test)(struct super_block *, struct fs_context *),
    513			    int (*set)(struct super_block *, struct fs_context *))
    514{
    515	struct super_block *s = NULL;
    516	struct super_block *old;
    517	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
    518	int err;
    519
    520retry:
    521	spin_lock(&sb_lock);
    522	if (test) {
    523		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
    524			if (test(old, fc))
    525				goto share_extant_sb;
    526		}
    527	}
    528	if (!s) {
    529		spin_unlock(&sb_lock);
    530		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
    531		if (!s)
    532			return ERR_PTR(-ENOMEM);
    533		goto retry;
    534	}
    535
    536	s->s_fs_info = fc->s_fs_info;
    537	err = set(s, fc);
    538	if (err) {
    539		s->s_fs_info = NULL;
    540		spin_unlock(&sb_lock);
    541		destroy_unused_super(s);
    542		return ERR_PTR(err);
    543	}
    544	fc->s_fs_info = NULL;
    545	s->s_type = fc->fs_type;
    546	s->s_iflags |= fc->s_iflags;
    547	strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
    548	list_add_tail(&s->s_list, &super_blocks);
    549	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
    550	spin_unlock(&sb_lock);
    551	get_filesystem(s->s_type);
    552	register_shrinker_prepared(&s->s_shrink);
    553	return s;
    554
    555share_extant_sb:
    556	if (user_ns != old->s_user_ns) {
    557		spin_unlock(&sb_lock);
    558		destroy_unused_super(s);
    559		return ERR_PTR(-EBUSY);
    560	}
    561	if (!grab_super(old))
    562		goto retry;
    563	destroy_unused_super(s);
    564	return old;
    565}
    566EXPORT_SYMBOL(sget_fc);
    567
    568/**
    569 *	sget	-	find or create a superblock
    570 *	@type:	  filesystem type superblock should belong to
    571 *	@test:	  comparison callback
    572 *	@set:	  setup callback
    573 *	@flags:	  mount flags
    574 *	@data:	  argument to each of them
    575 */
    576struct super_block *sget(struct file_system_type *type,
    577			int (*test)(struct super_block *,void *),
    578			int (*set)(struct super_block *,void *),
    579			int flags,
    580			void *data)
    581{
    582	struct user_namespace *user_ns = current_user_ns();
    583	struct super_block *s = NULL;
    584	struct super_block *old;
    585	int err;
    586
    587	/* We don't yet pass the user namespace of the parent
    588	 * mount through to here so always use &init_user_ns
    589	 * until that changes.
    590	 */
    591	if (flags & SB_SUBMOUNT)
    592		user_ns = &init_user_ns;
    593
    594retry:
    595	spin_lock(&sb_lock);
    596	if (test) {
    597		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
    598			if (!test(old, data))
    599				continue;
    600			if (user_ns != old->s_user_ns) {
    601				spin_unlock(&sb_lock);
    602				destroy_unused_super(s);
    603				return ERR_PTR(-EBUSY);
    604			}
    605			if (!grab_super(old))
    606				goto retry;
    607			destroy_unused_super(s);
    608			return old;
    609		}
    610	}
    611	if (!s) {
    612		spin_unlock(&sb_lock);
    613		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
    614		if (!s)
    615			return ERR_PTR(-ENOMEM);
    616		goto retry;
    617	}
    618
    619	err = set(s, data);
    620	if (err) {
    621		spin_unlock(&sb_lock);
    622		destroy_unused_super(s);
    623		return ERR_PTR(err);
    624	}
    625	s->s_type = type;
    626	strlcpy(s->s_id, type->name, sizeof(s->s_id));
    627	list_add_tail(&s->s_list, &super_blocks);
    628	hlist_add_head(&s->s_instances, &type->fs_supers);
    629	spin_unlock(&sb_lock);
    630	get_filesystem(type);
    631	register_shrinker_prepared(&s->s_shrink);
    632	return s;
    633}
    634EXPORT_SYMBOL(sget);
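
A minimal sketch of the sget() pattern, assuming hypothetical examplefs_*
helpers: the test callback decides whether an existing superblock matches,
the set callback initialises a freshly allocated one, and the caller only
fills in the root when sget() hands back a new (rootless) superblock.

static int examplefs_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;		/* same backing instance? */
}

static int examplefs_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;			/* key used by the test above */
	return set_anon_super(sb, NULL);
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	void *key = examplefs_lookup_instance(dev_name);	/* hypothetical */
	struct super_block *sb;
	int err;

	sb = sget(fs_type, examplefs_test_super, examplefs_set_super, flags, key);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		err = examplefs_fill_super(sb, data, flags & SB_SILENT);	/* hypothetical */
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}
		sb->s_flags |= SB_ACTIVE;
	}
	return dget(sb->s_root);
}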
    635
    636void drop_super(struct super_block *sb)
    637{
    638	up_read(&sb->s_umount);
    639	put_super(sb);
    640}
    641
    642EXPORT_SYMBOL(drop_super);
    643
    644void drop_super_exclusive(struct super_block *sb)
    645{
    646	up_write(&sb->s_umount);
    647	put_super(sb);
    648}
    649EXPORT_SYMBOL(drop_super_exclusive);
    650
    651static void __iterate_supers(void (*f)(struct super_block *))
    652{
    653	struct super_block *sb, *p = NULL;
    654
    655	spin_lock(&sb_lock);
    656	list_for_each_entry(sb, &super_blocks, s_list) {
    657		if (hlist_unhashed(&sb->s_instances))
    658			continue;
    659		sb->s_count++;
    660		spin_unlock(&sb_lock);
    661
    662		f(sb);
    663
    664		spin_lock(&sb_lock);
    665		if (p)
    666			__put_super(p);
    667		p = sb;
    668	}
    669	if (p)
    670		__put_super(p);
    671	spin_unlock(&sb_lock);
    672}
    673/**
    674 *	iterate_supers - call function for all active superblocks
    675 *	@f: function to call
    676 *	@arg: argument to pass to it
    677 *
    678 *	Scans the superblock list and calls given function, passing it
    679 *	locked superblock and given argument.
    680 */
    681void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
    682{
    683	struct super_block *sb, *p = NULL;
    684
    685	spin_lock(&sb_lock);
    686	list_for_each_entry(sb, &super_blocks, s_list) {
    687		if (hlist_unhashed(&sb->s_instances))
    688			continue;
    689		sb->s_count++;
    690		spin_unlock(&sb_lock);
    691
    692		down_read(&sb->s_umount);
    693		if (sb->s_root && (sb->s_flags & SB_BORN))
    694			f(sb, arg);
    695		up_read(&sb->s_umount);
    696
    697		spin_lock(&sb_lock);
    698		if (p)
    699			__put_super(p);
    700		p = sb;
    701	}
    702	if (p)
    703		__put_super(p);
    704	spin_unlock(&sb_lock);
    705}
    706
    707/**
    708 *	iterate_supers_type - call function for superblocks of given type
    709 *	@type: fs type
    710 *	@f: function to call
    711 *	@arg: argument to pass to it
    712 *
    713 *	Scans the superblock list and calls given function, passing it
    714 *	locked superblock and given argument.
    715 */
    716void iterate_supers_type(struct file_system_type *type,
    717	void (*f)(struct super_block *, void *), void *arg)
    718{
    719	struct super_block *sb, *p = NULL;
    720
    721	spin_lock(&sb_lock);
    722	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
    723		sb->s_count++;
    724		spin_unlock(&sb_lock);
    725
    726		down_read(&sb->s_umount);
    727		if (sb->s_root && (sb->s_flags & SB_BORN))
    728			f(sb, arg);
    729		up_read(&sb->s_umount);
    730
    731		spin_lock(&sb_lock);
    732		if (p)
    733			__put_super(p);
    734		p = sb;
    735	}
    736	if (p)
    737		__put_super(p);
    738	spin_unlock(&sb_lock);
    739}
    740
    741EXPORT_SYMBOL(iterate_supers_type);
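
A minimal usage sketch, assuming a hypothetical caller: the callback passed
to iterate_supers_type() runs with s_umount held for read on an SB_BORN
superblock, so it may inspect the superblock but must not take s_umount
itself or block for long.

static void count_rw_super(struct super_block *sb, void *arg)
{
	unsigned int *nr_rw = arg;

	if (!sb_rdonly(sb))
		(*nr_rw)++;
}

static unsigned int count_rw_supers(struct file_system_type *type)
{
	unsigned int nr_rw = 0;

	iterate_supers_type(type, count_rw_super, &nr_rw);
	return nr_rw;
}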
    742
    743/**
    744 * get_super - get the superblock of a device
    745 * @bdev: device to get the superblock for
    746 *
    747 * Scans the superblock list and finds the superblock of the file system
    748 * mounted on the device given. %NULL is returned if no match is found.
    749 */
    750struct super_block *get_super(struct block_device *bdev)
    751{
    752	struct super_block *sb;
    753
    754	if (!bdev)
    755		return NULL;
    756
    757	spin_lock(&sb_lock);
    758rescan:
    759	list_for_each_entry(sb, &super_blocks, s_list) {
    760		if (hlist_unhashed(&sb->s_instances))
    761			continue;
    762		if (sb->s_bdev == bdev) {
    763			sb->s_count++;
    764			spin_unlock(&sb_lock);
    765			down_read(&sb->s_umount);
    766			/* still alive? */
    767			if (sb->s_root && (sb->s_flags & SB_BORN))
    768				return sb;
    769			up_read(&sb->s_umount);
    770			/* nope, got unmounted */
    771			spin_lock(&sb_lock);
    772			__put_super(sb);
    773			goto rescan;
    774		}
    775	}
    776	spin_unlock(&sb_lock);
    777	return NULL;
    778}
    779
    780/**
    781 * get_active_super - get an active reference to the superblock of a device
    782 * @bdev: device to get the superblock for
    783 *
    784 * Scans the superblock list and finds the superblock of the file system
    785 * mounted on the device given.  Returns the superblock with an active
    786 * reference or %NULL if none was found.
    787 */
    788struct super_block *get_active_super(struct block_device *bdev)
    789{
    790	struct super_block *sb;
    791
    792	if (!bdev)
    793		return NULL;
    794
    795restart:
    796	spin_lock(&sb_lock);
    797	list_for_each_entry(sb, &super_blocks, s_list) {
    798		if (hlist_unhashed(&sb->s_instances))
    799			continue;
    800		if (sb->s_bdev == bdev) {
    801			if (!grab_super(sb))
    802				goto restart;
    803			up_write(&sb->s_umount);
    804			return sb;
    805		}
    806	}
    807	spin_unlock(&sb_lock);
    808	return NULL;
    809}
    810
    811struct super_block *user_get_super(dev_t dev, bool excl)
    812{
    813	struct super_block *sb;
    814
    815	spin_lock(&sb_lock);
    816rescan:
    817	list_for_each_entry(sb, &super_blocks, s_list) {
    818		if (hlist_unhashed(&sb->s_instances))
    819			continue;
    820		if (sb->s_dev ==  dev) {
    821			sb->s_count++;
    822			spin_unlock(&sb_lock);
    823			if (excl)
    824				down_write(&sb->s_umount);
    825			else
    826				down_read(&sb->s_umount);
    827			/* still alive? */
    828			if (sb->s_root && (sb->s_flags & SB_BORN))
    829				return sb;
    830			if (excl)
    831				up_write(&sb->s_umount);
    832			else
    833				up_read(&sb->s_umount);
    834			/* nope, got unmounted */
    835			spin_lock(&sb_lock);
    836			__put_super(sb);
    837			goto rescan;
    838		}
    839	}
    840	spin_unlock(&sb_lock);
    841	return NULL;
    842}
    843
    844/**
    845 * reconfigure_super - asks filesystem to change superblock parameters
    846 * @fc: The superblock and configuration
    847 *
    848 * Alters the configuration parameters of a live superblock.
    849 */
    850int reconfigure_super(struct fs_context *fc)
    851{
    852	struct super_block *sb = fc->root->d_sb;
    853	int retval;
    854	bool remount_ro = false;
    855	bool force = fc->sb_flags & SB_FORCE;
    856
    857	if (fc->sb_flags_mask & ~MS_RMT_MASK)
    858		return -EINVAL;
    859	if (sb->s_writers.frozen != SB_UNFROZEN)
    860		return -EBUSY;
    861
    862	retval = security_sb_remount(sb, fc->security);
    863	if (retval)
    864		return retval;
    865
    866	if (fc->sb_flags_mask & SB_RDONLY) {
    867#ifdef CONFIG_BLOCK
    868		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
    869		    bdev_read_only(sb->s_bdev))
    870			return -EACCES;
    871#endif
    872
    873		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
    874	}
    875
    876	if (remount_ro) {
    877		if (!hlist_empty(&sb->s_pins)) {
    878			up_write(&sb->s_umount);
    879			group_pin_kill(&sb->s_pins);
    880			down_write(&sb->s_umount);
    881			if (!sb->s_root)
    882				return 0;
    883			if (sb->s_writers.frozen != SB_UNFROZEN)
    884				return -EBUSY;
    885			remount_ro = !sb_rdonly(sb);
    886		}
    887	}
    888	shrink_dcache_sb(sb);
    889
    890	/* If we are reconfiguring to RDONLY and current sb is read/write,
    891	 * make sure there are no files open for writing.
    892	 */
    893	if (remount_ro) {
    894		if (force) {
    895			sb->s_readonly_remount = 1;
    896			smp_wmb();
    897		} else {
    898			retval = sb_prepare_remount_readonly(sb);
    899			if (retval)
    900				return retval;
    901		}
    902	}
    903
    904	if (fc->ops->reconfigure) {
    905		retval = fc->ops->reconfigure(fc);
    906		if (retval) {
    907			if (!force)
    908				goto cancel_readonly;
    909			/* If forced remount, go ahead despite any errors */
    910			WARN(1, "forced remount of a %s fs returned %i\n",
    911			     sb->s_type->name, retval);
    912		}
    913	}
    914
    915	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
    916				 (fc->sb_flags & fc->sb_flags_mask)));
    917	/* Needs to be ordered wrt mnt_is_readonly() */
    918	smp_wmb();
    919	sb->s_readonly_remount = 0;
    920
    921	/*
    922	 * Some filesystems modify their metadata via some other path than the
    923	 * bdev buffer cache (eg. use a private mapping, or directories in
    924	 * pagecache, etc). Also file data modifications go via their own
     925	 * mappings. So if we try to mount readonly and then copy the filesystem
    926	 * from bdev, we could get stale data, so invalidate it to give a best
    927	 * effort at coherency.
    928	 */
    929	if (remount_ro && sb->s_bdev)
    930		invalidate_bdev(sb->s_bdev);
    931	return 0;
    932
    933cancel_readonly:
    934	sb->s_readonly_remount = 0;
    935	return retval;
    936}
    937
    938static void do_emergency_remount_callback(struct super_block *sb)
    939{
    940	down_write(&sb->s_umount);
    941	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
    942	    !sb_rdonly(sb)) {
    943		struct fs_context *fc;
    944
    945		fc = fs_context_for_reconfigure(sb->s_root,
    946					SB_RDONLY | SB_FORCE, SB_RDONLY);
    947		if (!IS_ERR(fc)) {
    948			if (parse_monolithic_mount_data(fc, NULL) == 0)
    949				(void)reconfigure_super(fc);
    950			put_fs_context(fc);
    951		}
    952	}
    953	up_write(&sb->s_umount);
    954}
    955
    956static void do_emergency_remount(struct work_struct *work)
    957{
    958	__iterate_supers(do_emergency_remount_callback);
    959	kfree(work);
    960	printk("Emergency Remount complete\n");
    961}
    962
    963void emergency_remount(void)
    964{
    965	struct work_struct *work;
    966
    967	work = kmalloc(sizeof(*work), GFP_ATOMIC);
    968	if (work) {
    969		INIT_WORK(work, do_emergency_remount);
    970		schedule_work(work);
    971	}
    972}
    973
    974static void do_thaw_all_callback(struct super_block *sb)
    975{
    976	down_write(&sb->s_umount);
    977	if (sb->s_root && sb->s_flags & SB_BORN) {
    978		emergency_thaw_bdev(sb);
    979		thaw_super_locked(sb);
    980	} else {
    981		up_write(&sb->s_umount);
    982	}
    983}
    984
    985static void do_thaw_all(struct work_struct *work)
    986{
    987	__iterate_supers(do_thaw_all_callback);
    988	kfree(work);
    989	printk(KERN_WARNING "Emergency Thaw complete\n");
    990}
    991
    992/**
    993 * emergency_thaw_all -- forcibly thaw every frozen filesystem
    994 *
    995 * Used for emergency unfreeze of all filesystems via SysRq
    996 */
    997void emergency_thaw_all(void)
    998{
    999	struct work_struct *work;
   1000
   1001	work = kmalloc(sizeof(*work), GFP_ATOMIC);
   1002	if (work) {
   1003		INIT_WORK(work, do_thaw_all);
   1004		schedule_work(work);
   1005	}
   1006}
   1007
   1008static DEFINE_IDA(unnamed_dev_ida);
   1009
   1010/**
   1011 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
   1012 * @p: Pointer to a dev_t.
   1013 *
   1014 * Filesystems which don't use real block devices can call this function
   1015 * to allocate a virtual block device.
   1016 *
   1017 * Context: Any context.  Frequently called while holding sb_lock.
   1018 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
   1019 * or -ENOMEM if memory allocation failed.
   1020 */
   1021int get_anon_bdev(dev_t *p)
   1022{
   1023	int dev;
   1024
   1025	/*
   1026	 * Many userspace utilities consider an FSID of 0 invalid.
   1027	 * Always return at least 1 from get_anon_bdev.
   1028	 */
   1029	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
   1030			GFP_ATOMIC);
   1031	if (dev == -ENOSPC)
   1032		dev = -EMFILE;
   1033	if (dev < 0)
   1034		return dev;
   1035
   1036	*p = MKDEV(0, dev);
   1037	return 0;
   1038}
   1039EXPORT_SYMBOL(get_anon_bdev);
   1040
   1041void free_anon_bdev(dev_t dev)
   1042{
   1043	ida_free(&unnamed_dev_ida, MINOR(dev));
   1044}
   1045EXPORT_SYMBOL(free_anon_bdev);
   1046
   1047int set_anon_super(struct super_block *s, void *data)
   1048{
   1049	return get_anon_bdev(&s->s_dev);
   1050}
   1051EXPORT_SYMBOL(set_anon_super);
   1052
   1053void kill_anon_super(struct super_block *sb)
   1054{
   1055	dev_t dev = sb->s_dev;
   1056	generic_shutdown_super(sb);
   1057	free_anon_bdev(dev);
   1058}
   1059EXPORT_SYMBOL(kill_anon_super);
   1060
   1061void kill_litter_super(struct super_block *sb)
   1062{
   1063	if (sb->s_root)
   1064		d_genocide(sb->s_root);
   1065	kill_anon_super(sb);
   1066}
   1067EXPORT_SYMBOL(kill_litter_super);
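
A minimal sketch of where kill_anon_super()/kill_litter_super() get wired up,
assuming a hypothetical in-memory filesystem: ->kill_sb points at
kill_litter_super() so the dentries "littering" the tree are dropped via
d_genocide() before the anonymous dev_t is released.

static int myramfs_init_fs_context(struct fs_context *fc);	/* hypothetical */

static struct file_system_type myramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "myramfs",
	.init_fs_context = myramfs_init_fs_context,
	.kill_sb	= kill_litter_super,
};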
   1068
   1069int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
   1070{
   1071	return set_anon_super(sb, NULL);
   1072}
   1073EXPORT_SYMBOL(set_anon_super_fc);
   1074
   1075static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
   1076{
   1077	return sb->s_fs_info == fc->s_fs_info;
   1078}
   1079
   1080static int test_single_super(struct super_block *s, struct fs_context *fc)
   1081{
   1082	return 1;
   1083}
   1084
   1085/**
   1086 * vfs_get_super - Get a superblock with a search key set in s_fs_info.
   1087 * @fc: The filesystem context holding the parameters
   1088 * @keying: How to distinguish superblocks
   1089 * @fill_super: Helper to initialise a new superblock
   1090 *
   1091 * Search for a superblock and create a new one if not found.  The search
   1092 * criterion is controlled by @keying.  If the search fails, a new superblock
   1093 * is created and @fill_super() is called to initialise it.
   1094 *
   1095 * @keying can take one of a number of values:
   1096 *
   1097 * (1) vfs_get_single_super - Only one superblock of this type may exist on the
   1098 *     system.  This is typically used for special system filesystems.
   1099 *
   1100 * (2) vfs_get_keyed_super - Multiple superblocks may exist, but they must have
   1101 *     distinct keys (where the key is in s_fs_info).  Searching for the same
   1102 *     key again will turn up the superblock for that key.
   1103 *
   1104 * (3) vfs_get_independent_super - Multiple superblocks may exist and are
   1105 *     unkeyed.  Each call will get a new superblock.
   1106 *
   1107 * A permissions check is made by sget_fc() unless we're getting a superblock
   1108 * for a kernel-internal mount or a submount.
   1109 */
   1110int vfs_get_super(struct fs_context *fc,
   1111		  enum vfs_get_super_keying keying,
   1112		  int (*fill_super)(struct super_block *sb,
   1113				    struct fs_context *fc))
   1114{
   1115	int (*test)(struct super_block *, struct fs_context *);
   1116	struct super_block *sb;
   1117	int err;
   1118
   1119	switch (keying) {
   1120	case vfs_get_single_super:
   1121	case vfs_get_single_reconf_super:
   1122		test = test_single_super;
   1123		break;
   1124	case vfs_get_keyed_super:
   1125		test = test_keyed_super;
   1126		break;
   1127	case vfs_get_independent_super:
   1128		test = NULL;
   1129		break;
   1130	default:
   1131		BUG();
   1132	}
   1133
   1134	sb = sget_fc(fc, test, set_anon_super_fc);
   1135	if (IS_ERR(sb))
   1136		return PTR_ERR(sb);
   1137
   1138	if (!sb->s_root) {
   1139		err = fill_super(sb, fc);
   1140		if (err)
   1141			goto error;
   1142
   1143		sb->s_flags |= SB_ACTIVE;
   1144		fc->root = dget(sb->s_root);
   1145	} else {
   1146		fc->root = dget(sb->s_root);
   1147		if (keying == vfs_get_single_reconf_super) {
   1148			err = reconfigure_super(fc);
   1149			if (err < 0) {
   1150				dput(fc->root);
   1151				fc->root = NULL;
   1152				goto error;
   1153			}
   1154		}
   1155	}
   1156
   1157	return 0;
   1158
   1159error:
   1160	deactivate_locked_super(sb);
   1161	return err;
   1162}
   1163EXPORT_SYMBOL(vfs_get_super);
   1164
   1165int get_tree_nodev(struct fs_context *fc,
   1166		  int (*fill_super)(struct super_block *sb,
   1167				    struct fs_context *fc))
   1168{
   1169	return vfs_get_super(fc, vfs_get_independent_super, fill_super);
   1170}
   1171EXPORT_SYMBOL(get_tree_nodev);
   1172
   1173int get_tree_single(struct fs_context *fc,
   1174		  int (*fill_super)(struct super_block *sb,
   1175				    struct fs_context *fc))
   1176{
   1177	return vfs_get_super(fc, vfs_get_single_super, fill_super);
   1178}
   1179EXPORT_SYMBOL(get_tree_single);
   1180
   1181int get_tree_single_reconf(struct fs_context *fc,
   1182		  int (*fill_super)(struct super_block *sb,
   1183				    struct fs_context *fc))
   1184{
   1185	return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
   1186}
   1187EXPORT_SYMBOL(get_tree_single_reconf);
   1188
   1189int get_tree_keyed(struct fs_context *fc,
   1190		  int (*fill_super)(struct super_block *sb,
   1191				    struct fs_context *fc),
   1192		void *key)
   1193{
   1194	fc->s_fs_info = key;
   1195	return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
   1196}
   1197EXPORT_SYMBOL(get_tree_keyed);
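
A minimal sketch of the fs_context path that feeds these helpers, assuming
hypothetical ctxfs_* functions: ->init_fs_context installs operations whose
->get_tree calls one of the helpers above (get_tree_nodev() here), which ends
up in vfs_get_super()/sget_fc() and invokes the fill_super callback for a
brand-new superblock.

static int ctxfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* set s_op and s_maxbytes, build the root inode, d_make_root(), ... */
	return 0;
}

static int ctxfs_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, ctxfs_fill_super);
}

static const struct fs_context_operations ctxfs_context_ops = {
	.get_tree	= ctxfs_get_tree,
};

static int ctxfs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &ctxfs_context_ops;
	return 0;
}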
   1198
   1199#ifdef CONFIG_BLOCK
   1200
   1201static int set_bdev_super(struct super_block *s, void *data)
   1202{
   1203	s->s_bdev = data;
   1204	s->s_dev = s->s_bdev->bd_dev;
   1205	s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
   1206
   1207	if (bdev_stable_writes(s->s_bdev))
   1208		s->s_iflags |= SB_I_STABLE_WRITES;
   1209	return 0;
   1210}
   1211
   1212static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
   1213{
   1214	return set_bdev_super(s, fc->sget_key);
   1215}
   1216
   1217static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
   1218{
   1219	return s->s_bdev == fc->sget_key;
   1220}
   1221
   1222/**
   1223 * get_tree_bdev - Get a superblock based on a single block device
   1224 * @fc: The filesystem context holding the parameters
   1225 * @fill_super: Helper to initialise a new superblock
   1226 */
   1227int get_tree_bdev(struct fs_context *fc,
   1228		int (*fill_super)(struct super_block *,
   1229				  struct fs_context *))
   1230{
   1231	struct block_device *bdev;
   1232	struct super_block *s;
   1233	fmode_t mode = FMODE_READ | FMODE_EXCL;
   1234	int error = 0;
   1235
   1236	if (!(fc->sb_flags & SB_RDONLY))
   1237		mode |= FMODE_WRITE;
   1238
   1239	if (!fc->source)
   1240		return invalf(fc, "No source specified");
   1241
   1242	bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
   1243	if (IS_ERR(bdev)) {
   1244		errorf(fc, "%s: Can't open blockdev", fc->source);
   1245		return PTR_ERR(bdev);
   1246	}
   1247
   1248	/* Once the superblock is inserted into the list by sget_fc(), s_umount
   1249	 * will protect the lockfs code from trying to start a snapshot while
   1250	 * we are mounting
   1251	 */
   1252	mutex_lock(&bdev->bd_fsfreeze_mutex);
   1253	if (bdev->bd_fsfreeze_count > 0) {
   1254		mutex_unlock(&bdev->bd_fsfreeze_mutex);
   1255		warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
   1256		blkdev_put(bdev, mode);
   1257		return -EBUSY;
   1258	}
   1259
   1260	fc->sb_flags |= SB_NOSEC;
   1261	fc->sget_key = bdev;
   1262	s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
   1263	mutex_unlock(&bdev->bd_fsfreeze_mutex);
   1264	if (IS_ERR(s)) {
   1265		blkdev_put(bdev, mode);
   1266		return PTR_ERR(s);
   1267	}
   1268
   1269	if (s->s_root) {
   1270		/* Don't summarily change the RO/RW state. */
   1271		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
   1272			warnf(fc, "%pg: Can't mount, would change RO state", bdev);
   1273			deactivate_locked_super(s);
   1274			blkdev_put(bdev, mode);
   1275			return -EBUSY;
   1276		}
   1277
   1278		/*
   1279		 * s_umount nests inside open_mutex during
   1280		 * __invalidate_device().  blkdev_put() acquires
   1281		 * open_mutex and can't be called under s_umount.  Drop
   1282		 * s_umount temporarily.  This is safe as we're
   1283		 * holding an active reference.
   1284		 */
   1285		up_write(&s->s_umount);
   1286		blkdev_put(bdev, mode);
   1287		down_write(&s->s_umount);
   1288	} else {
   1289		s->s_mode = mode;
   1290		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
   1291		sb_set_blocksize(s, block_size(bdev));
   1292		error = fill_super(s, fc);
   1293		if (error) {
   1294			deactivate_locked_super(s);
   1295			return error;
   1296		}
   1297
   1298		s->s_flags |= SB_ACTIVE;
   1299		bdev->bd_super = s;
   1300	}
   1301
   1302	BUG_ON(fc->root);
   1303	fc->root = dget(s->s_root);
   1304	return 0;
   1305}
   1306EXPORT_SYMBOL(get_tree_bdev);
   1307
   1308static int test_bdev_super(struct super_block *s, void *data)
   1309{
   1310	return (void *)s->s_bdev == data;
   1311}
   1312
   1313struct dentry *mount_bdev(struct file_system_type *fs_type,
   1314	int flags, const char *dev_name, void *data,
   1315	int (*fill_super)(struct super_block *, void *, int))
   1316{
   1317	struct block_device *bdev;
   1318	struct super_block *s;
   1319	fmode_t mode = FMODE_READ | FMODE_EXCL;
   1320	int error = 0;
   1321
   1322	if (!(flags & SB_RDONLY))
   1323		mode |= FMODE_WRITE;
   1324
   1325	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
   1326	if (IS_ERR(bdev))
   1327		return ERR_CAST(bdev);
   1328
   1329	/*
   1330	 * once the super is inserted into the list by sget, s_umount
   1331	 * will protect the lockfs code from trying to start a snapshot
   1332	 * while we are mounting
   1333	 */
   1334	mutex_lock(&bdev->bd_fsfreeze_mutex);
   1335	if (bdev->bd_fsfreeze_count > 0) {
   1336		mutex_unlock(&bdev->bd_fsfreeze_mutex);
   1337		error = -EBUSY;
   1338		goto error_bdev;
   1339	}
   1340	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
   1341		 bdev);
   1342	mutex_unlock(&bdev->bd_fsfreeze_mutex);
   1343	if (IS_ERR(s))
   1344		goto error_s;
   1345
   1346	if (s->s_root) {
   1347		if ((flags ^ s->s_flags) & SB_RDONLY) {
   1348			deactivate_locked_super(s);
   1349			error = -EBUSY;
   1350			goto error_bdev;
   1351		}
   1352
   1353		/*
   1354		 * s_umount nests inside open_mutex during
   1355		 * __invalidate_device().  blkdev_put() acquires
   1356		 * open_mutex and can't be called under s_umount.  Drop
   1357		 * s_umount temporarily.  This is safe as we're
   1358		 * holding an active reference.
   1359		 */
   1360		up_write(&s->s_umount);
   1361		blkdev_put(bdev, mode);
   1362		down_write(&s->s_umount);
   1363	} else {
   1364		s->s_mode = mode;
   1365		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
   1366		sb_set_blocksize(s, block_size(bdev));
   1367		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
   1368		if (error) {
   1369			deactivate_locked_super(s);
   1370			goto error;
   1371		}
   1372
   1373		s->s_flags |= SB_ACTIVE;
   1374		bdev->bd_super = s;
   1375	}
   1376
   1377	return dget(s->s_root);
   1378
   1379error_s:
   1380	error = PTR_ERR(s);
   1381error_bdev:
   1382	blkdev_put(bdev, mode);
   1383error:
   1384	return ERR_PTR(error);
   1385}
   1386EXPORT_SYMBOL(mount_bdev);
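
A minimal sketch of the classic block-device mount pattern built on
mount_bdev(), modelled on how disk filesystems typically wire it up;
blkfs_fill_super() is a hypothetical placeholder.

static int blkfs_fill_super(struct super_block *sb, void *data, int silent);

static struct dentry *blkfs_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, blkfs_fill_super);
}

static struct file_system_type blkfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "blkfs",
	.mount		= blkfs_mount,
	.kill_sb	= kill_block_super,	/* see below */
	.fs_flags	= FS_REQUIRES_DEV,
};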
   1387
   1388void kill_block_super(struct super_block *sb)
   1389{
   1390	struct block_device *bdev = sb->s_bdev;
   1391	fmode_t mode = sb->s_mode;
   1392
   1393	bdev->bd_super = NULL;
   1394	generic_shutdown_super(sb);
   1395	sync_blockdev(bdev);
   1396	WARN_ON_ONCE(!(mode & FMODE_EXCL));
   1397	blkdev_put(bdev, mode | FMODE_EXCL);
   1398}
   1399
   1400EXPORT_SYMBOL(kill_block_super);
   1401#endif
   1402
   1403struct dentry *mount_nodev(struct file_system_type *fs_type,
   1404	int flags, void *data,
   1405	int (*fill_super)(struct super_block *, void *, int))
   1406{
   1407	int error;
   1408	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
   1409
   1410	if (IS_ERR(s))
   1411		return ERR_CAST(s);
   1412
   1413	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
   1414	if (error) {
   1415		deactivate_locked_super(s);
   1416		return ERR_PTR(error);
   1417	}
   1418	s->s_flags |= SB_ACTIVE;
   1419	return dget(s->s_root);
   1420}
   1421EXPORT_SYMBOL(mount_nodev);
   1422
   1423int reconfigure_single(struct super_block *s,
   1424		       int flags, void *data)
   1425{
   1426	struct fs_context *fc;
   1427	int ret;
   1428
    1429	/* The caller really needs to be passing fc down into mount_single(),
   1430	 * then a chunk of this can be removed.  [Bollocks -- AV]
   1431	 * Better yet, reconfiguration shouldn't happen, but rather the second
   1432	 * mount should be rejected if the parameters are not compatible.
   1433	 */
   1434	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
   1435	if (IS_ERR(fc))
   1436		return PTR_ERR(fc);
   1437
   1438	ret = parse_monolithic_mount_data(fc, data);
   1439	if (ret < 0)
   1440		goto out;
   1441
   1442	ret = reconfigure_super(fc);
   1443out:
   1444	put_fs_context(fc);
   1445	return ret;
   1446}
   1447
   1448static int compare_single(struct super_block *s, void *p)
   1449{
   1450	return 1;
   1451}
   1452
   1453struct dentry *mount_single(struct file_system_type *fs_type,
   1454	int flags, void *data,
   1455	int (*fill_super)(struct super_block *, void *, int))
   1456{
   1457	struct super_block *s;
   1458	int error;
   1459
   1460	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
   1461	if (IS_ERR(s))
   1462		return ERR_CAST(s);
   1463	if (!s->s_root) {
   1464		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
   1465		if (!error)
   1466			s->s_flags |= SB_ACTIVE;
   1467	} else {
   1468		error = reconfigure_single(s, flags, data);
   1469	}
   1470	if (unlikely(error)) {
   1471		deactivate_locked_super(s);
   1472		return ERR_PTR(error);
   1473	}
   1474	return dget(s->s_root);
   1475}
   1476EXPORT_SYMBOL(mount_single);
   1477
   1478/**
   1479 * vfs_get_tree - Get the mountable root
   1480 * @fc: The superblock configuration context.
   1481 *
   1482 * The filesystem is invoked to get or create a superblock which can then later
   1483 * be used for mounting.  The filesystem places a pointer to the root to be
   1484 * used for mounting in @fc->root.
   1485 */
   1486int vfs_get_tree(struct fs_context *fc)
   1487{
   1488	struct super_block *sb;
   1489	int error;
   1490
   1491	if (fc->root)
   1492		return -EBUSY;
   1493
   1494	/* Get the mountable root in fc->root, with a ref on the root and a ref
   1495	 * on the superblock.
   1496	 */
   1497	error = fc->ops->get_tree(fc);
   1498	if (error < 0)
   1499		return error;
   1500
   1501	if (!fc->root) {
   1502		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
   1503		       fc->fs_type->name);
   1504		/* We don't know what the locking state of the superblock is -
   1505		 * if there is a superblock.
   1506		 */
   1507		BUG();
   1508	}
   1509
   1510	sb = fc->root->d_sb;
   1511	WARN_ON(!sb->s_bdi);
   1512
   1513	/*
   1514	 * Write barrier is for super_cache_count(). We place it before setting
   1515	 * SB_BORN as the data dependency between the two functions is the
   1516	 * superblock structure contents that we just set up, not the SB_BORN
   1517	 * flag.
   1518	 */
   1519	smp_wmb();
   1520	sb->s_flags |= SB_BORN;
   1521
   1522	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
   1523	if (unlikely(error)) {
   1524		fc_drop_locked(fc);
   1525		return error;
   1526	}
   1527
   1528	/*
   1529	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
   1530	 * but s_maxbytes was an unsigned long long for many releases. Throw
   1531	 * this warning for a little while to try and catch filesystems that
   1532	 * violate this rule.
   1533	 */
   1534	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
   1535		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
   1536
   1537	return 0;
   1538}
   1539EXPORT_SYMBOL(vfs_get_tree);
   1540
   1541/*
   1542 * Setup private BDI for given superblock. It gets automatically cleaned up
   1543 * in generic_shutdown_super().
   1544 */
   1545int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
   1546{
   1547	struct backing_dev_info *bdi;
   1548	int err;
   1549	va_list args;
   1550
   1551	bdi = bdi_alloc(NUMA_NO_NODE);
   1552	if (!bdi)
   1553		return -ENOMEM;
   1554
   1555	va_start(args, fmt);
   1556	err = bdi_register_va(bdi, fmt, args);
   1557	va_end(args);
   1558	if (err) {
   1559		bdi_put(bdi);
   1560		return err;
   1561	}
   1562	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
   1563	sb->s_bdi = bdi;
   1564	sb->s_iflags |= SB_I_PERSB_BDI;
   1565
   1566	return 0;
   1567}
   1568EXPORT_SYMBOL(super_setup_bdi_name);
   1569
   1570/*
    1571 * Setup private BDI for given superblock. It gets automatically cleaned up
   1572 * in generic_shutdown_super().
   1573 */
   1574int super_setup_bdi(struct super_block *sb)
   1575{
   1576	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
   1577
   1578	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
   1579				    atomic_long_inc_return(&bdi_seq));
   1580}
   1581EXPORT_SYMBOL(super_setup_bdi);
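
A minimal sketch of the intended call site, assuming a hypothetical
mynetfs_fill_super(): filesystems without a block device call
super_setup_bdi() (or super_setup_bdi_name()) while filling the superblock,
and generic_shutdown_super() later unregisters and drops the private BDI.

static int mynetfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	int err;

	/* Private writeback BDI; cleaned up in generic_shutdown_super(). */
	err = super_setup_bdi(sb);
	if (err)
		return err;

	sb->s_bdi->ra_pages = 0;	/* e.g. a network fs disabling readahead */
	return 0;
}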
   1582
   1583/**
   1584 * sb_wait_write - wait until all writers to given file system finish
   1585 * @sb: the super for which we wait
   1586 * @level: type of writers we wait for (normal vs page fault)
   1587 *
   1588 * This function waits until there are no writers of given type to given file
   1589 * system.
   1590 */
   1591static void sb_wait_write(struct super_block *sb, int level)
   1592{
   1593	percpu_down_write(sb->s_writers.rw_sem + level-1);
   1594}
   1595
   1596/*
   1597 * We are going to return to userspace and forget about these locks, the
   1598 * ownership goes to the caller of thaw_super() which does unlock().
   1599 */
   1600static void lockdep_sb_freeze_release(struct super_block *sb)
   1601{
   1602	int level;
   1603
   1604	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
   1605		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
   1606}
   1607
   1608/*
   1609 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
   1610 */
   1611static void lockdep_sb_freeze_acquire(struct super_block *sb)
   1612{
   1613	int level;
   1614
   1615	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
   1616		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
   1617}
   1618
   1619static void sb_freeze_unlock(struct super_block *sb, int level)
   1620{
   1621	for (level--; level >= 0; level--)
   1622		percpu_up_write(sb->s_writers.rw_sem + level);
   1623}
   1624
   1625/**
   1626 * freeze_super - lock the filesystem and force it into a consistent state
   1627 * @sb: the super to lock
   1628 *
   1629 * Syncs the super to make sure the filesystem is consistent and calls the fs's
   1630 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
   1631 * -EBUSY.
   1632 *
   1633 * During this function, sb->s_writers.frozen goes through these values:
   1634 *
   1635 * SB_UNFROZEN: File system is normal, all writes progress as usual.
   1636 *
   1637 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
   1638 * writes should be blocked, though page faults are still allowed. We wait for
   1639 * all writes to complete and then proceed to the next stage.
   1640 *
   1641 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
   1642 * but internal fs threads can still modify the filesystem (although they
   1643 * should not dirty new pages or inodes), writeback can run etc. After waiting
   1644 * for all running page faults we sync the filesystem which will clean all
   1645 * dirty pages and inodes (no new dirty pages or inodes can be created when
   1646 * sync is running).
   1647 *
   1648 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
   1649 * modification are blocked (e.g. XFS preallocation truncation on inode
   1650 * reclaim). This is usually implemented by blocking new transactions for
   1651 * filesystems that have them and need this additional guard. After all
   1652 * internal writers are finished we call ->freeze_fs() to finish filesystem
   1653 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
   1654 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
   1655 *
   1656 * sb->s_writers.frozen is protected by sb->s_umount.
   1657 */
   1658int freeze_super(struct super_block *sb)
   1659{
   1660	int ret;
   1661
   1662	atomic_inc(&sb->s_active);
   1663	down_write(&sb->s_umount);
   1664	if (sb->s_writers.frozen != SB_UNFROZEN) {
   1665		deactivate_locked_super(sb);
   1666		return -EBUSY;
   1667	}
   1668
   1669	if (!(sb->s_flags & SB_BORN)) {
   1670		up_write(&sb->s_umount);
   1671		return 0;	/* sic - it's "nothing to do" */
   1672	}
   1673
   1674	if (sb_rdonly(sb)) {
   1675		/* Nothing to do really... */
   1676		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
   1677		up_write(&sb->s_umount);
   1678		return 0;
   1679	}
   1680
   1681	sb->s_writers.frozen = SB_FREEZE_WRITE;
   1682	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
   1683	up_write(&sb->s_umount);
   1684	sb_wait_write(sb, SB_FREEZE_WRITE);
   1685	down_write(&sb->s_umount);
   1686
   1687	/* Now we go and block page faults... */
   1688	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
   1689	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
   1690
   1691	/* All writers are done so after syncing there won't be dirty data */
   1692	ret = sync_filesystem(sb);
   1693	if (ret) {
   1694		sb->s_writers.frozen = SB_UNFROZEN;
   1695		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
   1696		wake_up(&sb->s_writers.wait_unfrozen);
   1697		deactivate_locked_super(sb);
   1698		return ret;
   1699	}
   1700
   1701	/* Now wait for internal filesystem counter */
   1702	sb->s_writers.frozen = SB_FREEZE_FS;
   1703	sb_wait_write(sb, SB_FREEZE_FS);
   1704
   1705	if (sb->s_op->freeze_fs) {
   1706		ret = sb->s_op->freeze_fs(sb);
   1707		if (ret) {
   1708			printk(KERN_ERR
   1709				"VFS:Filesystem freeze failed\n");
   1710			sb->s_writers.frozen = SB_UNFROZEN;
   1711			sb_freeze_unlock(sb, SB_FREEZE_FS);
   1712			wake_up(&sb->s_writers.wait_unfrozen);
   1713			deactivate_locked_super(sb);
   1714			return ret;
   1715		}
   1716	}
   1717	/*
   1718	 * For debugging purposes so that fs can warn if it sees write activity
   1719	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
   1720	 */
   1721	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
   1722	lockdep_sb_freeze_release(sb);
   1723	up_write(&sb->s_umount);
   1724	return 0;
   1725}
   1726EXPORT_SYMBOL(freeze_super);
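
A minimal usage sketch, assuming a hypothetical take_snapshot() helper: the
same freeze/thaw bracket that the FIFREEZE/FITHAW ioctls rely on, with
freeze_super() draining the three writer levels described above and
thaw_super() (below) releasing them again.

static int snapshot_frozen_sb(struct super_block *sb)
{
	int err;

	err = freeze_super(sb);		/* blocks writers, syncs, calls ->freeze_fs */
	if (err)
		return err;

	err = take_snapshot(sb);	/* hypothetical: e.g. a device-level snapshot */

	thaw_super(sb);			/* calls ->unfreeze_fs and unblocks writers */
	return err;
}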
   1727
   1728static int thaw_super_locked(struct super_block *sb)
   1729{
   1730	int error;
   1731
   1732	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
   1733		up_write(&sb->s_umount);
   1734		return -EINVAL;
   1735	}
   1736
   1737	if (sb_rdonly(sb)) {
   1738		sb->s_writers.frozen = SB_UNFROZEN;
   1739		goto out;
   1740	}
   1741
   1742	lockdep_sb_freeze_acquire(sb);
   1743
   1744	if (sb->s_op->unfreeze_fs) {
   1745		error = sb->s_op->unfreeze_fs(sb);
   1746		if (error) {
   1747			printk(KERN_ERR
   1748				"VFS:Filesystem thaw failed\n");
   1749			lockdep_sb_freeze_release(sb);
   1750			up_write(&sb->s_umount);
   1751			return error;
   1752		}
   1753	}
   1754
   1755	sb->s_writers.frozen = SB_UNFROZEN;
   1756	sb_freeze_unlock(sb, SB_FREEZE_FS);
   1757out:
   1758	wake_up(&sb->s_writers.wait_unfrozen);
   1759	deactivate_locked_super(sb);
   1760	return 0;
   1761}
   1762
   1763/**
   1764 * thaw_super -- unlock filesystem
   1765 * @sb: the super to thaw
   1766 *
   1767 * Unlocks the filesystem and marks it writeable again after freeze_super().
   1768 */
   1769int thaw_super(struct super_block *sb)
   1770{
   1771	down_write(&sb->s_umount);
   1772	return thaw_super_locked(sb);
   1773}
   1774EXPORT_SYMBOL(thaw_super);