cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

md.c (264982B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3   md.c : Multiple Devices driver for Linux
      4     Copyright (C) 1998, 1999, 2000 Ingo Molnar
      5
      6     completely rewritten, based on the MD driver code from Marc Zyngier
      7
      8   Changes:
      9
     10   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
     11   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
     12   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
     13   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
     14   - kmod support by: Cyrus Durgin
     15   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
     16   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
     17
     18   - lots of fixes and improvements to the RAID1/RAID5 and generic
     19     RAID code (such as request based resynchronization):
     20
     21     Neil Brown <neilb@cse.unsw.edu.au>.
     22
     23   - persistent bitmap code
     24     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
     25
     26
     27   Errors, Warnings, etc.
     28   Please use:
     29     pr_crit() for error conditions that risk data loss
     30     pr_err() for error conditions that are unexpected, like an IO error
     31         or internal inconsistency
      32     pr_warn() for error conditions that could have been predicted, like
     33         adding a device to an array when it has incompatible metadata
      34     pr_info() for interesting, very rare events, like an array starting
     35         or stopping, or resync starting or stopping
     36     pr_debug() for everything else.
     37
     38*/
     39
     40#include <linux/sched/mm.h>
     41#include <linux/sched/signal.h>
     42#include <linux/kthread.h>
     43#include <linux/blkdev.h>
     44#include <linux/blk-integrity.h>
     45#include <linux/badblocks.h>
     46#include <linux/sysctl.h>
     47#include <linux/seq_file.h>
     48#include <linux/fs.h>
     49#include <linux/poll.h>
     50#include <linux/ctype.h>
     51#include <linux/string.h>
     52#include <linux/hdreg.h>
     53#include <linux/proc_fs.h>
     54#include <linux/random.h>
     55#include <linux/major.h>
     56#include <linux/module.h>
     57#include <linux/reboot.h>
     58#include <linux/file.h>
     59#include <linux/compat.h>
     60#include <linux/delay.h>
     61#include <linux/raid/md_p.h>
     62#include <linux/raid/md_u.h>
     63#include <linux/raid/detect.h>
     64#include <linux/slab.h>
     65#include <linux/percpu-refcount.h>
     66#include <linux/part_stat.h>
     67
     68#include <trace/events/block.h>
     69#include "md.h"
     70#include "md-bitmap.h"
     71#include "md-cluster.h"
     72
     73/* pers_list is a list of registered personalities protected
     74 * by pers_lock.
      75 * pers_lock also protects accesses to
     76 * mddev->thread when the mutex cannot be held.
     77 */
     78static LIST_HEAD(pers_list);
     79static DEFINE_SPINLOCK(pers_lock);
     80
     81static struct kobj_type md_ktype;
     82
     83struct md_cluster_operations *md_cluster_ops;
     84EXPORT_SYMBOL(md_cluster_ops);
     85static struct module *md_cluster_mod;
     86
     87static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
     88static struct workqueue_struct *md_wq;
     89static struct workqueue_struct *md_misc_wq;
     90static struct workqueue_struct *md_rdev_misc_wq;
     91
     92static int remove_and_add_spares(struct mddev *mddev,
     93				 struct md_rdev *this);
     94static void mddev_detach(struct mddev *mddev);
     95
     96/*
     97 * Default number of read corrections we'll attempt on an rdev
     98 * before ejecting it from the array. We divide the read error
     99 * count by 2 for every hour elapsed between read errors.
    100 */
    101#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
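        /*
         * Rough illustration (the exact decay logic lives in the RAID1/RAID10
         * read-error handling): a burst of corrected read errors that pushes
         * the tracked count past this limit ejects the device, while occasional
         * errors never accumulate, since the count is halved for every hour
         * that passes between errors.
         */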
    102/* Default safemode delay: 200 msec */
    103#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
    104/*
    105 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
    106 * is 1000 KB/sec, so the extra system load does not show up that much.
    107 * Increase it if you want to have more _guaranteed_ speed. Note that
    108 * the RAID driver will use the maximum available bandwidth if the IO
    109 * subsystem is idle. There is also an 'absolute maximum' reconstruction
    110 * speed limit - in case reconstruction slows down your system despite
    111 * idle IO detection.
    112 *
    113 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
    114 * or /sys/block/mdX/md/sync_speed_{min,max}
    115 */
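        /*
         * Example of tuning these limits from user space (illustrative values,
         * in KB/sec; "md0" is just an example array name):
         *
         *	echo  50000 > /proc/sys/dev/raid/speed_limit_min
         *	echo 500000 > /sys/block/md0/md/sync_speed_max
         *
         * A per-array sysfs value of 0 falls back to the system-wide sysctl
         * defaults below (see speed_min()/speed_max()).
         */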
    116
    117static int sysctl_speed_limit_min = 1000;
    118static int sysctl_speed_limit_max = 200000;
    119static inline int speed_min(struct mddev *mddev)
    120{
    121	return mddev->sync_speed_min ?
    122		mddev->sync_speed_min : sysctl_speed_limit_min;
    123}
    124
    125static inline int speed_max(struct mddev *mddev)
    126{
    127	return mddev->sync_speed_max ?
    128		mddev->sync_speed_max : sysctl_speed_limit_max;
    129}
    130
    131static void rdev_uninit_serial(struct md_rdev *rdev)
    132{
    133	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
    134		return;
    135
    136	kvfree(rdev->serial);
    137	rdev->serial = NULL;
    138}
    139
    140static void rdevs_uninit_serial(struct mddev *mddev)
    141{
    142	struct md_rdev *rdev;
    143
    144	rdev_for_each(rdev, mddev)
    145		rdev_uninit_serial(rdev);
    146}
    147
    148static int rdev_init_serial(struct md_rdev *rdev)
    149{
     150	/* serial_nums equals BARRIER_BUCKETS_NR */
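        	/*
        	 * For example, with 4 KiB pages and a 4-byte atomic_t this is
        	 * 1 << (12 - 2) = 1024 buckets.
        	 */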
    151	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
    152	struct serial_in_rdev *serial = NULL;
    153
    154	if (test_bit(CollisionCheck, &rdev->flags))
    155		return 0;
    156
    157	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
    158			  GFP_KERNEL);
    159	if (!serial)
    160		return -ENOMEM;
    161
    162	for (i = 0; i < serial_nums; i++) {
    163		struct serial_in_rdev *serial_tmp = &serial[i];
    164
    165		spin_lock_init(&serial_tmp->serial_lock);
    166		serial_tmp->serial_rb = RB_ROOT_CACHED;
    167		init_waitqueue_head(&serial_tmp->serial_io_wait);
    168	}
    169
    170	rdev->serial = serial;
    171	set_bit(CollisionCheck, &rdev->flags);
    172
    173	return 0;
    174}
    175
    176static int rdevs_init_serial(struct mddev *mddev)
    177{
    178	struct md_rdev *rdev;
    179	int ret = 0;
    180
    181	rdev_for_each(rdev, mddev) {
    182		ret = rdev_init_serial(rdev);
    183		if (ret)
    184			break;
    185	}
    186
     187	/* Free all resources if the pool does not exist */
    188	if (ret && !mddev->serial_info_pool)
    189		rdevs_uninit_serial(mddev);
    190
    191	return ret;
    192}
    193
    194/*
     195 * rdev needs to enable serialization if it meets both conditions:
     196 * 1. it is a multi-queue device flagged with WriteMostly.
    197 * 2. the write-behind mode is enabled.
    198 */
    199static int rdev_need_serial(struct md_rdev *rdev)
    200{
    201	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
    202		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
    203		test_bit(WriteMostly, &rdev->flags));
    204}
    205
    206/*
    207 * Init resource for rdev(s), then create serial_info_pool if:
     208 * 1. rdev is the first device which returns true from rdev_need_serial.
     209 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
    210 */
    211void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
    212			      bool is_suspend)
    213{
    214	int ret = 0;
    215
    216	if (rdev && !rdev_need_serial(rdev) &&
    217	    !test_bit(CollisionCheck, &rdev->flags))
    218		return;
    219
    220	if (!is_suspend)
    221		mddev_suspend(mddev);
    222
    223	if (!rdev)
    224		ret = rdevs_init_serial(mddev);
    225	else
    226		ret = rdev_init_serial(rdev);
    227	if (ret)
    228		goto abort;
    229
    230	if (mddev->serial_info_pool == NULL) {
    231		/*
    232		 * already in memalloc noio context by
    233		 * mddev_suspend()
    234		 */
    235		mddev->serial_info_pool =
    236			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
    237						sizeof(struct serial_info));
    238		if (!mddev->serial_info_pool) {
    239			rdevs_uninit_serial(mddev);
    240			pr_err("can't alloc memory pool for serialization\n");
    241		}
    242	}
    243
    244abort:
    245	if (!is_suspend)
    246		mddev_resume(mddev);
    247}
    248
    249/*
    250 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
     251 * 1. rdev is the last device flagged with CollisionCheck.
     252 * 2. when the bitmap is destroyed while the policy is not enabled.
     253 * 3. when disabling the policy, the pool is destroyed only if no rdev needs it.
    254 */
    255void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
    256			       bool is_suspend)
    257{
    258	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
    259		return;
    260
    261	if (mddev->serial_info_pool) {
    262		struct md_rdev *temp;
    263		int num = 0; /* used to track if other rdevs need the pool */
    264
    265		if (!is_suspend)
    266			mddev_suspend(mddev);
    267		rdev_for_each(temp, mddev) {
    268			if (!rdev) {
    269				if (!mddev->serialize_policy ||
    270				    !rdev_need_serial(temp))
    271					rdev_uninit_serial(temp);
    272				else
    273					num++;
    274			} else if (temp != rdev &&
    275				   test_bit(CollisionCheck, &temp->flags))
    276				num++;
    277		}
    278
    279		if (rdev)
    280			rdev_uninit_serial(rdev);
    281
    282		if (num)
    283			pr_info("The mempool could be used by other devices\n");
    284		else {
    285			mempool_destroy(mddev->serial_info_pool);
    286			mddev->serial_info_pool = NULL;
    287		}
    288		if (!is_suspend)
    289			mddev_resume(mddev);
    290	}
    291}
    292
    293static struct ctl_table_header *raid_table_header;
    294
    295static struct ctl_table raid_table[] = {
    296	{
    297		.procname	= "speed_limit_min",
    298		.data		= &sysctl_speed_limit_min,
    299		.maxlen		= sizeof(int),
    300		.mode		= S_IRUGO|S_IWUSR,
    301		.proc_handler	= proc_dointvec,
    302	},
    303	{
    304		.procname	= "speed_limit_max",
    305		.data		= &sysctl_speed_limit_max,
    306		.maxlen		= sizeof(int),
    307		.mode		= S_IRUGO|S_IWUSR,
    308		.proc_handler	= proc_dointvec,
    309	},
    310	{ }
    311};
    312
    313static struct ctl_table raid_dir_table[] = {
    314	{
    315		.procname	= "raid",
    316		.maxlen		= 0,
    317		.mode		= S_IRUGO|S_IXUGO,
    318		.child		= raid_table,
    319	},
    320	{ }
    321};
    322
    323static struct ctl_table raid_root_table[] = {
    324	{
    325		.procname	= "dev",
    326		.maxlen		= 0,
    327		.mode		= 0555,
    328		.child		= raid_dir_table,
    329	},
    330	{  }
    331};
    332
    333static int start_readonly;
    334
    335/*
    336 * The original mechanism for creating an md device is to create
    337 * a device node in /dev and to open it.  This causes races with device-close.
    338 * The preferred method is to write to the "new_array" module parameter.
    339 * This can avoid races.
    340 * Setting create_on_open to false disables the original mechanism
    341 * so all the races disappear.
    342 */
    343static bool create_on_open = true;
    344
    345/*
    346 * We have a system wide 'event count' that is incremented
    347 * on any 'interesting' event, and readers of /proc/mdstat
    348 * can use 'poll' or 'select' to find out when the event
    349 * count increases.
    350 *
    351 * Events are:
    352 *  start array, stop array, error, add device, remove device,
    353 *  start build, activate spare
    354 */
    355static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
    356static atomic_t md_event_count;
    357void md_new_event(void)
    358{
    359	atomic_inc(&md_event_count);
    360	wake_up(&md_event_waiters);
    361}
    362EXPORT_SYMBOL_GPL(md_new_event);
    363
    364/*
     365 * Enables iteration over all existing md arrays;
    366 * all_mddevs_lock protects this list.
    367 */
    368static LIST_HEAD(all_mddevs);
    369static DEFINE_SPINLOCK(all_mddevs_lock);
    370
    371/*
    372 * iterates through all used mddevs in the system.
    373 * We take care to grab the all_mddevs_lock whenever navigating
    374 * the list, and to always hold a refcount when unlocked.
     375 * Any code which breaks out of this loop still owns
     376 * a reference to the current mddev and must mddev_put it.
    377 */
    378#define for_each_mddev(_mddev,_tmp)					\
    379									\
    380	for (({ spin_lock(&all_mddevs_lock);				\
    381		_tmp = all_mddevs.next;					\
    382		_mddev = NULL;});					\
    383	     ({ if (_tmp != &all_mddevs)				\
    384			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
    385		spin_unlock(&all_mddevs_lock);				\
    386		if (_mddev) mddev_put(_mddev);				\
    387		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
    388		_tmp != &all_mddevs;});					\
    389	     ({ spin_lock(&all_mddevs_lock);				\
    390		_tmp = _tmp->next;})					\
    391		)
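        /*
         * Sketch of typical use: the loop holds a reference to the current
         * mddev on each iteration, so code that breaks out early must drop
         * that reference itself with mddev_put().
         *
         *	struct mddev *mddev;
         *	struct list_head *tmp;
         *
         *	for_each_mddev(mddev, tmp) {
         *		... inspect or act on mddev ...
         *	}
         */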
    392
    393/* Rather than calling directly into the personality make_request function,
    394 * IO requests come here first so that we can check if the device is
    395 * being suspended pending a reconfiguration.
    396 * We hold a refcount over the call to ->make_request.  By the time that
    397 * call has finished, the bio has been linked into some internal structure
    398 * and so is visible to ->quiesce(), so we don't need the refcount any more.
    399 */
    400static bool is_suspended(struct mddev *mddev, struct bio *bio)
    401{
    402	if (mddev->suspended)
    403		return true;
    404	if (bio_data_dir(bio) != WRITE)
    405		return false;
    406	if (mddev->suspend_lo >= mddev->suspend_hi)
    407		return false;
    408	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
    409		return false;
    410	if (bio_end_sector(bio) < mddev->suspend_lo)
    411		return false;
    412	return true;
    413}
    414
    415void md_handle_request(struct mddev *mddev, struct bio *bio)
    416{
    417check_suspended:
    418	rcu_read_lock();
    419	if (is_suspended(mddev, bio)) {
    420		DEFINE_WAIT(__wait);
    421		/* Bail out if REQ_NOWAIT is set for the bio */
    422		if (bio->bi_opf & REQ_NOWAIT) {
    423			rcu_read_unlock();
    424			bio_wouldblock_error(bio);
    425			return;
    426		}
    427		for (;;) {
    428			prepare_to_wait(&mddev->sb_wait, &__wait,
    429					TASK_UNINTERRUPTIBLE);
    430			if (!is_suspended(mddev, bio))
    431				break;
    432			rcu_read_unlock();
    433			schedule();
    434			rcu_read_lock();
    435		}
    436		finish_wait(&mddev->sb_wait, &__wait);
    437	}
    438	atomic_inc(&mddev->active_io);
    439	rcu_read_unlock();
    440
    441	if (!mddev->pers->make_request(mddev, bio)) {
    442		atomic_dec(&mddev->active_io);
    443		wake_up(&mddev->sb_wait);
    444		goto check_suspended;
    445	}
    446
    447	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
    448		wake_up(&mddev->sb_wait);
    449}
    450EXPORT_SYMBOL(md_handle_request);
    451
    452static void md_submit_bio(struct bio *bio)
    453{
    454	const int rw = bio_data_dir(bio);
    455	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
    456
    457	if (mddev == NULL || mddev->pers == NULL) {
    458		bio_io_error(bio);
    459		return;
    460	}
    461
    462	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
    463		bio_io_error(bio);
    464		return;
    465	}
    466
    467	blk_queue_split(&bio);
    468
    469	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
    470		if (bio_sectors(bio) != 0)
    471			bio->bi_status = BLK_STS_IOERR;
    472		bio_endio(bio);
    473		return;
    474	}
    475
     476	/* bio could be mergeable after passing to the underlying layer */
    477	bio->bi_opf &= ~REQ_NOMERGE;
    478
    479	md_handle_request(mddev, bio);
    480}
    481
    482/* mddev_suspend makes sure no new requests are submitted
    483 * to the device, and that any requests that have been submitted
    484 * are completely handled.
    485 * Once mddev_detach() is called and completes, the module will be
    486 * completely unused.
    487 */
    488void mddev_suspend(struct mddev *mddev)
    489{
    490	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
    491	lockdep_assert_held(&mddev->reconfig_mutex);
    492	if (mddev->suspended++)
    493		return;
    494	synchronize_rcu();
    495	wake_up(&mddev->sb_wait);
    496	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
    497	smp_mb__after_atomic();
    498	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
    499	mddev->pers->quiesce(mddev, 1);
    500	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
    501	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
    502
    503	del_timer_sync(&mddev->safemode_timer);
     504	/* restrict memory reclaim I/O while the raid array is suspended */
    505	mddev->noio_flag = memalloc_noio_save();
    506}
    507EXPORT_SYMBOL_GPL(mddev_suspend);
    508
    509void mddev_resume(struct mddev *mddev)
    510{
     511	/* entered the memalloc scope from mddev_suspend() */
    512	memalloc_noio_restore(mddev->noio_flag);
    513	lockdep_assert_held(&mddev->reconfig_mutex);
    514	if (--mddev->suspended)
    515		return;
    516	wake_up(&mddev->sb_wait);
    517	mddev->pers->quiesce(mddev, 0);
    518
    519	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    520	md_wakeup_thread(mddev->thread);
    521	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
    522}
    523EXPORT_SYMBOL_GPL(mddev_resume);
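        /*
         * Note that mddev_suspend()/mddev_resume() nest via the mddev->suspended
         * counter, so calls must be balanced, and both run under reconfig_mutex
         * (see the lockdep assertions above).
         */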
    524
    525/*
    526 * Generic flush handling for md
    527 */
    528
    529static void md_end_flush(struct bio *bio)
    530{
    531	struct md_rdev *rdev = bio->bi_private;
    532	struct mddev *mddev = rdev->mddev;
    533
    534	rdev_dec_pending(rdev, mddev);
    535
    536	if (atomic_dec_and_test(&mddev->flush_pending)) {
    537		/* The pre-request flush has finished */
    538		queue_work(md_wq, &mddev->flush_work);
    539	}
    540	bio_put(bio);
    541}
    542
    543static void md_submit_flush_data(struct work_struct *ws);
    544
    545static void submit_flushes(struct work_struct *ws)
    546{
    547	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
    548	struct md_rdev *rdev;
    549
    550	mddev->start_flush = ktime_get_boottime();
    551	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
    552	atomic_set(&mddev->flush_pending, 1);
    553	rcu_read_lock();
    554	rdev_for_each_rcu(rdev, mddev)
    555		if (rdev->raid_disk >= 0 &&
    556		    !test_bit(Faulty, &rdev->flags)) {
     557			/* Take two references: one is dropped
     558			 * when the request finishes, the other after
     559			 * we re-acquire rcu_read_lock
    560			 */
    561			struct bio *bi;
    562			atomic_inc(&rdev->nr_pending);
    563			atomic_inc(&rdev->nr_pending);
    564			rcu_read_unlock();
    565			bi = bio_alloc_bioset(rdev->bdev, 0,
    566					      REQ_OP_WRITE | REQ_PREFLUSH,
    567					      GFP_NOIO, &mddev->bio_set);
    568			bi->bi_end_io = md_end_flush;
    569			bi->bi_private = rdev;
    570			atomic_inc(&mddev->flush_pending);
    571			submit_bio(bi);
    572			rcu_read_lock();
    573			rdev_dec_pending(rdev, mddev);
    574		}
    575	rcu_read_unlock();
    576	if (atomic_dec_and_test(&mddev->flush_pending))
    577		queue_work(md_wq, &mddev->flush_work);
    578}
    579
    580static void md_submit_flush_data(struct work_struct *ws)
    581{
    582	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
    583	struct bio *bio = mddev->flush_bio;
    584
    585	/*
     586	 * flush_bio must be reset before calling into md_handle_request to
     587	 * avoid a deadlock: other bios that passed the md_handle_request
     588	 * suspend check could wait for this one, while the md_handle_request
     589	 * call below could wait for those bios because of the suspend check.
    590	 */
    591	spin_lock_irq(&mddev->lock);
    592	mddev->prev_flush_start = mddev->start_flush;
    593	mddev->flush_bio = NULL;
    594	spin_unlock_irq(&mddev->lock);
    595	wake_up(&mddev->sb_wait);
    596
    597	if (bio->bi_iter.bi_size == 0) {
    598		/* an empty barrier - all done */
    599		bio_endio(bio);
    600	} else {
    601		bio->bi_opf &= ~REQ_PREFLUSH;
    602		md_handle_request(mddev, bio);
    603	}
    604}
    605
    606/*
    607 * Manages consolidation of flushes and submitting any flushes needed for
    608 * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
    609 * being finished in another context.  Returns false if the flushing is
    610 * complete but still needs the I/O portion of the bio to be processed.
    611 */
    612bool md_flush_request(struct mddev *mddev, struct bio *bio)
    613{
    614	ktime_t req_start = ktime_get_boottime();
    615	spin_lock_irq(&mddev->lock);
    616	/* flush requests wait until ongoing flush completes,
    617	 * hence coalescing all the pending requests.
    618	 */
    619	wait_event_lock_irq(mddev->sb_wait,
    620			    !mddev->flush_bio ||
    621			    ktime_before(req_start, mddev->prev_flush_start),
    622			    mddev->lock);
    623	/* new request after previous flush is completed */
    624	if (ktime_after(req_start, mddev->prev_flush_start)) {
    625		WARN_ON(mddev->flush_bio);
    626		mddev->flush_bio = bio;
    627		bio = NULL;
    628	}
    629	spin_unlock_irq(&mddev->lock);
    630
    631	if (!bio) {
    632		INIT_WORK(&mddev->flush_work, submit_flushes);
    633		queue_work(md_wq, &mddev->flush_work);
    634	} else {
    635		/* flush was performed for some other bio while we waited. */
    636		if (bio->bi_iter.bi_size == 0)
    637			/* an empty barrier - all done */
    638			bio_endio(bio);
    639		else {
    640			bio->bi_opf &= ~REQ_PREFLUSH;
    641			return false;
    642		}
    643	}
    644	return true;
    645}
    646EXPORT_SYMBOL(md_flush_request);
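        /*
         * Sketch of how a personality's make_request typically uses this helper
         * (cf. the RAID1/RAID10 request paths):
         *
         *	if (bio->bi_opf & REQ_PREFLUSH
         *	    && md_flush_request(mddev, bio))
         *		return true;	(flush fully handled elsewhere)
         *
         * Otherwise the flush part is done (or was not needed) and the caller
         * goes on to submit the data portion of the bio.
         */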
    647
    648static inline struct mddev *mddev_get(struct mddev *mddev)
    649{
    650	atomic_inc(&mddev->active);
    651	return mddev;
    652}
    653
    654static void mddev_delayed_delete(struct work_struct *ws);
    655
    656static void mddev_put(struct mddev *mddev)
    657{
    658	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
    659		return;
    660	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
    661	    mddev->ctime == 0 && !mddev->hold_active) {
    662		/* Array is not configured at all, and not held active,
    663		 * so destroy it */
    664		list_del_init(&mddev->all_mddevs);
    665
    666		/*
    667		 * Call queue_work inside the spinlock so that
    668		 * flush_workqueue() after mddev_find will succeed in waiting
    669		 * for the work to be done.
    670		 */
    671		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
    672		queue_work(md_misc_wq, &mddev->del_work);
    673	}
    674	spin_unlock(&all_mddevs_lock);
    675}
    676
    677static void md_safemode_timeout(struct timer_list *t);
    678
    679void mddev_init(struct mddev *mddev)
    680{
    681	kobject_init(&mddev->kobj, &md_ktype);
    682	mutex_init(&mddev->open_mutex);
    683	mutex_init(&mddev->reconfig_mutex);
    684	mutex_init(&mddev->bitmap_info.mutex);
    685	INIT_LIST_HEAD(&mddev->disks);
    686	INIT_LIST_HEAD(&mddev->all_mddevs);
    687	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
    688	atomic_set(&mddev->active, 1);
    689	atomic_set(&mddev->openers, 0);
    690	atomic_set(&mddev->active_io, 0);
    691	spin_lock_init(&mddev->lock);
    692	atomic_set(&mddev->flush_pending, 0);
    693	init_waitqueue_head(&mddev->sb_wait);
    694	init_waitqueue_head(&mddev->recovery_wait);
    695	mddev->reshape_position = MaxSector;
    696	mddev->reshape_backwards = 0;
    697	mddev->last_sync_action = "none";
    698	mddev->resync_min = 0;
    699	mddev->resync_max = MaxSector;
    700	mddev->level = LEVEL_NONE;
    701}
    702EXPORT_SYMBOL_GPL(mddev_init);
    703
    704static struct mddev *mddev_find_locked(dev_t unit)
    705{
    706	struct mddev *mddev;
    707
    708	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
    709		if (mddev->unit == unit)
    710			return mddev;
    711
    712	return NULL;
    713}
    714
    715/* find an unused unit number */
    716static dev_t mddev_alloc_unit(void)
    717{
    718	static int next_minor = 512;
    719	int start = next_minor;
     720	bool is_free = false;
    721	dev_t dev = 0;
    722
    723	while (!is_free) {
    724		dev = MKDEV(MD_MAJOR, next_minor);
    725		next_minor++;
    726		if (next_minor > MINORMASK)
    727			next_minor = 0;
    728		if (next_minor == start)
    729			return 0;		/* Oh dear, all in use. */
    730		is_free = !mddev_find_locked(dev);
    731	}
    732
    733	return dev;
    734}
    735
    736static struct mddev *mddev_find(dev_t unit)
    737{
    738	struct mddev *mddev;
    739
    740	if (MAJOR(unit) != MD_MAJOR)
    741		unit &= ~((1 << MdpMinorShift) - 1);
    742
    743	spin_lock(&all_mddevs_lock);
    744	mddev = mddev_find_locked(unit);
    745	if (mddev)
    746		mddev_get(mddev);
    747	spin_unlock(&all_mddevs_lock);
    748
    749	return mddev;
    750}
    751
    752static struct mddev *mddev_alloc(dev_t unit)
    753{
    754	struct mddev *new;
    755	int error;
    756
    757	if (unit && MAJOR(unit) != MD_MAJOR)
    758		unit &= ~((1 << MdpMinorShift) - 1);
    759
    760	new = kzalloc(sizeof(*new), GFP_KERNEL);
    761	if (!new)
    762		return ERR_PTR(-ENOMEM);
    763	mddev_init(new);
    764
    765	spin_lock(&all_mddevs_lock);
    766	if (unit) {
    767		error = -EEXIST;
    768		if (mddev_find_locked(unit))
    769			goto out_free_new;
    770		new->unit = unit;
    771		if (MAJOR(unit) == MD_MAJOR)
    772			new->md_minor = MINOR(unit);
    773		else
    774			new->md_minor = MINOR(unit) >> MdpMinorShift;
    775		new->hold_active = UNTIL_IOCTL;
    776	} else {
    777		error = -ENODEV;
    778		new->unit = mddev_alloc_unit();
    779		if (!new->unit)
    780			goto out_free_new;
    781		new->md_minor = MINOR(new->unit);
    782		new->hold_active = UNTIL_STOP;
    783	}
    784
    785	list_add(&new->all_mddevs, &all_mddevs);
    786	spin_unlock(&all_mddevs_lock);
    787	return new;
    788out_free_new:
    789	spin_unlock(&all_mddevs_lock);
    790	kfree(new);
    791	return ERR_PTR(error);
    792}
    793
    794static const struct attribute_group md_redundancy_group;
    795
    796void mddev_unlock(struct mddev *mddev)
    797{
    798	if (mddev->to_remove) {
    799		/* These cannot be removed under reconfig_mutex as
    800		 * an access to the files will try to take reconfig_mutex
    801		 * while holding the file unremovable, which leads to
    802		 * a deadlock.
     803		 * So set sysfs_active while the removal is happening,
     804		 * and anything else which might set ->to_remove or may
    805		 * otherwise change the sysfs namespace will fail with
    806		 * -EBUSY if sysfs_active is still set.
    807		 * We set sysfs_active under reconfig_mutex and elsewhere
    808		 * test it under the same mutex to ensure its correct value
    809		 * is seen.
    810		 */
    811		const struct attribute_group *to_remove = mddev->to_remove;
    812		mddev->to_remove = NULL;
    813		mddev->sysfs_active = 1;
    814		mutex_unlock(&mddev->reconfig_mutex);
    815
    816		if (mddev->kobj.sd) {
    817			if (to_remove != &md_redundancy_group)
    818				sysfs_remove_group(&mddev->kobj, to_remove);
    819			if (mddev->pers == NULL ||
    820			    mddev->pers->sync_request == NULL) {
    821				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
    822				if (mddev->sysfs_action)
    823					sysfs_put(mddev->sysfs_action);
    824				if (mddev->sysfs_completed)
    825					sysfs_put(mddev->sysfs_completed);
    826				if (mddev->sysfs_degraded)
    827					sysfs_put(mddev->sysfs_degraded);
    828				mddev->sysfs_action = NULL;
    829				mddev->sysfs_completed = NULL;
    830				mddev->sysfs_degraded = NULL;
    831			}
    832		}
    833		mddev->sysfs_active = 0;
    834	} else
    835		mutex_unlock(&mddev->reconfig_mutex);
    836
    837	/* As we've dropped the mutex we need a spinlock to
    838	 * make sure the thread doesn't disappear
    839	 */
    840	spin_lock(&pers_lock);
    841	md_wakeup_thread(mddev->thread);
    842	wake_up(&mddev->sb_wait);
    843	spin_unlock(&pers_lock);
    844}
    845EXPORT_SYMBOL_GPL(mddev_unlock);
    846
    847struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
    848{
    849	struct md_rdev *rdev;
    850
    851	rdev_for_each_rcu(rdev, mddev)
    852		if (rdev->desc_nr == nr)
    853			return rdev;
    854
    855	return NULL;
    856}
    857EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
    858
    859static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
    860{
    861	struct md_rdev *rdev;
    862
    863	rdev_for_each(rdev, mddev)
    864		if (rdev->bdev->bd_dev == dev)
    865			return rdev;
    866
    867	return NULL;
    868}
    869
    870struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
    871{
    872	struct md_rdev *rdev;
    873
    874	rdev_for_each_rcu(rdev, mddev)
    875		if (rdev->bdev->bd_dev == dev)
    876			return rdev;
    877
    878	return NULL;
    879}
    880EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
    881
    882static struct md_personality *find_pers(int level, char *clevel)
    883{
    884	struct md_personality *pers;
    885	list_for_each_entry(pers, &pers_list, list) {
    886		if (level != LEVEL_NONE && pers->level == level)
    887			return pers;
    888		if (strcmp(pers->name, clevel)==0)
    889			return pers;
    890	}
    891	return NULL;
    892}
    893
     894/* return the offset of the super block in 512-byte sectors */
    895static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
    896{
    897	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
    898}
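        /*
         * Roughly: MD_NEW_SIZE_SECTORS() rounds the device size down to a
         * 64 KiB boundary and steps back one more 64 KiB block, so the v0.90
         * superblock sits at the start of the last fully aligned 64 KiB of
         * the device.
         */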
    899
    900static int alloc_disk_sb(struct md_rdev *rdev)
    901{
    902	rdev->sb_page = alloc_page(GFP_KERNEL);
    903	if (!rdev->sb_page)
    904		return -ENOMEM;
    905	return 0;
    906}
    907
    908void md_rdev_clear(struct md_rdev *rdev)
    909{
    910	if (rdev->sb_page) {
    911		put_page(rdev->sb_page);
    912		rdev->sb_loaded = 0;
    913		rdev->sb_page = NULL;
    914		rdev->sb_start = 0;
    915		rdev->sectors = 0;
    916	}
    917	if (rdev->bb_page) {
    918		put_page(rdev->bb_page);
    919		rdev->bb_page = NULL;
    920	}
    921	badblocks_exit(&rdev->badblocks);
    922}
    923EXPORT_SYMBOL_GPL(md_rdev_clear);
    924
    925static void super_written(struct bio *bio)
    926{
    927	struct md_rdev *rdev = bio->bi_private;
    928	struct mddev *mddev = rdev->mddev;
    929
    930	if (bio->bi_status) {
    931		pr_err("md: %s gets error=%d\n", __func__,
    932		       blk_status_to_errno(bio->bi_status));
    933		md_error(mddev, rdev);
    934		if (!test_bit(Faulty, &rdev->flags)
    935		    && (bio->bi_opf & MD_FAILFAST)) {
    936			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
    937			set_bit(LastDev, &rdev->flags);
    938		}
    939	} else
    940		clear_bit(LastDev, &rdev->flags);
    941
    942	if (atomic_dec_and_test(&mddev->pending_writes))
    943		wake_up(&mddev->sb_wait);
    944	rdev_dec_pending(rdev, mddev);
    945	bio_put(bio);
    946}
    947
    948void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
    949		   sector_t sector, int size, struct page *page)
    950{
    951	/* write first size bytes of page to sector of rdev
    952	 * Increment mddev->pending_writes before returning
    953	 * and decrement it on completion, waking up sb_wait
    954	 * if zero is reached.
    955	 * If an error occurred, call md_error
    956	 */
    957	struct bio *bio;
    958
    959	if (!page)
    960		return;
    961
    962	if (test_bit(Faulty, &rdev->flags))
    963		return;
    964
    965	bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
    966			       1,
    967			       REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
    968			       GFP_NOIO, &mddev->sync_set);
    969
    970	atomic_inc(&rdev->nr_pending);
    971
    972	bio->bi_iter.bi_sector = sector;
    973	bio_add_page(bio, page, size, 0);
    974	bio->bi_private = rdev;
    975	bio->bi_end_io = super_written;
    976
    977	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
    978	    test_bit(FailFast, &rdev->flags) &&
    979	    !test_bit(LastDev, &rdev->flags))
    980		bio->bi_opf |= MD_FAILFAST;
    981
    982	atomic_inc(&mddev->pending_writes);
    983	submit_bio(bio);
    984}
    985
    986int md_super_wait(struct mddev *mddev)
    987{
    988	/* wait for all superblock writes that were scheduled to complete */
    989	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
    990	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
    991		return -EAGAIN;
    992	return 0;
    993}
    994
    995int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
    996		 struct page *page, int op, int op_flags, bool metadata_op)
    997{
    998	struct bio bio;
    999	struct bio_vec bvec;
   1000
   1001	if (metadata_op && rdev->meta_bdev)
   1002		bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
   1003	else
   1004		bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
   1005
   1006	if (metadata_op)
   1007		bio.bi_iter.bi_sector = sector + rdev->sb_start;
   1008	else if (rdev->mddev->reshape_position != MaxSector &&
   1009		 (rdev->mddev->reshape_backwards ==
   1010		  (sector >= rdev->mddev->reshape_position)))
   1011		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
   1012	else
   1013		bio.bi_iter.bi_sector = sector + rdev->data_offset;
   1014	bio_add_page(&bio, page, size, 0);
   1015
   1016	submit_bio_wait(&bio);
   1017
   1018	return !bio.bi_status;
   1019}
   1020EXPORT_SYMBOL_GPL(sync_page_io);
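        /*
         * Note: sync_page_io() returns 1 on success and 0 on I/O error, which
         * is why callers such as read_disk_sb() below treat zero as failure.
         */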
   1021
   1022static int read_disk_sb(struct md_rdev *rdev, int size)
   1023{
   1024	if (rdev->sb_loaded)
   1025		return 0;
   1026
   1027	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
   1028		goto fail;
   1029	rdev->sb_loaded = 1;
   1030	return 0;
   1031
   1032fail:
   1033	pr_err("md: disabled device %pg, could not read superblock.\n",
   1034	       rdev->bdev);
   1035	return -EINVAL;
   1036}
   1037
   1038static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
   1039{
   1040	return	sb1->set_uuid0 == sb2->set_uuid0 &&
   1041		sb1->set_uuid1 == sb2->set_uuid1 &&
   1042		sb1->set_uuid2 == sb2->set_uuid2 &&
   1043		sb1->set_uuid3 == sb2->set_uuid3;
   1044}
   1045
   1046static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
   1047{
   1048	int ret;
   1049	mdp_super_t *tmp1, *tmp2;
   1050
   1051	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
   1052	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
   1053
   1054	if (!tmp1 || !tmp2) {
   1055		ret = 0;
   1056		goto abort;
   1057	}
   1058
   1059	*tmp1 = *sb1;
   1060	*tmp2 = *sb2;
   1061
   1062	/*
   1063	 * nr_disks is not constant
   1064	 */
   1065	tmp1->nr_disks = 0;
   1066	tmp2->nr_disks = 0;
   1067
   1068	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
   1069abort:
   1070	kfree(tmp1);
   1071	kfree(tmp2);
   1072	return ret;
   1073}
   1074
   1075static u32 md_csum_fold(u32 csum)
   1076{
   1077	csum = (csum & 0xffff) + (csum >> 16);
   1078	return (csum & 0xffff) + (csum >> 16);
   1079}
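        /*
         * md_csum_fold() collapses a 32-bit sum into 16 bits with end-around
         * carries, much like Internet checksum folding.  v0.90 superblock
         * checksums are only compared after folding (see super_90_load()), so
         * only the low 16 bits are significant.
         */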
   1080
   1081static unsigned int calc_sb_csum(mdp_super_t *sb)
   1082{
   1083	u64 newcsum = 0;
   1084	u32 *sb32 = (u32*)sb;
   1085	int i;
   1086	unsigned int disk_csum, csum;
   1087
   1088	disk_csum = sb->sb_csum;
   1089	sb->sb_csum = 0;
   1090
   1091	for (i = 0; i < MD_SB_BYTES/4 ; i++)
   1092		newcsum += sb32[i];
   1093	csum = (newcsum & 0xffffffff) + (newcsum>>32);
   1094
   1095#ifdef CONFIG_ALPHA
   1096	/* This used to use csum_partial, which was wrong for several
   1097	 * reasons including that different results are returned on
   1098	 * different architectures.  It isn't critical that we get exactly
   1099	 * the same return value as before (we always csum_fold before
   1100	 * testing, and that removes any differences).  However as we
   1101	 * know that csum_partial always returned a 16bit value on
   1102	 * alphas, do a fold to maximise conformity to previous behaviour.
   1103	 */
   1104	sb->sb_csum = md_csum_fold(disk_csum);
   1105#else
   1106	sb->sb_csum = disk_csum;
   1107#endif
   1108	return csum;
   1109}
   1110
   1111/*
   1112 * Handle superblock details.
   1113 * We want to be able to handle multiple superblock formats
   1114 * so we have a common interface to them all, and an array of
   1115 * different handlers.
   1116 * We rely on user-space to write the initial superblock, and support
   1117 * reading and updating of superblocks.
   1118 * Interface methods are:
   1119 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
   1120 *      loads and validates a superblock on dev.
   1121 *      if refdev != NULL, compare superblocks on both devices
   1122 *    Return:
   1123 *      0 - dev has a superblock that is compatible with refdev
   1124 *      1 - dev has a superblock that is compatible and newer than refdev
   1125 *          so dev should be used as the refdev in future
   1126 *     -EINVAL superblock incompatible or invalid
   1127 *     -othererror e.g. -EIO
   1128 *
   1129 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
   1130 *      Verify that dev is acceptable into mddev.
   1131 *       The first time, mddev->raid_disks will be 0, and data from
   1132 *       dev should be merged in.  Subsequent calls check that dev
   1133 *       is new enough.  Return 0 or -EINVAL
   1134 *
   1135 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
   1136 *     Update the superblock for rdev with data in mddev
   1137 *     This does not write to disc.
   1138 *
   1139 */
   1140
   1141struct super_type  {
   1142	char		    *name;
   1143	struct module	    *owner;
   1144	int		    (*load_super)(struct md_rdev *rdev,
   1145					  struct md_rdev *refdev,
   1146					  int minor_version);
   1147	int		    (*validate_super)(struct mddev *mddev,
   1148					      struct md_rdev *rdev);
   1149	void		    (*sync_super)(struct mddev *mddev,
   1150					  struct md_rdev *rdev);
   1151	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
   1152						sector_t num_sectors);
   1153	int		    (*allow_new_offset)(struct md_rdev *rdev,
   1154						unsigned long long new_offset);
   1155};
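        /*
         * The concrete handlers for the 0.90 and 1.x formats are collected in
         * the super_types[] array further down in this file and are selected
         * by mddev->major_version.
         */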
   1156
   1157/*
   1158 * Check that the given mddev has no bitmap.
   1159 *
   1160 * This function is called from the run method of all personalities that do not
   1161 * support bitmaps. It prints an error message and returns non-zero if mddev
   1162 * has a bitmap. Otherwise, it returns 0.
   1163 *
   1164 */
   1165int md_check_no_bitmap(struct mddev *mddev)
   1166{
   1167	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
   1168		return 0;
   1169	pr_warn("%s: bitmaps are not supported for %s\n",
   1170		mdname(mddev), mddev->pers->name);
   1171	return 1;
   1172}
   1173EXPORT_SYMBOL(md_check_no_bitmap);
   1174
   1175/*
   1176 * load_super for 0.90.0
   1177 */
   1178static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
   1179{
   1180	mdp_super_t *sb;
   1181	int ret;
   1182	bool spare_disk = true;
   1183
   1184	/*
    1185	 * Calculate the position of the superblock (in 512-byte sectors);
    1186	 * it's at the end of the disk.
    1187	 *
    1188	 * It also happens to be a multiple of 4 KiB.
   1189	 */
   1190	rdev->sb_start = calc_dev_sboffset(rdev);
   1191
   1192	ret = read_disk_sb(rdev, MD_SB_BYTES);
   1193	if (ret)
   1194		return ret;
   1195
   1196	ret = -EINVAL;
   1197
   1198	sb = page_address(rdev->sb_page);
   1199
   1200	if (sb->md_magic != MD_SB_MAGIC) {
   1201		pr_warn("md: invalid raid superblock magic on %pg\n",
   1202			rdev->bdev);
   1203		goto abort;
   1204	}
   1205
   1206	if (sb->major_version != 0 ||
   1207	    sb->minor_version < 90 ||
   1208	    sb->minor_version > 91) {
   1209		pr_warn("Bad version number %d.%d on %pg\n",
   1210			sb->major_version, sb->minor_version, rdev->bdev);
   1211		goto abort;
   1212	}
   1213
   1214	if (sb->raid_disks <= 0)
   1215		goto abort;
   1216
   1217	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
   1218		pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
   1219		goto abort;
   1220	}
   1221
   1222	rdev->preferred_minor = sb->md_minor;
   1223	rdev->data_offset = 0;
   1224	rdev->new_data_offset = 0;
   1225	rdev->sb_size = MD_SB_BYTES;
   1226	rdev->badblocks.shift = -1;
   1227
   1228	if (sb->level == LEVEL_MULTIPATH)
   1229		rdev->desc_nr = -1;
   1230	else
   1231		rdev->desc_nr = sb->this_disk.number;
   1232
   1233	/* not spare disk, or LEVEL_MULTIPATH */
   1234	if (sb->level == LEVEL_MULTIPATH ||
   1235		(rdev->desc_nr >= 0 &&
   1236		 rdev->desc_nr < MD_SB_DISKS &&
   1237		 sb->disks[rdev->desc_nr].state &
   1238		 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
   1239		spare_disk = false;
   1240
   1241	if (!refdev) {
   1242		if (!spare_disk)
   1243			ret = 1;
   1244		else
   1245			ret = 0;
   1246	} else {
   1247		__u64 ev1, ev2;
   1248		mdp_super_t *refsb = page_address(refdev->sb_page);
   1249		if (!md_uuid_equal(refsb, sb)) {
   1250			pr_warn("md: %pg has different UUID to %pg\n",
   1251				rdev->bdev, refdev->bdev);
   1252			goto abort;
   1253		}
   1254		if (!md_sb_equal(refsb, sb)) {
   1255			pr_warn("md: %pg has same UUID but different superblock to %pg\n",
   1256				rdev->bdev, refdev->bdev);
   1257			goto abort;
   1258		}
   1259		ev1 = md_event(sb);
   1260		ev2 = md_event(refsb);
   1261
   1262		if (!spare_disk && ev1 > ev2)
   1263			ret = 1;
   1264		else
   1265			ret = 0;
   1266	}
   1267	rdev->sectors = rdev->sb_start;
   1268	/* Limit to 4TB as metadata cannot record more than that.
   1269	 * (not needed for Linear and RAID0 as metadata doesn't
   1270	 * record this size)
   1271	 */
   1272	if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
   1273		rdev->sectors = (sector_t)(2ULL << 32) - 2;
   1274
   1275	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
   1276		/* "this cannot possibly happen" ... */
   1277		ret = -EINVAL;
   1278
   1279 abort:
   1280	return ret;
   1281}
   1282
   1283/*
   1284 * validate_super for 0.90.0
   1285 */
   1286static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
   1287{
   1288	mdp_disk_t *desc;
   1289	mdp_super_t *sb = page_address(rdev->sb_page);
   1290	__u64 ev1 = md_event(sb);
   1291
   1292	rdev->raid_disk = -1;
   1293	clear_bit(Faulty, &rdev->flags);
   1294	clear_bit(In_sync, &rdev->flags);
   1295	clear_bit(Bitmap_sync, &rdev->flags);
   1296	clear_bit(WriteMostly, &rdev->flags);
   1297
   1298	if (mddev->raid_disks == 0) {
   1299		mddev->major_version = 0;
   1300		mddev->minor_version = sb->minor_version;
   1301		mddev->patch_version = sb->patch_version;
   1302		mddev->external = 0;
   1303		mddev->chunk_sectors = sb->chunk_size >> 9;
   1304		mddev->ctime = sb->ctime;
   1305		mddev->utime = sb->utime;
   1306		mddev->level = sb->level;
   1307		mddev->clevel[0] = 0;
   1308		mddev->layout = sb->layout;
   1309		mddev->raid_disks = sb->raid_disks;
   1310		mddev->dev_sectors = ((sector_t)sb->size) * 2;
   1311		mddev->events = ev1;
   1312		mddev->bitmap_info.offset = 0;
   1313		mddev->bitmap_info.space = 0;
   1314		/* bitmap can use 60 K after the 4K superblocks */
   1315		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
   1316		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
   1317		mddev->reshape_backwards = 0;
   1318
   1319		if (mddev->minor_version >= 91) {
   1320			mddev->reshape_position = sb->reshape_position;
   1321			mddev->delta_disks = sb->delta_disks;
   1322			mddev->new_level = sb->new_level;
   1323			mddev->new_layout = sb->new_layout;
   1324			mddev->new_chunk_sectors = sb->new_chunk >> 9;
   1325			if (mddev->delta_disks < 0)
   1326				mddev->reshape_backwards = 1;
   1327		} else {
   1328			mddev->reshape_position = MaxSector;
   1329			mddev->delta_disks = 0;
   1330			mddev->new_level = mddev->level;
   1331			mddev->new_layout = mddev->layout;
   1332			mddev->new_chunk_sectors = mddev->chunk_sectors;
   1333		}
   1334		if (mddev->level == 0)
   1335			mddev->layout = -1;
   1336
   1337		if (sb->state & (1<<MD_SB_CLEAN))
   1338			mddev->recovery_cp = MaxSector;
   1339		else {
   1340			if (sb->events_hi == sb->cp_events_hi &&
   1341				sb->events_lo == sb->cp_events_lo) {
   1342				mddev->recovery_cp = sb->recovery_cp;
   1343			} else
   1344				mddev->recovery_cp = 0;
   1345		}
   1346
   1347		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
   1348		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
   1349		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
   1350		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
   1351
   1352		mddev->max_disks = MD_SB_DISKS;
   1353
   1354		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
   1355		    mddev->bitmap_info.file == NULL) {
   1356			mddev->bitmap_info.offset =
   1357				mddev->bitmap_info.default_offset;
   1358			mddev->bitmap_info.space =
   1359				mddev->bitmap_info.default_space;
   1360		}
   1361
   1362	} else if (mddev->pers == NULL) {
   1363		/* Insist on good event counter while assembling, except
   1364		 * for spares (which don't need an event count) */
   1365		++ev1;
   1366		if (sb->disks[rdev->desc_nr].state & (
   1367			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
   1368			if (ev1 < mddev->events)
   1369				return -EINVAL;
   1370	} else if (mddev->bitmap) {
   1371		/* if adding to array with a bitmap, then we can accept an
   1372		 * older device ... but not too old.
   1373		 */
   1374		if (ev1 < mddev->bitmap->events_cleared)
   1375			return 0;
   1376		if (ev1 < mddev->events)
   1377			set_bit(Bitmap_sync, &rdev->flags);
   1378	} else {
   1379		if (ev1 < mddev->events)
   1380			/* just a hot-add of a new device, leave raid_disk at -1 */
   1381			return 0;
   1382	}
   1383
   1384	if (mddev->level != LEVEL_MULTIPATH) {
   1385		desc = sb->disks + rdev->desc_nr;
   1386
   1387		if (desc->state & (1<<MD_DISK_FAULTY))
   1388			set_bit(Faulty, &rdev->flags);
   1389		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
   1390			    desc->raid_disk < mddev->raid_disks */) {
   1391			set_bit(In_sync, &rdev->flags);
   1392			rdev->raid_disk = desc->raid_disk;
   1393			rdev->saved_raid_disk = desc->raid_disk;
   1394		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
   1395			/* active but not in sync implies recovery up to
   1396			 * reshape position.  We don't know exactly where
   1397			 * that is, so set to zero for now */
   1398			if (mddev->minor_version >= 91) {
   1399				rdev->recovery_offset = 0;
   1400				rdev->raid_disk = desc->raid_disk;
   1401			}
   1402		}
   1403		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
   1404			set_bit(WriteMostly, &rdev->flags);
   1405		if (desc->state & (1<<MD_DISK_FAILFAST))
   1406			set_bit(FailFast, &rdev->flags);
    1407	} else /* MULTIPATH devices are always in sync */
   1408		set_bit(In_sync, &rdev->flags);
   1409	return 0;
   1410}
   1411
   1412/*
   1413 * sync_super for 0.90.0
   1414 */
   1415static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
   1416{
   1417	mdp_super_t *sb;
   1418	struct md_rdev *rdev2;
   1419	int next_spare = mddev->raid_disks;
   1420
   1421	/* make rdev->sb match mddev data..
   1422	 *
   1423	 * 1/ zero out disks
   1424	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
   1425	 * 3/ any empty disks < next_spare become removed
   1426	 *
   1427	 * disks[0] gets initialised to REMOVED because
   1428	 * we cannot be sure from other fields if it has
   1429	 * been initialised or not.
   1430	 */
   1431	int i;
   1432	int active=0, working=0,failed=0,spare=0,nr_disks=0;
   1433
   1434	rdev->sb_size = MD_SB_BYTES;
   1435
   1436	sb = page_address(rdev->sb_page);
   1437
   1438	memset(sb, 0, sizeof(*sb));
   1439
   1440	sb->md_magic = MD_SB_MAGIC;
   1441	sb->major_version = mddev->major_version;
   1442	sb->patch_version = mddev->patch_version;
   1443	sb->gvalid_words  = 0; /* ignored */
   1444	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
   1445	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
   1446	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
   1447	memcpy(&sb->set_uuid3, mddev->uuid+12,4);
   1448
   1449	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
   1450	sb->level = mddev->level;
   1451	sb->size = mddev->dev_sectors / 2;
   1452	sb->raid_disks = mddev->raid_disks;
   1453	sb->md_minor = mddev->md_minor;
   1454	sb->not_persistent = 0;
   1455	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
   1456	sb->state = 0;
   1457	sb->events_hi = (mddev->events>>32);
   1458	sb->events_lo = (u32)mddev->events;
   1459
   1460	if (mddev->reshape_position == MaxSector)
   1461		sb->minor_version = 90;
   1462	else {
   1463		sb->minor_version = 91;
   1464		sb->reshape_position = mddev->reshape_position;
   1465		sb->new_level = mddev->new_level;
   1466		sb->delta_disks = mddev->delta_disks;
   1467		sb->new_layout = mddev->new_layout;
   1468		sb->new_chunk = mddev->new_chunk_sectors << 9;
   1469	}
   1470	mddev->minor_version = sb->minor_version;
   1471	if (mddev->in_sync)
   1472	{
   1473		sb->recovery_cp = mddev->recovery_cp;
   1474		sb->cp_events_hi = (mddev->events>>32);
   1475		sb->cp_events_lo = (u32)mddev->events;
   1476		if (mddev->recovery_cp == MaxSector)
   1477			sb->state = (1<< MD_SB_CLEAN);
   1478	} else
   1479		sb->recovery_cp = 0;
   1480
   1481	sb->layout = mddev->layout;
   1482	sb->chunk_size = mddev->chunk_sectors << 9;
   1483
   1484	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
   1485		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
   1486
   1487	sb->disks[0].state = (1<<MD_DISK_REMOVED);
   1488	rdev_for_each(rdev2, mddev) {
   1489		mdp_disk_t *d;
   1490		int desc_nr;
   1491		int is_active = test_bit(In_sync, &rdev2->flags);
   1492
   1493		if (rdev2->raid_disk >= 0 &&
   1494		    sb->minor_version >= 91)
   1495			/* we have nowhere to store the recovery_offset,
   1496			 * but if it is not below the reshape_position,
   1497			 * we can piggy-back on that.
   1498			 */
   1499			is_active = 1;
   1500		if (rdev2->raid_disk < 0 ||
   1501		    test_bit(Faulty, &rdev2->flags))
   1502			is_active = 0;
   1503		if (is_active)
   1504			desc_nr = rdev2->raid_disk;
   1505		else
   1506			desc_nr = next_spare++;
   1507		rdev2->desc_nr = desc_nr;
   1508		d = &sb->disks[rdev2->desc_nr];
   1509		nr_disks++;
   1510		d->number = rdev2->desc_nr;
   1511		d->major = MAJOR(rdev2->bdev->bd_dev);
   1512		d->minor = MINOR(rdev2->bdev->bd_dev);
   1513		if (is_active)
   1514			d->raid_disk = rdev2->raid_disk;
   1515		else
   1516			d->raid_disk = rdev2->desc_nr; /* compatibility */
   1517		if (test_bit(Faulty, &rdev2->flags))
   1518			d->state = (1<<MD_DISK_FAULTY);
   1519		else if (is_active) {
   1520			d->state = (1<<MD_DISK_ACTIVE);
   1521			if (test_bit(In_sync, &rdev2->flags))
   1522				d->state |= (1<<MD_DISK_SYNC);
   1523			active++;
   1524			working++;
   1525		} else {
   1526			d->state = 0;
   1527			spare++;
   1528			working++;
   1529		}
   1530		if (test_bit(WriteMostly, &rdev2->flags))
   1531			d->state |= (1<<MD_DISK_WRITEMOSTLY);
   1532		if (test_bit(FailFast, &rdev2->flags))
   1533			d->state |= (1<<MD_DISK_FAILFAST);
   1534	}
   1535	/* now set the "removed" and "faulty" bits on any missing devices */
   1536	for (i=0 ; i < mddev->raid_disks ; i++) {
   1537		mdp_disk_t *d = &sb->disks[i];
   1538		if (d->state == 0 && d->number == 0) {
   1539			d->number = i;
   1540			d->raid_disk = i;
   1541			d->state = (1<<MD_DISK_REMOVED);
   1542			d->state |= (1<<MD_DISK_FAULTY);
   1543			failed++;
   1544		}
   1545	}
   1546	sb->nr_disks = nr_disks;
   1547	sb->active_disks = active;
   1548	sb->working_disks = working;
   1549	sb->failed_disks = failed;
   1550	sb->spare_disks = spare;
   1551
   1552	sb->this_disk = sb->disks[rdev->desc_nr];
   1553	sb->sb_csum = calc_sb_csum(sb);
   1554}
   1555
   1556/*
   1557 * rdev_size_change for 0.90.0
   1558 */
   1559static unsigned long long
   1560super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
   1561{
   1562	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
   1563		return 0; /* component must fit device */
   1564	if (rdev->mddev->bitmap_info.offset)
   1565		return 0; /* can't move bitmap */
   1566	rdev->sb_start = calc_dev_sboffset(rdev);
   1567	if (!num_sectors || num_sectors > rdev->sb_start)
   1568		num_sectors = rdev->sb_start;
   1569	/* Limit to 4TB as metadata cannot record more than that.
   1570	 * 4TB == 2^32 KB, or 2*2^32 sectors.
   1571	 */
   1572	if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
   1573		num_sectors = (sector_t)(2ULL << 32) - 2;
   1574	do {
   1575		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
   1576		       rdev->sb_page);
   1577	} while (md_super_wait(rdev->mddev) < 0);
   1578	return num_sectors;
   1579}
   1580
   1581static int
   1582super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
   1583{
   1584	/* non-zero offset changes not possible with v0.90 */
   1585	return new_offset == 0;
   1586}
   1587
   1588/*
   1589 * version 1 superblock
   1590 */
   1591
   1592static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
   1593{
   1594	__le32 disk_csum;
   1595	u32 csum;
   1596	unsigned long long newcsum;
   1597	int size = 256 + le32_to_cpu(sb->max_dev)*2;
   1598	__le32 *isuper = (__le32*)sb;
   1599
   1600	disk_csum = sb->sb_csum;
   1601	sb->sb_csum = 0;
   1602	newcsum = 0;
   1603	for (; size >= 4; size -= 4)
   1604		newcsum += le32_to_cpu(*isuper++);
   1605
   1606	if (size == 2)
   1607		newcsum += le16_to_cpu(*(__le16*) isuper);
   1608
   1609	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
   1610	sb->sb_csum = disk_csum;
   1611	return cpu_to_le32(csum);
   1612}
   1613
   1614static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
   1615{
   1616	struct mdp_superblock_1 *sb;
   1617	int ret;
   1618	sector_t sb_start;
   1619	sector_t sectors;
   1620	int bmask;
   1621	bool spare_disk = true;
   1622
   1623	/*
    1624	 * Calculate the position of the superblock in 512-byte sectors.
    1625	 * It is always aligned to a 4K boundary and,
    1626	 * depending on minor_version, it can be:
   1627	 * 0: At least 8K, but less than 12K, from end of device
   1628	 * 1: At start of device
   1629	 * 2: 4K from start of device.
   1630	 */
   1631	switch(minor_version) {
   1632	case 0:
   1633		sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
   1634		sb_start &= ~(sector_t)(4*2-1);
   1635		break;
   1636	case 1:
   1637		sb_start = 0;
   1638		break;
   1639	case 2:
   1640		sb_start = 8;
   1641		break;
   1642	default:
   1643		return -EINVAL;
   1644	}
   1645	rdev->sb_start = sb_start;
   1646
   1647	/* superblock is rarely larger than 1K, but it can be larger,
   1648	 * and it is safe to read 4k, so we do that
   1649	 */
   1650	ret = read_disk_sb(rdev, 4096);
   1651	if (ret) return ret;
   1652
   1653	sb = page_address(rdev->sb_page);
   1654
   1655	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
   1656	    sb->major_version != cpu_to_le32(1) ||
   1657	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
   1658	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
   1659	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
   1660		return -EINVAL;
   1661
   1662	if (calc_sb_1_csum(sb) != sb->sb_csum) {
   1663		pr_warn("md: invalid superblock checksum on %pg\n",
   1664			rdev->bdev);
   1665		return -EINVAL;
   1666	}
   1667	if (le64_to_cpu(sb->data_size) < 10) {
   1668		pr_warn("md: data_size too small on %pg\n",
   1669			rdev->bdev);
   1670		return -EINVAL;
   1671	}
   1672	if (sb->pad0 ||
   1673	    sb->pad3[0] ||
   1674	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
   1675		/* Some padding is non-zero, might be a new feature */
   1676		return -EINVAL;
   1677
   1678	rdev->preferred_minor = 0xffff;
   1679	rdev->data_offset = le64_to_cpu(sb->data_offset);
   1680	rdev->new_data_offset = rdev->data_offset;
   1681	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
   1682	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
   1683		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
   1684	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
   1685
   1686	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
   1687	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
   1688	if (rdev->sb_size & bmask)
   1689		rdev->sb_size = (rdev->sb_size | bmask) + 1;
   1690
   1691	if (minor_version
   1692	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
   1693		return -EINVAL;
   1694	if (minor_version
   1695	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
   1696		return -EINVAL;
   1697
   1698	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
   1699		rdev->desc_nr = -1;
   1700	else
   1701		rdev->desc_nr = le32_to_cpu(sb->dev_number);
   1702
   1703	if (!rdev->bb_page) {
   1704		rdev->bb_page = alloc_page(GFP_KERNEL);
   1705		if (!rdev->bb_page)
   1706			return -ENOMEM;
   1707	}
   1708	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
   1709	    rdev->badblocks.count == 0) {
   1710		/* need to load the bad block list.
   1711		 * Currently we limit it to one page.
   1712		 */
   1713		s32 offset;
   1714		sector_t bb_sector;
   1715		__le64 *bbp;
   1716		int i;
   1717		int sectors = le16_to_cpu(sb->bblog_size);
   1718		if (sectors > (PAGE_SIZE / 512))
   1719			return -EINVAL;
   1720		offset = le32_to_cpu(sb->bblog_offset);
   1721		if (offset == 0)
   1722			return -EINVAL;
   1723		bb_sector = (long long)offset;
   1724		if (!sync_page_io(rdev, bb_sector, sectors << 9,
   1725				  rdev->bb_page, REQ_OP_READ, 0, true))
   1726			return -EIO;
   1727		bbp = (__le64 *)page_address(rdev->bb_page);
   1728		rdev->badblocks.shift = sb->bblog_shift;
   1729		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
   1730			u64 bb = le64_to_cpu(*bbp);
   1731			int count = bb & (0x3ff);
   1732			u64 sector = bb >> 10;
   1733			sector <<= sb->bblog_shift;
   1734			count <<= sb->bblog_shift;
   1735			if (bb + 1 == 0)
   1736				break;
   1737			if (badblocks_set(&rdev->badblocks, sector, count, 1))
   1738				return -EINVAL;
   1739		}
   1740	} else if (sb->bblog_offset != 0)
   1741		rdev->badblocks.shift = 0;
   1742
   1743	if ((le32_to_cpu(sb->feature_map) &
   1744	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
   1745		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
   1746		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
   1747		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
   1748	}
   1749
   1750	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
   1751	    sb->level != 0)
   1752		return -EINVAL;
   1753
   1754	/* not spare disk, or LEVEL_MULTIPATH */
   1755	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
   1756		(rdev->desc_nr >= 0 &&
   1757		rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
   1758		(le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
   1759		 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
   1760		spare_disk = false;
   1761
   1762	if (!refdev) {
   1763		if (!spare_disk)
   1764			ret = 1;
   1765		else
   1766			ret = 0;
   1767	} else {
   1768		__u64 ev1, ev2;
   1769		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
   1770
   1771		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
   1772		    sb->level != refsb->level ||
   1773		    sb->layout != refsb->layout ||
   1774		    sb->chunksize != refsb->chunksize) {
   1775			pr_warn("md: %pg has strangely different superblock to %pg\n",
   1776				rdev->bdev,
   1777				refdev->bdev);
   1778			return -EINVAL;
   1779		}
   1780		ev1 = le64_to_cpu(sb->events);
   1781		ev2 = le64_to_cpu(refsb->events);
   1782
   1783		if (!spare_disk && ev1 > ev2)
   1784			ret = 1;
   1785		else
   1786			ret = 0;
   1787	}
   1788	if (minor_version)
   1789		sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
   1790	else
   1791		sectors = rdev->sb_start;
   1792	if (sectors < le64_to_cpu(sb->data_size))
   1793		return -EINVAL;
   1794	rdev->sectors = le64_to_cpu(sb->data_size);
   1795	return ret;
   1796}
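
/*
 * Illustrative sketch (helper and parameter names are hypothetical): unpacking
 * one on-disk bad-block record the way the loop in super_1_load() does.  Each
 * __le64 word stores the start sector in the upper 54 bits and the length in
 * the lower 10 bits; a word of all ones terminates the list.
 */
static inline bool example_decode_bb_record(u64 bb, int bblog_shift,
					    sector_t *first_bad, int *bad_sectors)
{
	if (bb + 1 == 0)
		return false;	/* 0xffffffffffffffff terminator */
	*bad_sectors = (int)(bb & 0x3ff) << bblog_shift;
	*first_bad = (sector_t)(bb >> 10) << bblog_shift;
	return true;
}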
   1797
   1798static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
   1799{
   1800	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
   1801	__u64 ev1 = le64_to_cpu(sb->events);
   1802
   1803	rdev->raid_disk = -1;
   1804	clear_bit(Faulty, &rdev->flags);
   1805	clear_bit(In_sync, &rdev->flags);
   1806	clear_bit(Bitmap_sync, &rdev->flags);
   1807	clear_bit(WriteMostly, &rdev->flags);
   1808
   1809	if (mddev->raid_disks == 0) {
   1810		mddev->major_version = 1;
   1811		mddev->patch_version = 0;
   1812		mddev->external = 0;
   1813		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
   1814		mddev->ctime = le64_to_cpu(sb->ctime);
   1815		mddev->utime = le64_to_cpu(sb->utime);
   1816		mddev->level = le32_to_cpu(sb->level);
   1817		mddev->clevel[0] = 0;
   1818		mddev->layout = le32_to_cpu(sb->layout);
   1819		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
   1820		mddev->dev_sectors = le64_to_cpu(sb->size);
   1821		mddev->events = ev1;
   1822		mddev->bitmap_info.offset = 0;
   1823		mddev->bitmap_info.space = 0;
   1824		/* Default location for bitmap is 1K after superblock
   1825		 * using 3K - total of 4K
   1826		 */
   1827		mddev->bitmap_info.default_offset = 1024 >> 9;
   1828		mddev->bitmap_info.default_space = (4096-1024) >> 9;
   1829		mddev->reshape_backwards = 0;
   1830
   1831		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
   1832		memcpy(mddev->uuid, sb->set_uuid, 16);
   1833
   1834		mddev->max_disks =  (4096-256)/2;
   1835
   1836		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
   1837		    mddev->bitmap_info.file == NULL) {
   1838			mddev->bitmap_info.offset =
   1839				(__s32)le32_to_cpu(sb->bitmap_offset);
   1840			/* Metadata doesn't record how much space is available.
   1841			 * For 1.0, we assume we can use up to the superblock
   1842			 * if before, else to 4K beyond superblock.
   1843			 * For others, assume no change is possible.
   1844			 */
   1845			if (mddev->minor_version > 0)
   1846				mddev->bitmap_info.space = 0;
   1847			else if (mddev->bitmap_info.offset > 0)
   1848				mddev->bitmap_info.space =
   1849					8 - mddev->bitmap_info.offset;
   1850			else
   1851				mddev->bitmap_info.space =
   1852					-mddev->bitmap_info.offset;
   1853		}
   1854
   1855		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
   1856			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
   1857			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
   1858			mddev->new_level = le32_to_cpu(sb->new_level);
   1859			mddev->new_layout = le32_to_cpu(sb->new_layout);
   1860			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
   1861			if (mddev->delta_disks < 0 ||
   1862			    (mddev->delta_disks == 0 &&
   1863			     (le32_to_cpu(sb->feature_map)
   1864			      & MD_FEATURE_RESHAPE_BACKWARDS)))
   1865				mddev->reshape_backwards = 1;
   1866		} else {
   1867			mddev->reshape_position = MaxSector;
   1868			mddev->delta_disks = 0;
   1869			mddev->new_level = mddev->level;
   1870			mddev->new_layout = mddev->layout;
   1871			mddev->new_chunk_sectors = mddev->chunk_sectors;
   1872		}
   1873
   1874		if (mddev->level == 0 &&
   1875		    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
   1876			mddev->layout = -1;
   1877
   1878		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
   1879			set_bit(MD_HAS_JOURNAL, &mddev->flags);
   1880
   1881		if (le32_to_cpu(sb->feature_map) &
   1882		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
   1883			if (le32_to_cpu(sb->feature_map) &
   1884			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
   1885				return -EINVAL;
   1886			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
   1887			    (le32_to_cpu(sb->feature_map) &
   1888					    MD_FEATURE_MULTIPLE_PPLS))
   1889				return -EINVAL;
   1890			set_bit(MD_HAS_PPL, &mddev->flags);
   1891		}
   1892	} else if (mddev->pers == NULL) {
    1893		/* Insist on a good event counter while assembling, except for
   1894		 * spares (which don't need an event count) */
   1895		++ev1;
   1896		if (rdev->desc_nr >= 0 &&
   1897		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
   1898		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
   1899		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
   1900			if (ev1 < mddev->events)
   1901				return -EINVAL;
   1902	} else if (mddev->bitmap) {
   1903		/* If adding to array with a bitmap, then we can accept an
   1904		 * older device, but not too old.
   1905		 */
   1906		if (ev1 < mddev->bitmap->events_cleared)
   1907			return 0;
   1908		if (ev1 < mddev->events)
   1909			set_bit(Bitmap_sync, &rdev->flags);
   1910	} else {
   1911		if (ev1 < mddev->events)
   1912			/* just a hot-add of a new device, leave raid_disk at -1 */
   1913			return 0;
   1914	}
   1915	if (mddev->level != LEVEL_MULTIPATH) {
   1916		int role;
   1917		if (rdev->desc_nr < 0 ||
   1918		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
   1919			role = MD_DISK_ROLE_SPARE;
   1920			rdev->desc_nr = -1;
   1921		} else
   1922			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
   1923		switch(role) {
   1924		case MD_DISK_ROLE_SPARE: /* spare */
   1925			break;
   1926		case MD_DISK_ROLE_FAULTY: /* faulty */
   1927			set_bit(Faulty, &rdev->flags);
   1928			break;
   1929		case MD_DISK_ROLE_JOURNAL: /* journal device */
   1930			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
   1931				/* journal device without journal feature */
   1932				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
   1933				return -EINVAL;
   1934			}
   1935			set_bit(Journal, &rdev->flags);
   1936			rdev->journal_tail = le64_to_cpu(sb->journal_tail);
   1937			rdev->raid_disk = 0;
   1938			break;
   1939		default:
   1940			rdev->saved_raid_disk = role;
   1941			if ((le32_to_cpu(sb->feature_map) &
   1942			     MD_FEATURE_RECOVERY_OFFSET)) {
   1943				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
   1944				if (!(le32_to_cpu(sb->feature_map) &
   1945				      MD_FEATURE_RECOVERY_BITMAP))
   1946					rdev->saved_raid_disk = -1;
   1947			} else {
   1948				/*
   1949				 * If the array is FROZEN, then the device can't
   1950				 * be in_sync with rest of array.
   1951				 */
   1952				if (!test_bit(MD_RECOVERY_FROZEN,
   1953					      &mddev->recovery))
   1954					set_bit(In_sync, &rdev->flags);
   1955			}
   1956			rdev->raid_disk = role;
   1957			break;
   1958		}
   1959		if (sb->devflags & WriteMostly1)
   1960			set_bit(WriteMostly, &rdev->flags);
   1961		if (sb->devflags & FailFast1)
   1962			set_bit(FailFast, &rdev->flags);
   1963		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
   1964			set_bit(Replacement, &rdev->flags);
   1965	} else /* MULTIPATH are always insync */
   1966		set_bit(In_sync, &rdev->flags);
   1967
   1968	return 0;
   1969}
   1970
   1971static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
   1972{
   1973	struct mdp_superblock_1 *sb;
   1974	struct md_rdev *rdev2;
   1975	int max_dev, i;
   1976	/* make rdev->sb match mddev and rdev data. */
   1977
   1978	sb = page_address(rdev->sb_page);
   1979
   1980	sb->feature_map = 0;
   1981	sb->pad0 = 0;
   1982	sb->recovery_offset = cpu_to_le64(0);
   1983	memset(sb->pad3, 0, sizeof(sb->pad3));
   1984
   1985	sb->utime = cpu_to_le64((__u64)mddev->utime);
   1986	sb->events = cpu_to_le64(mddev->events);
   1987	if (mddev->in_sync)
   1988		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
   1989	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
   1990		sb->resync_offset = cpu_to_le64(MaxSector);
   1991	else
   1992		sb->resync_offset = cpu_to_le64(0);
   1993
   1994	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
   1995
   1996	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
   1997	sb->size = cpu_to_le64(mddev->dev_sectors);
   1998	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
   1999	sb->level = cpu_to_le32(mddev->level);
   2000	sb->layout = cpu_to_le32(mddev->layout);
   2001	if (test_bit(FailFast, &rdev->flags))
   2002		sb->devflags |= FailFast1;
   2003	else
   2004		sb->devflags &= ~FailFast1;
   2005
   2006	if (test_bit(WriteMostly, &rdev->flags))
   2007		sb->devflags |= WriteMostly1;
   2008	else
   2009		sb->devflags &= ~WriteMostly1;
   2010	sb->data_offset = cpu_to_le64(rdev->data_offset);
   2011	sb->data_size = cpu_to_le64(rdev->sectors);
   2012
   2013	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
   2014		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
   2015		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
   2016	}
   2017
   2018	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
   2019	    !test_bit(In_sync, &rdev->flags)) {
   2020		sb->feature_map |=
   2021			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
   2022		sb->recovery_offset =
   2023			cpu_to_le64(rdev->recovery_offset);
   2024		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
   2025			sb->feature_map |=
   2026				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
   2027	}
   2028	/* Note: recovery_offset and journal_tail share space  */
   2029	if (test_bit(Journal, &rdev->flags))
   2030		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
   2031	if (test_bit(Replacement, &rdev->flags))
   2032		sb->feature_map |=
   2033			cpu_to_le32(MD_FEATURE_REPLACEMENT);
   2034
   2035	if (mddev->reshape_position != MaxSector) {
   2036		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
   2037		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
   2038		sb->new_layout = cpu_to_le32(mddev->new_layout);
   2039		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
   2040		sb->new_level = cpu_to_le32(mddev->new_level);
   2041		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
   2042		if (mddev->delta_disks == 0 &&
   2043		    mddev->reshape_backwards)
   2044			sb->feature_map
   2045				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
   2046		if (rdev->new_data_offset != rdev->data_offset) {
   2047			sb->feature_map
   2048				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
   2049			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
   2050							     - rdev->data_offset));
   2051		}
   2052	}
   2053
   2054	if (mddev_is_clustered(mddev))
   2055		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
   2056
   2057	if (rdev->badblocks.count == 0)
    2058		/* Nothing to do for bad blocks */ ;
   2059	else if (sb->bblog_offset == 0)
   2060		/* Cannot record bad blocks on this device */
   2061		md_error(mddev, rdev);
   2062	else {
   2063		struct badblocks *bb = &rdev->badblocks;
   2064		__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
   2065		u64 *p = bb->page;
   2066		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
   2067		if (bb->changed) {
   2068			unsigned seq;
   2069
   2070retry:
   2071			seq = read_seqbegin(&bb->lock);
   2072
   2073			memset(bbp, 0xff, PAGE_SIZE);
   2074
   2075			for (i = 0 ; i < bb->count ; i++) {
   2076				u64 internal_bb = p[i];
   2077				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
   2078						| BB_LEN(internal_bb));
   2079				bbp[i] = cpu_to_le64(store_bb);
   2080			}
   2081			bb->changed = 0;
   2082			if (read_seqretry(&bb->lock, seq))
   2083				goto retry;
   2084
   2085			bb->sector = (rdev->sb_start +
   2086				      (int)le32_to_cpu(sb->bblog_offset));
   2087			bb->size = le16_to_cpu(sb->bblog_size);
   2088		}
   2089	}
   2090
   2091	max_dev = 0;
   2092	rdev_for_each(rdev2, mddev)
   2093		if (rdev2->desc_nr+1 > max_dev)
   2094			max_dev = rdev2->desc_nr+1;
   2095
   2096	if (max_dev > le32_to_cpu(sb->max_dev)) {
   2097		int bmask;
   2098		sb->max_dev = cpu_to_le32(max_dev);
   2099		rdev->sb_size = max_dev * 2 + 256;
   2100		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
   2101		if (rdev->sb_size & bmask)
   2102			rdev->sb_size = (rdev->sb_size | bmask) + 1;
   2103	} else
   2104		max_dev = le32_to_cpu(sb->max_dev);
   2105
   2106	for (i=0; i<max_dev;i++)
   2107		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
   2108
   2109	if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
   2110		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
   2111
   2112	if (test_bit(MD_HAS_PPL, &mddev->flags)) {
   2113		if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
   2114			sb->feature_map |=
   2115			    cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
   2116		else
   2117			sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
   2118		sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
   2119		sb->ppl.size = cpu_to_le16(rdev->ppl.size);
   2120	}
   2121
   2122	rdev_for_each(rdev2, mddev) {
   2123		i = rdev2->desc_nr;
   2124		if (test_bit(Faulty, &rdev2->flags))
   2125			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
   2126		else if (test_bit(In_sync, &rdev2->flags))
   2127			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
   2128		else if (test_bit(Journal, &rdev2->flags))
   2129			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
   2130		else if (rdev2->raid_disk >= 0)
   2131			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
   2132		else
   2133			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
   2134	}
   2135
   2136	sb->sb_csum = calc_sb_1_csum(sb);
   2137}
   2138
   2139static sector_t super_1_choose_bm_space(sector_t dev_size)
   2140{
   2141	sector_t bm_space;
   2142
    2143	/* if the device is bigger than 8Gig, save 64k for bitmap
    2144	 * usage; if bigger than 200Gig, save 128k
   2145	 */
   2146	if (dev_size < 64*2)
   2147		bm_space = 0;
   2148	else if (dev_size - 64*2 >= 200*1024*1024*2)
   2149		bm_space = 128*2;
   2150	else if (dev_size - 4*2 > 8*1024*1024*2)
   2151		bm_space = 64*2;
   2152	else
   2153		bm_space = 4*2;
   2154	return bm_space;
   2155}
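
/*
 * For reference (illustrative arithmetic, all sizes in 512-byte sectors):
 * a 32MiB component (65536 sectors) falls through to the 4K default
 * (bm_space = 8), a 16GiB component crosses the 8Gig threshold and gets
 * 64K (bm_space = 128), and a 1TiB component crosses the 200Gig threshold
 * and gets 128K (bm_space = 256).
 */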
   2156
   2157static unsigned long long
   2158super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
   2159{
   2160	struct mdp_superblock_1 *sb;
   2161	sector_t max_sectors;
   2162	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
   2163		return 0; /* component must fit device */
   2164	if (rdev->data_offset != rdev->new_data_offset)
   2165		return 0; /* too confusing */
   2166	if (rdev->sb_start < rdev->data_offset) {
   2167		/* minor versions 1 and 2; superblock before data */
   2168		max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
   2169		if (!num_sectors || num_sectors > max_sectors)
   2170			num_sectors = max_sectors;
   2171	} else if (rdev->mddev->bitmap_info.offset) {
   2172		/* minor version 0 with bitmap we can't move */
   2173		return 0;
   2174	} else {
   2175		/* minor version 0; superblock after data */
   2176		sector_t sb_start, bm_space;
   2177		sector_t dev_size = bdev_nr_sectors(rdev->bdev);
   2178
   2179		/* 8K is for superblock */
   2180		sb_start = dev_size - 8*2;
   2181		sb_start &= ~(sector_t)(4*2 - 1);
   2182
   2183		bm_space = super_1_choose_bm_space(dev_size);
   2184
    2185		/* Space that can be used to store data must exclude the
    2186		 * superblock, the bitmap space, and the bad block space (4K).
   2187		 */
   2188		max_sectors = sb_start - bm_space - 4*2;
   2189
   2190		if (!num_sectors || num_sectors > max_sectors)
   2191			num_sectors = max_sectors;
   2192		rdev->sb_start = sb_start;
   2193	}
   2194	sb = page_address(rdev->sb_page);
   2195	sb->data_size = cpu_to_le64(num_sectors);
   2196	sb->super_offset = cpu_to_le64(rdev->sb_start);
   2197	sb->sb_csum = calc_sb_1_csum(sb);
   2198	do {
   2199		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
   2200			       rdev->sb_page);
   2201	} while (md_super_wait(rdev->mddev) < 0);
   2202	return num_sectors;
   2203
   2204}
   2205
   2206static int
   2207super_1_allow_new_offset(struct md_rdev *rdev,
   2208			 unsigned long long new_offset)
   2209{
   2210	/* All necessary checks on new >= old have been done */
   2211	struct bitmap *bitmap;
   2212	if (new_offset >= rdev->data_offset)
   2213		return 1;
   2214
   2215	/* with 1.0 metadata, there is no metadata to tread on
   2216	 * so we can always move back */
   2217	if (rdev->mddev->minor_version == 0)
   2218		return 1;
   2219
   2220	/* otherwise we must be sure not to step on
   2221	 * any metadata, so stay:
   2222	 * 36K beyond start of superblock
   2223	 * beyond end of badblocks
   2224	 * beyond write-intent bitmap
   2225	 */
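	/* For reference: the (32+4)*2 in the check below is 72 sectors == 36K. */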
   2226	if (rdev->sb_start + (32+4)*2 > new_offset)
   2227		return 0;
   2228	bitmap = rdev->mddev->bitmap;
   2229	if (bitmap && !rdev->mddev->bitmap_info.file &&
   2230	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
   2231	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
   2232		return 0;
   2233	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
   2234		return 0;
   2235
   2236	return 1;
   2237}
   2238
   2239static struct super_type super_types[] = {
   2240	[0] = {
   2241		.name	= "0.90.0",
   2242		.owner	= THIS_MODULE,
   2243		.load_super	    = super_90_load,
   2244		.validate_super	    = super_90_validate,
   2245		.sync_super	    = super_90_sync,
   2246		.rdev_size_change   = super_90_rdev_size_change,
   2247		.allow_new_offset   = super_90_allow_new_offset,
   2248	},
   2249	[1] = {
   2250		.name	= "md-1",
   2251		.owner	= THIS_MODULE,
   2252		.load_super	    = super_1_load,
   2253		.validate_super	    = super_1_validate,
   2254		.sync_super	    = super_1_sync,
   2255		.rdev_size_change   = super_1_rdev_size_change,
   2256		.allow_new_offset   = super_1_allow_new_offset,
   2257	},
   2258};
   2259
   2260static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
   2261{
   2262	if (mddev->sync_super) {
   2263		mddev->sync_super(mddev, rdev);
   2264		return;
   2265	}
   2266
   2267	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
   2268
   2269	super_types[mddev->major_version].sync_super(mddev, rdev);
   2270}
   2271
   2272static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
   2273{
   2274	struct md_rdev *rdev, *rdev2;
   2275
   2276	rcu_read_lock();
   2277	rdev_for_each_rcu(rdev, mddev1) {
   2278		if (test_bit(Faulty, &rdev->flags) ||
   2279		    test_bit(Journal, &rdev->flags) ||
   2280		    rdev->raid_disk == -1)
   2281			continue;
   2282		rdev_for_each_rcu(rdev2, mddev2) {
   2283			if (test_bit(Faulty, &rdev2->flags) ||
   2284			    test_bit(Journal, &rdev2->flags) ||
   2285			    rdev2->raid_disk == -1)
   2286				continue;
   2287			if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
   2288				rcu_read_unlock();
   2289				return 1;
   2290			}
   2291		}
   2292	}
   2293	rcu_read_unlock();
   2294	return 0;
   2295}
   2296
   2297static LIST_HEAD(pending_raid_disks);
   2298
   2299/*
   2300 * Try to register data integrity profile for an mddev
   2301 *
   2302 * This is called when an array is started and after a disk has been kicked
   2303 * from the array. It only succeeds if all working and active component devices
   2304 * are integrity capable with matching profiles.
   2305 */
   2306int md_integrity_register(struct mddev *mddev)
   2307{
   2308	struct md_rdev *rdev, *reference = NULL;
   2309
   2310	if (list_empty(&mddev->disks))
   2311		return 0; /* nothing to do */
   2312	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
   2313		return 0; /* shouldn't register, or already is */
   2314	rdev_for_each(rdev, mddev) {
   2315		/* skip spares and non-functional disks */
   2316		if (test_bit(Faulty, &rdev->flags))
   2317			continue;
   2318		if (rdev->raid_disk < 0)
   2319			continue;
   2320		if (!reference) {
   2321			/* Use the first rdev as the reference */
   2322			reference = rdev;
   2323			continue;
   2324		}
   2325		/* does this rdev's profile match the reference profile? */
   2326		if (blk_integrity_compare(reference->bdev->bd_disk,
   2327				rdev->bdev->bd_disk) < 0)
   2328			return -EINVAL;
   2329	}
   2330	if (!reference || !bdev_get_integrity(reference->bdev))
   2331		return 0;
   2332	/*
   2333	 * All component devices are integrity capable and have matching
   2334	 * profiles, register the common profile for the md device.
   2335	 */
   2336	blk_integrity_register(mddev->gendisk,
   2337			       bdev_get_integrity(reference->bdev));
   2338
   2339	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
   2340	if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
   2341	    (mddev->level != 1 && mddev->level != 10 &&
   2342	     bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) {
   2343		/*
   2344		 * No need to handle the failure of bioset_integrity_create,
   2345		 * because the function is called by md_run() -> pers->run(),
    2346		 * md_run calls bioset_exit -> bioset_integrity_free in the
    2347		 * failure case.
   2348		 */
   2349		pr_err("md: failed to create integrity pool for %s\n",
   2350		       mdname(mddev));
   2351		return -EINVAL;
   2352	}
   2353	return 0;
   2354}
   2355EXPORT_SYMBOL(md_integrity_register);
   2356
   2357/*
   2358 * Attempt to add an rdev, but only if it is consistent with the current
   2359 * integrity profile
   2360 */
   2361int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
   2362{
   2363	struct blk_integrity *bi_mddev;
   2364
   2365	if (!mddev->gendisk)
   2366		return 0;
   2367
   2368	bi_mddev = blk_get_integrity(mddev->gendisk);
   2369
   2370	if (!bi_mddev) /* nothing to do */
   2371		return 0;
   2372
   2373	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
   2374		pr_err("%s: incompatible integrity profile for %pg\n",
   2375		       mdname(mddev), rdev->bdev);
   2376		return -ENXIO;
   2377	}
   2378
   2379	return 0;
   2380}
   2381EXPORT_SYMBOL(md_integrity_add_rdev);
   2382
   2383static bool rdev_read_only(struct md_rdev *rdev)
   2384{
   2385	return bdev_read_only(rdev->bdev) ||
   2386		(rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
   2387}
   2388
   2389static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
   2390{
   2391	char b[BDEVNAME_SIZE];
   2392	int err;
   2393
   2394	/* prevent duplicates */
   2395	if (find_rdev(mddev, rdev->bdev->bd_dev))
   2396		return -EEXIST;
   2397
   2398	if (rdev_read_only(rdev) && mddev->pers)
   2399		return -EROFS;
   2400
   2401	/* make sure rdev->sectors exceeds mddev->dev_sectors */
   2402	if (!test_bit(Journal, &rdev->flags) &&
   2403	    rdev->sectors &&
   2404	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
   2405		if (mddev->pers) {
   2406			/* Cannot change size, so fail
   2407			 * If mddev->level <= 0, then we don't care
   2408			 * about aligning sizes (e.g. linear)
   2409			 */
   2410			if (mddev->level > 0)
   2411				return -ENOSPC;
   2412		} else
   2413			mddev->dev_sectors = rdev->sectors;
   2414	}
   2415
   2416	/* Verify rdev->desc_nr is unique.
   2417	 * If it is -1, assign a free number, else
   2418	 * check number is not in use
   2419	 */
   2420	rcu_read_lock();
   2421	if (rdev->desc_nr < 0) {
   2422		int choice = 0;
   2423		if (mddev->pers)
   2424			choice = mddev->raid_disks;
   2425		while (md_find_rdev_nr_rcu(mddev, choice))
   2426			choice++;
   2427		rdev->desc_nr = choice;
   2428	} else {
   2429		if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
   2430			rcu_read_unlock();
   2431			return -EBUSY;
   2432		}
   2433	}
   2434	rcu_read_unlock();
   2435	if (!test_bit(Journal, &rdev->flags) &&
   2436	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
   2437		pr_warn("md: %s: array is limited to %d devices\n",
   2438			mdname(mddev), mddev->max_disks);
   2439		return -EBUSY;
   2440	}
   2441	bdevname(rdev->bdev,b);
   2442	strreplace(b, '/', '!');
   2443
   2444	rdev->mddev = mddev;
   2445	pr_debug("md: bind<%s>\n", b);
   2446
   2447	if (mddev->raid_disks)
   2448		mddev_create_serial_pool(mddev, rdev, false);
   2449
   2450	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
   2451		goto fail;
   2452
   2453	/* failure here is OK */
   2454	err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
   2455	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
   2456	rdev->sysfs_unack_badblocks =
   2457		sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
   2458	rdev->sysfs_badblocks =
   2459		sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
   2460
   2461	list_add_rcu(&rdev->same_set, &mddev->disks);
   2462	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
   2463
   2464	/* May as well allow recovery to be retried once */
   2465	mddev->recovery_disabled++;
   2466
   2467	return 0;
   2468
   2469 fail:
   2470	pr_warn("md: failed to register dev-%s for %s\n",
   2471		b, mdname(mddev));
   2472	return err;
   2473}
   2474
   2475static void rdev_delayed_delete(struct work_struct *ws)
   2476{
   2477	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
   2478	kobject_del(&rdev->kobj);
   2479	kobject_put(&rdev->kobj);
   2480}
   2481
   2482static void unbind_rdev_from_array(struct md_rdev *rdev)
   2483{
   2484	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
   2485	list_del_rcu(&rdev->same_set);
   2486	pr_debug("md: unbind<%pg>\n", rdev->bdev);
   2487	mddev_destroy_serial_pool(rdev->mddev, rdev, false);
   2488	rdev->mddev = NULL;
   2489	sysfs_remove_link(&rdev->kobj, "block");
   2490	sysfs_put(rdev->sysfs_state);
   2491	sysfs_put(rdev->sysfs_unack_badblocks);
   2492	sysfs_put(rdev->sysfs_badblocks);
   2493	rdev->sysfs_state = NULL;
   2494	rdev->sysfs_unack_badblocks = NULL;
   2495	rdev->sysfs_badblocks = NULL;
   2496	rdev->badblocks.count = 0;
    2497	/* We need to delay this; otherwise we can deadlock when
    2498	 * writing 'remove' to "dev/state".  We also need
   2499	 * to delay it due to rcu usage.
   2500	 */
   2501	synchronize_rcu();
   2502	INIT_WORK(&rdev->del_work, rdev_delayed_delete);
   2503	kobject_get(&rdev->kobj);
   2504	queue_work(md_rdev_misc_wq, &rdev->del_work);
   2505}
   2506
   2507/*
   2508 * prevent the device from being mounted, repartitioned or
   2509 * otherwise reused by a RAID array (or any other kernel
   2510 * subsystem), by bd_claiming the device.
   2511 */
   2512static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
   2513{
   2514	int err = 0;
   2515	struct block_device *bdev;
   2516
   2517	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
   2518				 shared ? (struct md_rdev *)lock_rdev : rdev);
   2519	if (IS_ERR(bdev)) {
   2520		pr_warn("md: could not open device unknown-block(%u,%u).\n",
   2521			MAJOR(dev), MINOR(dev));
   2522		return PTR_ERR(bdev);
   2523	}
   2524	rdev->bdev = bdev;
   2525	return err;
   2526}
   2527
   2528static void unlock_rdev(struct md_rdev *rdev)
   2529{
   2530	struct block_device *bdev = rdev->bdev;
   2531	rdev->bdev = NULL;
   2532	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
   2533}
   2534
   2535void md_autodetect_dev(dev_t dev);
   2536
   2537static void export_rdev(struct md_rdev *rdev)
   2538{
   2539	pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
   2540	md_rdev_clear(rdev);
   2541#ifndef MODULE
   2542	if (test_bit(AutoDetected, &rdev->flags))
   2543		md_autodetect_dev(rdev->bdev->bd_dev);
   2544#endif
   2545	unlock_rdev(rdev);
   2546	kobject_put(&rdev->kobj);
   2547}
   2548
   2549void md_kick_rdev_from_array(struct md_rdev *rdev)
   2550{
   2551	unbind_rdev_from_array(rdev);
   2552	export_rdev(rdev);
   2553}
   2554EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
   2555
   2556static void export_array(struct mddev *mddev)
   2557{
   2558	struct md_rdev *rdev;
   2559
   2560	while (!list_empty(&mddev->disks)) {
   2561		rdev = list_first_entry(&mddev->disks, struct md_rdev,
   2562					same_set);
   2563		md_kick_rdev_from_array(rdev);
   2564	}
   2565	mddev->raid_disks = 0;
   2566	mddev->major_version = 0;
   2567}
   2568
   2569static bool set_in_sync(struct mddev *mddev)
   2570{
   2571	lockdep_assert_held(&mddev->lock);
   2572	if (!mddev->in_sync) {
   2573		mddev->sync_checkers++;
   2574		spin_unlock(&mddev->lock);
   2575		percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
   2576		spin_lock(&mddev->lock);
   2577		if (!mddev->in_sync &&
   2578		    percpu_ref_is_zero(&mddev->writes_pending)) {
   2579			mddev->in_sync = 1;
   2580			/*
   2581			 * Ensure ->in_sync is visible before we clear
   2582			 * ->sync_checkers.
   2583			 */
   2584			smp_mb();
   2585			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
   2586			sysfs_notify_dirent_safe(mddev->sysfs_state);
   2587		}
   2588		if (--mddev->sync_checkers == 0)
   2589			percpu_ref_switch_to_percpu(&mddev->writes_pending);
   2590	}
   2591	if (mddev->safemode == 1)
   2592		mddev->safemode = 0;
   2593	return mddev->in_sync;
   2594}
   2595
   2596static void sync_sbs(struct mddev *mddev, int nospares)
   2597{
   2598	/* Update each superblock (in-memory image), but
   2599	 * if we are allowed to, skip spares which already
   2600	 * have the right event counter, or have one earlier
   2601	 * (which would mean they aren't being marked as dirty
   2602	 * with the rest of the array)
   2603	 */
   2604	struct md_rdev *rdev;
   2605	rdev_for_each(rdev, mddev) {
   2606		if (rdev->sb_events == mddev->events ||
   2607		    (nospares &&
   2608		     rdev->raid_disk < 0 &&
   2609		     rdev->sb_events+1 == mddev->events)) {
   2610			/* Don't update this superblock */
   2611			rdev->sb_loaded = 2;
   2612		} else {
   2613			sync_super(mddev, rdev);
   2614			rdev->sb_loaded = 1;
   2615		}
   2616	}
   2617}
   2618
   2619static bool does_sb_need_changing(struct mddev *mddev)
   2620{
   2621	struct md_rdev *rdev = NULL, *iter;
   2622	struct mdp_superblock_1 *sb;
   2623	int role;
   2624
   2625	/* Find a good rdev */
   2626	rdev_for_each(iter, mddev)
   2627		if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
   2628			rdev = iter;
   2629			break;
   2630		}
   2631
   2632	/* No good device found. */
   2633	if (!rdev)
   2634		return false;
   2635
   2636	sb = page_address(rdev->sb_page);
   2637	/* Check if a device has become faulty or a spare become active */
   2638	rdev_for_each(rdev, mddev) {
   2639		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
   2640		/* Device activated? */
   2641		if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
   2642		    !test_bit(Faulty, &rdev->flags))
   2643			return true;
   2644		/* Device turned faulty? */
   2645		if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
   2646			return true;
   2647	}
   2648
   2649	/* Check if any mddev parameters have changed */
   2650	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
   2651	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
   2652	    (mddev->layout != le32_to_cpu(sb->layout)) ||
   2653	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
   2654	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
   2655		return true;
   2656
   2657	return false;
   2658}
   2659
   2660void md_update_sb(struct mddev *mddev, int force_change)
   2661{
   2662	struct md_rdev *rdev;
   2663	int sync_req;
   2664	int nospares = 0;
   2665	int any_badblocks_changed = 0;
   2666	int ret = -1;
   2667
   2668	if (mddev->ro) {
   2669		if (force_change)
   2670			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   2671		return;
   2672	}
   2673
   2674repeat:
   2675	if (mddev_is_clustered(mddev)) {
   2676		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
   2677			force_change = 1;
   2678		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
   2679			nospares = 1;
   2680		ret = md_cluster_ops->metadata_update_start(mddev);
    2681		/* Has someone else updated the sb? */
   2682		if (!does_sb_need_changing(mddev)) {
   2683			if (ret == 0)
   2684				md_cluster_ops->metadata_update_cancel(mddev);
   2685			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
   2686							 BIT(MD_SB_CHANGE_DEVS) |
   2687							 BIT(MD_SB_CHANGE_CLEAN));
   2688			return;
   2689		}
   2690	}
   2691
   2692	/*
    2693	 * First make sure individual recovery_offsets are correct.
    2694	 * curr_resync_completed can only be used during recovery.
    2695	 * During reshape/resync it might use array-addresses rather
    2696	 * than device addresses.
   2697	 */
   2698	rdev_for_each(rdev, mddev) {
   2699		if (rdev->raid_disk >= 0 &&
   2700		    mddev->delta_disks >= 0 &&
   2701		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
   2702		    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
   2703		    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
   2704		    !test_bit(Journal, &rdev->flags) &&
   2705		    !test_bit(In_sync, &rdev->flags) &&
   2706		    mddev->curr_resync_completed > rdev->recovery_offset)
   2707				rdev->recovery_offset = mddev->curr_resync_completed;
   2708
   2709	}
   2710	if (!mddev->persistent) {
   2711		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
   2712		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   2713		if (!mddev->external) {
   2714			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
   2715			rdev_for_each(rdev, mddev) {
   2716				if (rdev->badblocks.changed) {
   2717					rdev->badblocks.changed = 0;
   2718					ack_all_badblocks(&rdev->badblocks);
   2719					md_error(mddev, rdev);
   2720				}
   2721				clear_bit(Blocked, &rdev->flags);
   2722				clear_bit(BlockedBadBlocks, &rdev->flags);
   2723				wake_up(&rdev->blocked_wait);
   2724			}
   2725		}
   2726		wake_up(&mddev->sb_wait);
   2727		return;
   2728	}
   2729
   2730	spin_lock(&mddev->lock);
   2731
   2732	mddev->utime = ktime_get_real_seconds();
   2733
   2734	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
   2735		force_change = 1;
   2736	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
    2737		/* just a clean <-> dirty transition, possibly leave spares alone,
    2738		 * though if the events count isn't the right even/odd value, we
    2739		 * will have to update the spares after all
   2740		 */
   2741		nospares = 1;
   2742	if (force_change)
   2743		nospares = 0;
   2744	if (mddev->degraded)
   2745		/* If the array is degraded, then skipping spares is both
   2746		 * dangerous and fairly pointless.
   2747		 * Dangerous because a device that was removed from the array
    2748		 * might have an event_count that still looks up-to-date,
   2749		 * so it can be re-added without a resync.
   2750		 * Pointless because if there are any spares to skip,
   2751		 * then a recovery will happen and soon that array won't
   2752		 * be degraded any more and the spare can go back to sleep then.
   2753		 */
   2754		nospares = 0;
   2755
   2756	sync_req = mddev->in_sync;
   2757
   2758	/* If this is just a dirty<->clean transition, and the array is clean
   2759	 * and 'events' is odd, we can roll back to the previous clean state */
   2760	if (nospares
   2761	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
   2762	    && mddev->can_decrease_events
   2763	    && mddev->events != 1) {
   2764		mddev->events--;
   2765		mddev->can_decrease_events = 0;
   2766	} else {
   2767		/* otherwise we have to go forward and ... */
   2768		mddev->events ++;
   2769		mddev->can_decrease_events = nospares;
   2770	}
   2771
   2772	/*
   2773	 * This 64-bit counter should never wrap.
   2774	 * Either we are in around ~1 trillion A.C., assuming
   2775	 * 1 reboot per second, or we have a bug...
   2776	 */
   2777	WARN_ON(mddev->events == 0);
   2778
   2779	rdev_for_each(rdev, mddev) {
   2780		if (rdev->badblocks.changed)
   2781			any_badblocks_changed++;
   2782		if (test_bit(Faulty, &rdev->flags))
   2783			set_bit(FaultRecorded, &rdev->flags);
   2784	}
   2785
   2786	sync_sbs(mddev, nospares);
   2787	spin_unlock(&mddev->lock);
   2788
   2789	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
   2790		 mdname(mddev), mddev->in_sync);
   2791
   2792	if (mddev->queue)
   2793		blk_add_trace_msg(mddev->queue, "md md_update_sb");
   2794rewrite:
   2795	md_bitmap_update_sb(mddev->bitmap);
   2796	rdev_for_each(rdev, mddev) {
   2797		if (rdev->sb_loaded != 1)
   2798			continue; /* no noise on spare devices */
   2799
   2800		if (!test_bit(Faulty, &rdev->flags)) {
   2801			md_super_write(mddev,rdev,
   2802				       rdev->sb_start, rdev->sb_size,
   2803				       rdev->sb_page);
   2804			pr_debug("md: (write) %pg's sb offset: %llu\n",
   2805				 rdev->bdev,
   2806				 (unsigned long long)rdev->sb_start);
   2807			rdev->sb_events = mddev->events;
   2808			if (rdev->badblocks.size) {
   2809				md_super_write(mddev, rdev,
   2810					       rdev->badblocks.sector,
   2811					       rdev->badblocks.size << 9,
   2812					       rdev->bb_page);
   2813				rdev->badblocks.size = 0;
   2814			}
   2815
   2816		} else
   2817			pr_debug("md: %pg (skipping faulty)\n",
   2818				 rdev->bdev);
   2819
   2820		if (mddev->level == LEVEL_MULTIPATH)
   2821			/* only need to write one superblock... */
   2822			break;
   2823	}
   2824	if (md_super_wait(mddev) < 0)
   2825		goto rewrite;
   2826	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
   2827
   2828	if (mddev_is_clustered(mddev) && ret == 0)
   2829		md_cluster_ops->metadata_update_finish(mddev);
   2830
   2831	if (mddev->in_sync != sync_req ||
   2832	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
   2833			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
   2834		/* have to write it out again */
   2835		goto repeat;
   2836	wake_up(&mddev->sb_wait);
   2837	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   2838		sysfs_notify_dirent_safe(mddev->sysfs_completed);
   2839
   2840	rdev_for_each(rdev, mddev) {
   2841		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
   2842			clear_bit(Blocked, &rdev->flags);
   2843
   2844		if (any_badblocks_changed)
   2845			ack_all_badblocks(&rdev->badblocks);
   2846		clear_bit(BlockedBadBlocks, &rdev->flags);
   2847		wake_up(&rdev->blocked_wait);
   2848	}
   2849}
   2850EXPORT_SYMBOL(md_update_sb);
   2851
   2852static int add_bound_rdev(struct md_rdev *rdev)
   2853{
   2854	struct mddev *mddev = rdev->mddev;
   2855	int err = 0;
   2856	bool add_journal = test_bit(Journal, &rdev->flags);
   2857
   2858	if (!mddev->pers->hot_remove_disk || add_journal) {
   2859		/* If there is hot_add_disk but no hot_remove_disk
    2860		 * then added disks are for geometry changes
    2861		 * and should be added immediately.
   2862		 */
   2863		super_types[mddev->major_version].
   2864			validate_super(mddev, rdev);
   2865		if (add_journal)
   2866			mddev_suspend(mddev);
   2867		err = mddev->pers->hot_add_disk(mddev, rdev);
   2868		if (add_journal)
   2869			mddev_resume(mddev);
   2870		if (err) {
   2871			md_kick_rdev_from_array(rdev);
   2872			return err;
   2873		}
   2874	}
   2875	sysfs_notify_dirent_safe(rdev->sysfs_state);
   2876
   2877	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   2878	if (mddev->degraded)
   2879		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   2880	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   2881	md_new_event();
   2882	md_wakeup_thread(mddev->thread);
   2883	return 0;
   2884}
   2885
   2886/* words written to sysfs files may, or may not, be \n terminated.
    2887 * We want to accept either case. For this we use cmd_match.
   2888 */
   2889static int cmd_match(const char *cmd, const char *str)
   2890{
   2891	/* See if cmd, written into a sysfs file, matches
   2892	 * str.  They must either be the same, or cmd can
   2893	 * have a trailing newline
   2894	 */
   2895	while (*cmd && *str && *cmd == *str) {
   2896		cmd++;
   2897		str++;
   2898	}
   2899	if (*cmd == '\n')
   2900		cmd++;
   2901	if (*str || *cmd)
   2902		return 0;
   2903	return 1;
   2904}
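
/*
 * For example, cmd_match("remove\n", "remove") and
 * cmd_match("remove", "remove") both return 1, while
 * cmd_match("removed", "remove") returns 0.
 */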
   2905
   2906struct rdev_sysfs_entry {
   2907	struct attribute attr;
   2908	ssize_t (*show)(struct md_rdev *, char *);
   2909	ssize_t (*store)(struct md_rdev *, const char *, size_t);
   2910};
   2911
   2912static ssize_t
   2913state_show(struct md_rdev *rdev, char *page)
   2914{
   2915	char *sep = ",";
   2916	size_t len = 0;
   2917	unsigned long flags = READ_ONCE(rdev->flags);
   2918
   2919	if (test_bit(Faulty, &flags) ||
   2920	    (!test_bit(ExternalBbl, &flags) &&
   2921	    rdev->badblocks.unacked_exist))
   2922		len += sprintf(page+len, "faulty%s", sep);
   2923	if (test_bit(In_sync, &flags))
   2924		len += sprintf(page+len, "in_sync%s", sep);
   2925	if (test_bit(Journal, &flags))
   2926		len += sprintf(page+len, "journal%s", sep);
   2927	if (test_bit(WriteMostly, &flags))
   2928		len += sprintf(page+len, "write_mostly%s", sep);
   2929	if (test_bit(Blocked, &flags) ||
   2930	    (rdev->badblocks.unacked_exist
   2931	     && !test_bit(Faulty, &flags)))
   2932		len += sprintf(page+len, "blocked%s", sep);
   2933	if (!test_bit(Faulty, &flags) &&
   2934	    !test_bit(Journal, &flags) &&
   2935	    !test_bit(In_sync, &flags))
   2936		len += sprintf(page+len, "spare%s", sep);
   2937	if (test_bit(WriteErrorSeen, &flags))
   2938		len += sprintf(page+len, "write_error%s", sep);
   2939	if (test_bit(WantReplacement, &flags))
   2940		len += sprintf(page+len, "want_replacement%s", sep);
   2941	if (test_bit(Replacement, &flags))
   2942		len += sprintf(page+len, "replacement%s", sep);
   2943	if (test_bit(ExternalBbl, &flags))
   2944		len += sprintf(page+len, "external_bbl%s", sep);
   2945	if (test_bit(FailFast, &flags))
   2946		len += sprintf(page+len, "failfast%s", sep);
   2947
   2948	if (len)
   2949		len -= strlen(sep);
   2950
   2951	return len+sprintf(page+len, "\n");
   2952}
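
/*
 * Illustrative output: a healthy write-mostly member reads back as
 * "in_sync,write_mostly\n", while a failed member reads back as "faulty\n".
 */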
   2953
   2954static ssize_t
   2955state_store(struct md_rdev *rdev, const char *buf, size_t len)
   2956{
   2957	/* can write
   2958	 *  faulty  - simulates an error
   2959	 *  remove  - disconnects the device
   2960	 *  writemostly - sets write_mostly
   2961	 *  -writemostly - clears write_mostly
   2962	 *  blocked - sets the Blocked flags
   2963	 *  -blocked - clears the Blocked and possibly simulates an error
   2964	 *  insync - sets Insync providing device isn't active
   2965	 *  -insync - clear Insync for a device with a slot assigned,
   2966	 *            so that it gets rebuilt based on bitmap
   2967	 *  write_error - sets WriteErrorSeen
   2968	 *  -write_error - clears WriteErrorSeen
   2969	 *  {,-}failfast - set/clear FailFast
   2970	 */
   2971
   2972	struct mddev *mddev = rdev->mddev;
   2973	int err = -EINVAL;
   2974	bool need_update_sb = false;
   2975
   2976	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
   2977		md_error(rdev->mddev, rdev);
   2978
   2979		if (test_bit(MD_BROKEN, &rdev->mddev->flags))
   2980			err = -EBUSY;
   2981		else
   2982			err = 0;
   2983	} else if (cmd_match(buf, "remove")) {
   2984		if (rdev->mddev->pers) {
   2985			clear_bit(Blocked, &rdev->flags);
   2986			remove_and_add_spares(rdev->mddev, rdev);
   2987		}
   2988		if (rdev->raid_disk >= 0)
   2989			err = -EBUSY;
   2990		else {
   2991			err = 0;
   2992			if (mddev_is_clustered(mddev))
   2993				err = md_cluster_ops->remove_disk(mddev, rdev);
   2994
   2995			if (err == 0) {
   2996				md_kick_rdev_from_array(rdev);
   2997				if (mddev->pers) {
   2998					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   2999					md_wakeup_thread(mddev->thread);
   3000				}
   3001				md_new_event();
   3002			}
   3003		}
   3004	} else if (cmd_match(buf, "writemostly")) {
   3005		set_bit(WriteMostly, &rdev->flags);
   3006		mddev_create_serial_pool(rdev->mddev, rdev, false);
   3007		need_update_sb = true;
   3008		err = 0;
   3009	} else if (cmd_match(buf, "-writemostly")) {
   3010		mddev_destroy_serial_pool(rdev->mddev, rdev, false);
   3011		clear_bit(WriteMostly, &rdev->flags);
   3012		need_update_sb = true;
   3013		err = 0;
   3014	} else if (cmd_match(buf, "blocked")) {
   3015		set_bit(Blocked, &rdev->flags);
   3016		err = 0;
   3017	} else if (cmd_match(buf, "-blocked")) {
   3018		if (!test_bit(Faulty, &rdev->flags) &&
   3019		    !test_bit(ExternalBbl, &rdev->flags) &&
   3020		    rdev->badblocks.unacked_exist) {
   3021			/* metadata handler doesn't understand badblocks,
   3022			 * so we need to fail the device
   3023			 */
   3024			md_error(rdev->mddev, rdev);
   3025		}
   3026		clear_bit(Blocked, &rdev->flags);
   3027		clear_bit(BlockedBadBlocks, &rdev->flags);
   3028		wake_up(&rdev->blocked_wait);
   3029		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
   3030		md_wakeup_thread(rdev->mddev->thread);
   3031
   3032		err = 0;
   3033	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
   3034		set_bit(In_sync, &rdev->flags);
   3035		err = 0;
   3036	} else if (cmd_match(buf, "failfast")) {
   3037		set_bit(FailFast, &rdev->flags);
   3038		need_update_sb = true;
   3039		err = 0;
   3040	} else if (cmd_match(buf, "-failfast")) {
   3041		clear_bit(FailFast, &rdev->flags);
   3042		need_update_sb = true;
   3043		err = 0;
   3044	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
   3045		   !test_bit(Journal, &rdev->flags)) {
   3046		if (rdev->mddev->pers == NULL) {
   3047			clear_bit(In_sync, &rdev->flags);
   3048			rdev->saved_raid_disk = rdev->raid_disk;
   3049			rdev->raid_disk = -1;
   3050			err = 0;
   3051		}
   3052	} else if (cmd_match(buf, "write_error")) {
   3053		set_bit(WriteErrorSeen, &rdev->flags);
   3054		err = 0;
   3055	} else if (cmd_match(buf, "-write_error")) {
   3056		clear_bit(WriteErrorSeen, &rdev->flags);
   3057		err = 0;
   3058	} else if (cmd_match(buf, "want_replacement")) {
   3059		/* Any non-spare device that is not a replacement can
   3060		 * become want_replacement at any time, but we then need to
   3061		 * check if recovery is needed.
   3062		 */
   3063		if (rdev->raid_disk >= 0 &&
   3064		    !test_bit(Journal, &rdev->flags) &&
   3065		    !test_bit(Replacement, &rdev->flags))
   3066			set_bit(WantReplacement, &rdev->flags);
   3067		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
   3068		md_wakeup_thread(rdev->mddev->thread);
   3069		err = 0;
   3070	} else if (cmd_match(buf, "-want_replacement")) {
   3071		/* Clearing 'want_replacement' is always allowed.
    3072		 * Once replacement starts it is too late though.
   3073		 */
   3074		err = 0;
   3075		clear_bit(WantReplacement, &rdev->flags);
   3076	} else if (cmd_match(buf, "replacement")) {
   3077		/* Can only set a device as a replacement when array has not
   3078		 * yet been started.  Once running, replacement is automatic
   3079		 * from spares, or by assigning 'slot'.
   3080		 */
   3081		if (rdev->mddev->pers)
   3082			err = -EBUSY;
   3083		else {
   3084			set_bit(Replacement, &rdev->flags);
   3085			err = 0;
   3086		}
   3087	} else if (cmd_match(buf, "-replacement")) {
   3088		/* Similarly, can only clear Replacement before start */
   3089		if (rdev->mddev->pers)
   3090			err = -EBUSY;
   3091		else {
   3092			clear_bit(Replacement, &rdev->flags);
   3093			err = 0;
   3094		}
   3095	} else if (cmd_match(buf, "re-add")) {
   3096		if (!rdev->mddev->pers)
   3097			err = -EINVAL;
   3098		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
   3099				rdev->saved_raid_disk >= 0) {
   3100			/* clear_bit is performed _after_ all the devices
   3101			 * have their local Faulty bit cleared. If any writes
   3102			 * happen in the meantime in the local node, they
   3103			 * will land in the local bitmap, which will be synced
   3104			 * by this node eventually
   3105			 */
   3106			if (!mddev_is_clustered(rdev->mddev) ||
   3107			    (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
   3108				clear_bit(Faulty, &rdev->flags);
   3109				err = add_bound_rdev(rdev);
   3110			}
   3111		} else
   3112			err = -EBUSY;
   3113	} else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
   3114		set_bit(ExternalBbl, &rdev->flags);
   3115		rdev->badblocks.shift = 0;
   3116		err = 0;
   3117	} else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
   3118		clear_bit(ExternalBbl, &rdev->flags);
   3119		err = 0;
   3120	}
   3121	if (need_update_sb)
   3122		md_update_sb(mddev, 1);
   3123	if (!err)
   3124		sysfs_notify_dirent_safe(rdev->sysfs_state);
   3125	return err ? err : len;
   3126}
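
/*
 * Illustrative usage (paths assume an array md0 with a member dev-sdb1):
 * "echo faulty > /sys/block/md0/md/dev-sdb1/state" simulates a failure,
 * and "echo remove > .../state" then detaches the device once it no
 * longer occupies a slot, subject to the checks above.
 */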
   3127static struct rdev_sysfs_entry rdev_state =
   3128__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
   3129
   3130static ssize_t
   3131errors_show(struct md_rdev *rdev, char *page)
   3132{
   3133	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
   3134}
   3135
   3136static ssize_t
   3137errors_store(struct md_rdev *rdev, const char *buf, size_t len)
   3138{
   3139	unsigned int n;
   3140	int rv;
   3141
   3142	rv = kstrtouint(buf, 10, &n);
   3143	if (rv < 0)
   3144		return rv;
   3145	atomic_set(&rdev->corrected_errors, n);
   3146	return len;
   3147}
   3148static struct rdev_sysfs_entry rdev_errors =
   3149__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
   3150
   3151static ssize_t
   3152slot_show(struct md_rdev *rdev, char *page)
   3153{
   3154	if (test_bit(Journal, &rdev->flags))
   3155		return sprintf(page, "journal\n");
   3156	else if (rdev->raid_disk < 0)
   3157		return sprintf(page, "none\n");
   3158	else
   3159		return sprintf(page, "%d\n", rdev->raid_disk);
   3160}
   3161
   3162static ssize_t
   3163slot_store(struct md_rdev *rdev, const char *buf, size_t len)
   3164{
   3165	int slot;
   3166	int err;
   3167
   3168	if (test_bit(Journal, &rdev->flags))
   3169		return -EBUSY;
   3170	if (strncmp(buf, "none", 4)==0)
   3171		slot = -1;
   3172	else {
   3173		err = kstrtouint(buf, 10, (unsigned int *)&slot);
   3174		if (err < 0)
   3175			return err;
   3176	}
   3177	if (rdev->mddev->pers && slot == -1) {
   3178		/* Setting 'slot' on an active array requires also
   3179		 * updating the 'rd%d' link, and communicating
   3180		 * with the personality with ->hot_*_disk.
   3181		 * For now we only support removing
   3182		 * failed/spare devices.  This normally happens automatically,
   3183		 * but not when the metadata is externally managed.
   3184		 */
   3185		if (rdev->raid_disk == -1)
   3186			return -EEXIST;
   3187		/* personality does all needed checks */
   3188		if (rdev->mddev->pers->hot_remove_disk == NULL)
   3189			return -EINVAL;
   3190		clear_bit(Blocked, &rdev->flags);
   3191		remove_and_add_spares(rdev->mddev, rdev);
   3192		if (rdev->raid_disk >= 0)
   3193			return -EBUSY;
   3194		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
   3195		md_wakeup_thread(rdev->mddev->thread);
   3196	} else if (rdev->mddev->pers) {
   3197		/* Activating a spare .. or possibly reactivating
   3198		 * if we ever get bitmaps working here.
   3199		 */
   3200		int err;
   3201
   3202		if (rdev->raid_disk != -1)
   3203			return -EBUSY;
   3204
   3205		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
   3206			return -EBUSY;
   3207
   3208		if (rdev->mddev->pers->hot_add_disk == NULL)
   3209			return -EINVAL;
   3210
   3211		if (slot >= rdev->mddev->raid_disks &&
   3212		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
   3213			return -ENOSPC;
   3214
   3215		rdev->raid_disk = slot;
   3216		if (test_bit(In_sync, &rdev->flags))
   3217			rdev->saved_raid_disk = slot;
   3218		else
   3219			rdev->saved_raid_disk = -1;
   3220		clear_bit(In_sync, &rdev->flags);
   3221		clear_bit(Bitmap_sync, &rdev->flags);
   3222		err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
   3223		if (err) {
   3224			rdev->raid_disk = -1;
   3225			return err;
   3226		} else
   3227			sysfs_notify_dirent_safe(rdev->sysfs_state);
   3228		/* failure here is OK */;
   3229		sysfs_link_rdev(rdev->mddev, rdev);
   3230		/* don't wakeup anyone, leave that to userspace. */
   3231	} else {
   3232		if (slot >= rdev->mddev->raid_disks &&
   3233		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
   3234			return -ENOSPC;
   3235		rdev->raid_disk = slot;
   3236		/* assume it is working */
   3237		clear_bit(Faulty, &rdev->flags);
   3238		clear_bit(WriteMostly, &rdev->flags);
   3239		set_bit(In_sync, &rdev->flags);
   3240		sysfs_notify_dirent_safe(rdev->sysfs_state);
   3241	}
   3242	return len;
   3243}
   3244
   3245static struct rdev_sysfs_entry rdev_slot =
   3246__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
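/*
 * Illustrative userspace sketch (not part of the driver): removing a
 * failed/spare member from an active, externally managed array by writing
 * "none" to the per-rdev 'slot' attribute, as slot_store() above handles.
 * The sysfs path is an assumption for the example (array md0, member sda).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path = "/sys/block/md0/md/dev-sda/slot";
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (write(fd, "none", 4) != 4)	// slot_store() may return -EBUSY etc.
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */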
   3247
   3248static ssize_t
   3249offset_show(struct md_rdev *rdev, char *page)
   3250{
   3251	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
   3252}
   3253
   3254static ssize_t
   3255offset_store(struct md_rdev *rdev, const char *buf, size_t len)
   3256{
   3257	unsigned long long offset;
   3258	if (kstrtoull(buf, 10, &offset) < 0)
   3259		return -EINVAL;
   3260	if (rdev->mddev->pers && rdev->raid_disk >= 0)
   3261		return -EBUSY;
   3262	if (rdev->sectors && rdev->mddev->external)
   3263		/* Must set offset before size, so overlap checks
   3264		 * can be sane */
   3265		return -EBUSY;
   3266	rdev->data_offset = offset;
   3267	rdev->new_data_offset = offset;
   3268	return len;
   3269}
   3270
   3271static struct rdev_sysfs_entry rdev_offset =
   3272__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
   3273
   3274static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
   3275{
   3276	return sprintf(page, "%llu\n",
   3277		       (unsigned long long)rdev->new_data_offset);
   3278}
   3279
   3280static ssize_t new_offset_store(struct md_rdev *rdev,
   3281				const char *buf, size_t len)
   3282{
   3283	unsigned long long new_offset;
   3284	struct mddev *mddev = rdev->mddev;
   3285
   3286	if (kstrtoull(buf, 10, &new_offset) < 0)
   3287		return -EINVAL;
   3288
   3289	if (mddev->sync_thread ||
   3290	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
   3291		return -EBUSY;
   3292	if (new_offset == rdev->data_offset)
   3293		/* reset is always permitted */
   3294		;
   3295	else if (new_offset > rdev->data_offset) {
   3296		/* must not push array size beyond rdev_sectors */
   3297		if (new_offset - rdev->data_offset
   3298		    + mddev->dev_sectors > rdev->sectors)
   3299				return -E2BIG;
   3300	}
   3301	/* Metadata worries about other space details. */
   3302
   3303	/* decreasing the offset is inconsistent with a backwards
   3304	 * reshape.
   3305	 */
   3306	if (new_offset < rdev->data_offset &&
   3307	    mddev->reshape_backwards)
   3308		return -EINVAL;
   3309	/* Increasing offset is inconsistent with forwards
   3310	 * reshape.  reshape_direction should be set to
   3311	 * 'backwards' first.
   3312	 */
   3313	if (new_offset > rdev->data_offset &&
   3314	    !mddev->reshape_backwards)
   3315		return -EINVAL;
   3316
   3317	if (mddev->pers && mddev->persistent &&
   3318	    !super_types[mddev->major_version]
   3319	    .allow_new_offset(rdev, new_offset))
   3320		return -E2BIG;
   3321	rdev->new_data_offset = new_offset;
   3322	if (new_offset > rdev->data_offset)
   3323		mddev->reshape_backwards = 1;
   3324	else if (new_offset < rdev->data_offset)
   3325		mddev->reshape_backwards = 0;
   3326
   3327	return len;
   3328}
   3329static struct rdev_sysfs_entry rdev_new_offset =
   3330__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
   3331
   3332static ssize_t
   3333rdev_size_show(struct md_rdev *rdev, char *page)
   3334{
   3335	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
   3336}
   3337
   3338static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
   3339{
   3340	/* check if two start/length pairs overlap */
   3341	if (s1+l1 <= s2)
   3342		return 0;
   3343	if (s2+l2 <= s1)
   3344		return 0;
   3345	return 1;
   3346}
   3347
   3348static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
   3349{
   3350	unsigned long long blocks;
   3351	sector_t new;
   3352
   3353	if (kstrtoull(buf, 10, &blocks) < 0)
   3354		return -EINVAL;
   3355
   3356	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
   3357		return -EINVAL; /* sector conversion overflow */
   3358
   3359	new = blocks * 2;
   3360	if (new != blocks * 2)
   3361		return -EINVAL; /* unsigned long long to sector_t overflow */
   3362
   3363	*sectors = new;
   3364	return 0;
   3365}
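/*
 * Worked example (illustrative, not part of the driver): rdev sizes in
 * sysfs are expressed in 1 KiB blocks while the kernel tracks 512-byte
 * sectors, so strict_blocks_to_sectors() doubles the value.  It rejects an
 * input whose top bit is set (doubling would overflow the 64-bit value)
 * and an input that does not survive conversion to sector_t.
 *
 *	"1048576"              -> 1 GiB -> 2097152 sectors
 *	"9223372036854775808"  -> top bit set -> -EINVAL
 */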
   3366
   3367static ssize_t
   3368rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
   3369{
   3370	struct mddev *my_mddev = rdev->mddev;
   3371	sector_t oldsectors = rdev->sectors;
   3372	sector_t sectors;
   3373
   3374	if (test_bit(Journal, &rdev->flags))
   3375		return -EBUSY;
   3376	if (strict_blocks_to_sectors(buf, &sectors) < 0)
   3377		return -EINVAL;
   3378	if (rdev->data_offset != rdev->new_data_offset)
   3379		return -EINVAL; /* too confusing */
   3380	if (my_mddev->pers && rdev->raid_disk >= 0) {
   3381		if (my_mddev->persistent) {
   3382			sectors = super_types[my_mddev->major_version].
   3383				rdev_size_change(rdev, sectors);
   3384			if (!sectors)
   3385				return -EBUSY;
   3386		} else if (!sectors)
   3387			sectors = bdev_nr_sectors(rdev->bdev) -
   3388				rdev->data_offset;
   3389		if (!my_mddev->pers->resize)
   3390			/* Cannot change size for RAID0 or Linear etc */
   3391			return -EINVAL;
   3392	}
   3393	if (sectors < my_mddev->dev_sectors)
   3394		return -EINVAL; /* component must fit device */
   3395
   3396	rdev->sectors = sectors;
   3397	if (sectors > oldsectors && my_mddev->external) {
   3398		/* Need to check that all other rdevs with the same
   3399		 * ->bdev do not overlap.  'rcu' is sufficient to walk
   3400		 * the rdev lists safely.
   3401		 * This check does not provide a hard guarantee, it
   3402		 * just helps avoid dangerous mistakes.
   3403		 */
   3404		struct mddev *mddev;
   3405		int overlap = 0;
   3406		struct list_head *tmp;
   3407
   3408		rcu_read_lock();
   3409		for_each_mddev(mddev, tmp) {
   3410			struct md_rdev *rdev2;
   3411
   3412			rdev_for_each(rdev2, mddev)
   3413				if (rdev->bdev == rdev2->bdev &&
   3414				    rdev != rdev2 &&
   3415				    overlaps(rdev->data_offset, rdev->sectors,
   3416					     rdev2->data_offset,
   3417					     rdev2->sectors)) {
   3418					overlap = 1;
   3419					break;
   3420				}
   3421			if (overlap) {
   3422				mddev_put(mddev);
   3423				break;
   3424			}
   3425		}
   3426		rcu_read_unlock();
   3427		if (overlap) {
   3428			/* Someone else could have slipped in a size
   3429			 * change here, but doing so is just silly.
   3430			 * We put oldsectors back because we *know* it is
   3431			 * safe, and trust userspace not to race with
   3432			 * itself
   3433			 */
   3434			rdev->sectors = oldsectors;
   3435			return -EBUSY;
   3436		}
   3437	}
   3438	return len;
   3439}
   3440
   3441static struct rdev_sysfs_entry rdev_size =
   3442__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
   3443
   3444static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
   3445{
   3446	unsigned long long recovery_start = rdev->recovery_offset;
   3447
   3448	if (test_bit(In_sync, &rdev->flags) ||
   3449	    recovery_start == MaxSector)
   3450		return sprintf(page, "none\n");
   3451
   3452	return sprintf(page, "%llu\n", recovery_start);
   3453}
   3454
   3455static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
   3456{
   3457	unsigned long long recovery_start;
   3458
   3459	if (cmd_match(buf, "none"))
   3460		recovery_start = MaxSector;
   3461	else if (kstrtoull(buf, 10, &recovery_start))
   3462		return -EINVAL;
   3463
   3464	if (rdev->mddev->pers &&
   3465	    rdev->raid_disk >= 0)
   3466		return -EBUSY;
   3467
   3468	rdev->recovery_offset = recovery_start;
   3469	if (recovery_start == MaxSector)
   3470		set_bit(In_sync, &rdev->flags);
   3471	else
   3472		clear_bit(In_sync, &rdev->flags);
   3473	return len;
   3474}
   3475
   3476static struct rdev_sysfs_entry rdev_recovery_start =
   3477__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
   3478
   3479/* sysfs access to bad-blocks list.
   3480 * We present two files.
   3481 * 'bad-blocks' lists sector numbers and lengths of ranges that
   3482 *    are recorded as bad.  The list is truncated to fit within
   3483 *    the one-page limit of sysfs.
   3484 *    Writing "sector length" to this file adds an acknowledged
   3485 *    bad block to the list.

   3486 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
   3487 *    been acknowledged.  Writing to this file adds bad blocks
   3488 *    without acknowledging them.  This is largely for testing.
   3489 */
   3490static ssize_t bb_show(struct md_rdev *rdev, char *page)
   3491{
   3492	return badblocks_show(&rdev->badblocks, page, 0);
   3493}
   3494static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
   3495{
   3496	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
   3497	/* Maybe that ack was all we needed */
   3498	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
   3499		wake_up(&rdev->blocked_wait);
   3500	return rv;
   3501}
   3502static struct rdev_sysfs_entry rdev_bad_blocks =
   3503__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
   3504
   3505static ssize_t ubb_show(struct md_rdev *rdev, char *page)
   3506{
   3507	return badblocks_show(&rdev->badblocks, page, 1);
   3508}
   3509static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
   3510{
   3511	return badblocks_store(&rdev->badblocks, page, len, 1);
   3512}
   3513static struct rdev_sysfs_entry rdev_unack_bad_blocks =
   3514__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
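/*
 * Illustrative userspace sketch (not part of the driver): recording an
 * acknowledged bad range by writing "sector length" to the per-rdev
 * 'bad_blocks' attribute described above.  The path is an assumption for
 * the example (array md0, member sdb).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *msg = "2048 16\n";	// 16 sectors bad, starting at sector 2048
 *		int fd = open("/sys/block/md0/md/dev-sdb/bad_blocks", O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (write(fd, msg, strlen(msg)) < 0)
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */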
   3515
   3516static ssize_t
   3517ppl_sector_show(struct md_rdev *rdev, char *page)
   3518{
   3519	return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
   3520}
   3521
   3522static ssize_t
   3523ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
   3524{
   3525	unsigned long long sector;
   3526
   3527	if (kstrtoull(buf, 10, &sector) < 0)
   3528		return -EINVAL;
   3529	if (sector != (sector_t)sector)
   3530		return -EINVAL;
   3531
   3532	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
   3533	    rdev->raid_disk >= 0)
   3534		return -EBUSY;
   3535
   3536	if (rdev->mddev->persistent) {
   3537		if (rdev->mddev->major_version == 0)
   3538			return -EINVAL;
   3539		if ((sector > rdev->sb_start &&
   3540		     sector - rdev->sb_start > S16_MAX) ||
   3541		    (sector < rdev->sb_start &&
   3542		     rdev->sb_start - sector > -S16_MIN))
   3543			return -EINVAL;
   3544		rdev->ppl.offset = sector - rdev->sb_start;
   3545	} else if (!rdev->mddev->external) {
   3546		return -EBUSY;
   3547	}
   3548	rdev->ppl.sector = sector;
   3549	return len;
   3550}
   3551
   3552static struct rdev_sysfs_entry rdev_ppl_sector =
   3553__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
   3554
   3555static ssize_t
   3556ppl_size_show(struct md_rdev *rdev, char *page)
   3557{
   3558	return sprintf(page, "%u\n", rdev->ppl.size);
   3559}
   3560
   3561static ssize_t
   3562ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
   3563{
   3564	unsigned int size;
   3565
   3566	if (kstrtouint(buf, 10, &size) < 0)
   3567		return -EINVAL;
   3568
   3569	if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
   3570	    rdev->raid_disk >= 0)
   3571		return -EBUSY;
   3572
   3573	if (rdev->mddev->persistent) {
   3574		if (rdev->mddev->major_version == 0)
   3575			return -EINVAL;
   3576		if (size > U16_MAX)
   3577			return -EINVAL;
   3578	} else if (!rdev->mddev->external) {
   3579		return -EBUSY;
   3580	}
   3581	rdev->ppl.size = size;
   3582	return len;
   3583}
   3584
   3585static struct rdev_sysfs_entry rdev_ppl_size =
   3586__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
   3587
   3588static struct attribute *rdev_default_attrs[] = {
   3589	&rdev_state.attr,
   3590	&rdev_errors.attr,
   3591	&rdev_slot.attr,
   3592	&rdev_offset.attr,
   3593	&rdev_new_offset.attr,
   3594	&rdev_size.attr,
   3595	&rdev_recovery_start.attr,
   3596	&rdev_bad_blocks.attr,
   3597	&rdev_unack_bad_blocks.attr,
   3598	&rdev_ppl_sector.attr,
   3599	&rdev_ppl_size.attr,
   3600	NULL,
   3601};
   3602ATTRIBUTE_GROUPS(rdev_default);
   3603static ssize_t
   3604rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
   3605{
   3606	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
   3607	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
   3608
   3609	if (!entry->show)
   3610		return -EIO;
   3611	if (!rdev->mddev)
   3612		return -ENODEV;
   3613	return entry->show(rdev, page);
   3614}
   3615
   3616static ssize_t
   3617rdev_attr_store(struct kobject *kobj, struct attribute *attr,
   3618	      const char *page, size_t length)
   3619{
   3620	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
   3621	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
   3622	ssize_t rv;
   3623	struct mddev *mddev = rdev->mddev;
   3624
   3625	if (!entry->store)
   3626		return -EIO;
   3627	if (!capable(CAP_SYS_ADMIN))
   3628		return -EACCES;
   3629	rv = mddev ? mddev_lock(mddev) : -ENODEV;
   3630	if (!rv) {
   3631		if (rdev->mddev == NULL)
   3632			rv = -ENODEV;
   3633		else
   3634			rv = entry->store(rdev, page, length);
   3635		mddev_unlock(mddev);
   3636	}
   3637	return rv;
   3638}
   3639
   3640static void rdev_free(struct kobject *ko)
   3641{
   3642	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
   3643	kfree(rdev);
   3644}
   3645static const struct sysfs_ops rdev_sysfs_ops = {
   3646	.show		= rdev_attr_show,
   3647	.store		= rdev_attr_store,
   3648};
   3649static struct kobj_type rdev_ktype = {
   3650	.release	= rdev_free,
   3651	.sysfs_ops	= &rdev_sysfs_ops,
   3652	.default_groups	= rdev_default_groups,
   3653};
   3654
   3655int md_rdev_init(struct md_rdev *rdev)
   3656{
   3657	rdev->desc_nr = -1;
   3658	rdev->saved_raid_disk = -1;
   3659	rdev->raid_disk = -1;
   3660	rdev->flags = 0;
   3661	rdev->data_offset = 0;
   3662	rdev->new_data_offset = 0;
   3663	rdev->sb_events = 0;
   3664	rdev->last_read_error = 0;
   3665	rdev->sb_loaded = 0;
   3666	rdev->bb_page = NULL;
   3667	atomic_set(&rdev->nr_pending, 0);
   3668	atomic_set(&rdev->read_errors, 0);
   3669	atomic_set(&rdev->corrected_errors, 0);
   3670
   3671	INIT_LIST_HEAD(&rdev->same_set);
   3672	init_waitqueue_head(&rdev->blocked_wait);
   3673
   3674	/* Add space to store bad block list.
   3675	 * This reserves the space even on arrays where it cannot
   3676	 * be used - I wonder if that matters
   3677	 */
   3678	return badblocks_init(&rdev->badblocks, 0);
   3679}
   3680EXPORT_SYMBOL_GPL(md_rdev_init);
   3681/*
   3682 * Import a device. If 'super_format' >= 0, then sanity check the superblock
   3683 *
   3684 * mark the device faulty if:
   3685 *
   3686 *   - the device is nonexistent (zero size)
   3687 *   - the device has no valid superblock
   3688 *
   3689 * a faulty rdev _never_ has rdev->sb set.
   3690 */
   3691static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
   3692{
   3693	int err;
   3694	struct md_rdev *rdev;
   3695	sector_t size;
   3696
   3697	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
   3698	if (!rdev)
   3699		return ERR_PTR(-ENOMEM);
   3700
   3701	err = md_rdev_init(rdev);
   3702	if (err)
   3703		goto abort_free;
   3704	err = alloc_disk_sb(rdev);
   3705	if (err)
   3706		goto abort_free;
   3707
   3708	err = lock_rdev(rdev, newdev, super_format == -2);
   3709	if (err)
   3710		goto abort_free;
   3711
   3712	kobject_init(&rdev->kobj, &rdev_ktype);
   3713
   3714	size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
   3715	if (!size) {
   3716		pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
   3717			rdev->bdev);
   3718		err = -EINVAL;
   3719		goto abort_free;
   3720	}
   3721
   3722	if (super_format >= 0) {
   3723		err = super_types[super_format].
   3724			load_super(rdev, NULL, super_minor);
   3725		if (err == -EINVAL) {
   3726			pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
   3727				rdev->bdev,
   3728				super_format, super_minor);
   3729			goto abort_free;
   3730		}
   3731		if (err < 0) {
   3732			pr_warn("md: could not read %pg's sb, not importing!\n",
   3733				rdev->bdev);
   3734			goto abort_free;
   3735		}
   3736	}
   3737
   3738	return rdev;
   3739
   3740abort_free:
   3741	if (rdev->bdev)
   3742		unlock_rdev(rdev);
   3743	md_rdev_clear(rdev);
   3744	kfree(rdev);
   3745	return ERR_PTR(err);
   3746}
   3747
   3748/*
   3749 * Check a full RAID array for plausibility
   3750 */
   3751
   3752static int analyze_sbs(struct mddev *mddev)
   3753{
   3754	int i;
   3755	struct md_rdev *rdev, *freshest, *tmp;
   3756
   3757	freshest = NULL;
   3758	rdev_for_each_safe(rdev, tmp, mddev)
   3759		switch (super_types[mddev->major_version].
   3760			load_super(rdev, freshest, mddev->minor_version)) {
   3761		case 1:
   3762			freshest = rdev;
   3763			break;
   3764		case 0:
   3765			break;
   3766		default:
   3767			pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
   3768				rdev->bdev);
   3769			md_kick_rdev_from_array(rdev);
   3770		}
   3771
   3772	/* Cannot find a valid fresh disk */
   3773	if (!freshest) {
   3774		pr_warn("md: cannot find a valid disk\n");
   3775		return -EINVAL;
   3776	}
   3777
   3778	super_types[mddev->major_version].
   3779		validate_super(mddev, freshest);
   3780
   3781	i = 0;
   3782	rdev_for_each_safe(rdev, tmp, mddev) {
   3783		if (mddev->max_disks &&
   3784		    (rdev->desc_nr >= mddev->max_disks ||
   3785		     i > mddev->max_disks)) {
   3786			pr_warn("md: %s: %pg: only %d devices permitted\n",
   3787				mdname(mddev), rdev->bdev,
   3788				mddev->max_disks);
   3789			md_kick_rdev_from_array(rdev);
   3790			continue;
   3791		}
   3792		if (rdev != freshest) {
   3793			if (super_types[mddev->major_version].
   3794			    validate_super(mddev, rdev)) {
   3795				pr_warn("md: kicking non-fresh %pg from array!\n",
   3796					rdev->bdev);
   3797				md_kick_rdev_from_array(rdev);
   3798				continue;
   3799			}
   3800		}
   3801		if (mddev->level == LEVEL_MULTIPATH) {
   3802			rdev->desc_nr = i++;
   3803			rdev->raid_disk = rdev->desc_nr;
   3804			set_bit(In_sync, &rdev->flags);
   3805		} else if (rdev->raid_disk >=
   3806			    (mddev->raid_disks - min(0, mddev->delta_disks)) &&
   3807			   !test_bit(Journal, &rdev->flags)) {
   3808			rdev->raid_disk = -1;
   3809			clear_bit(In_sync, &rdev->flags);
   3810		}
   3811	}
   3812
   3813	return 0;
   3814}
   3815
   3816/* Read a fixed-point number.
   3817 * Numbers in sysfs attributes should be in "standard" units where
   3818 * possible, so time should be in seconds.
   3819 * However we internally use a much smaller unit such as
   3820 * milliseconds or jiffies.
   3821 * This function takes a decimal number with a possible fractional
   3822 * component, and produces an integer which is the result of
   3823 * multiplying that number by 10^'scale', all without any
   3824 * floating-point arithmetic.
   3825 */
   3826int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
   3827{
   3828	unsigned long result = 0;
   3829	long decimals = -1;
   3830	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
   3831		if (*cp == '.')
   3832			decimals = 0;
   3833		else if (decimals < scale) {
   3834			unsigned int value;
   3835			value = *cp - '0';
   3836			result = result * 10 + value;
   3837			if (decimals >= 0)
   3838				decimals++;
   3839		}
   3840		cp++;
   3841	}
   3842	if (*cp == '\n')
   3843		cp++;
   3844	if (*cp)
   3845		return -EINVAL;
   3846	if (decimals < 0)
   3847		decimals = 0;
   3848	*res = result * int_pow(10, scale - decimals);
   3849	return 0;
   3850}
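/*
 * Worked examples (illustrative, not part of the driver), with scale == 3,
 * i.e. seconds scaled to milliseconds as safe_delay_store() below uses it:
 *
 *	"5"      -> result 5,   decimals 0 -> 5   * 10^(3-0) = 5000
 *	"5.3"    -> result 53,  decimals 1 -> 53  * 10^(3-1) = 5300
 *	"0.125"  -> result 125, decimals 3 -> 125 * 10^(3-3) = 125
 *	"0.1239" -> digits past the third decimal place are dropped -> 123
 */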
   3851
   3852static ssize_t
   3853safe_delay_show(struct mddev *mddev, char *page)
   3854{
   3855	int msec = (mddev->safemode_delay*1000)/HZ;
   3856	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
   3857}
   3858static ssize_t
   3859safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
   3860{
   3861	unsigned long msec;
   3862
   3863	if (mddev_is_clustered(mddev)) {
   3864		pr_warn("md: Safemode is disabled for clustered mode\n");
   3865		return -EINVAL;
   3866	}
   3867
   3868	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
   3869		return -EINVAL;
   3870	if (msec == 0)
   3871		mddev->safemode_delay = 0;
   3872	else {
   3873		unsigned long old_delay = mddev->safemode_delay;
   3874		unsigned long new_delay = (msec*HZ)/1000;
   3875
   3876		if (new_delay == 0)
   3877			new_delay = 1;
   3878		mddev->safemode_delay = new_delay;
   3879		if (new_delay < old_delay || old_delay == 0)
   3880			mod_timer(&mddev->safemode_timer, jiffies+1);
   3881	}
   3882	return len;
   3883}
   3884static struct md_sysfs_entry md_safe_delay =
   3885__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
   3886
   3887static ssize_t
   3888level_show(struct mddev *mddev, char *page)
   3889{
   3890	struct md_personality *p;
   3891	int ret;
   3892	spin_lock(&mddev->lock);
   3893	p = mddev->pers;
   3894	if (p)
   3895		ret = sprintf(page, "%s\n", p->name);
   3896	else if (mddev->clevel[0])
   3897		ret = sprintf(page, "%s\n", mddev->clevel);
   3898	else if (mddev->level != LEVEL_NONE)
   3899		ret = sprintf(page, "%d\n", mddev->level);
   3900	else
   3901		ret = 0;
   3902	spin_unlock(&mddev->lock);
   3903	return ret;
   3904}
   3905
   3906static ssize_t
   3907level_store(struct mddev *mddev, const char *buf, size_t len)
   3908{
   3909	char clevel[16];
   3910	ssize_t rv;
   3911	size_t slen = len;
   3912	struct md_personality *pers, *oldpers;
   3913	long level;
   3914	void *priv, *oldpriv;
   3915	struct md_rdev *rdev;
   3916
   3917	if (slen == 0 || slen >= sizeof(clevel))
   3918		return -EINVAL;
   3919
   3920	rv = mddev_lock(mddev);
   3921	if (rv)
   3922		return rv;
   3923
   3924	if (mddev->pers == NULL) {
   3925		strncpy(mddev->clevel, buf, slen);
   3926		if (mddev->clevel[slen-1] == '\n')
   3927			slen--;
   3928		mddev->clevel[slen] = 0;
   3929		mddev->level = LEVEL_NONE;
   3930		rv = len;
   3931		goto out_unlock;
   3932	}
   3933	rv = -EROFS;
   3934	if (mddev->ro)
   3935		goto out_unlock;
   3936
   3937	/* request to change the personality.  Need to ensure:
   3938	 *  - array is not engaged in resync/recovery/reshape
   3939	 *  - old personality can be suspended
   3940	 *  - new personality can take over the array.
   3941	 */
   3942
   3943	rv = -EBUSY;
   3944	if (mddev->sync_thread ||
   3945	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
   3946	    mddev->reshape_position != MaxSector ||
   3947	    mddev->sysfs_active)
   3948		goto out_unlock;
   3949
   3950	rv = -EINVAL;
   3951	if (!mddev->pers->quiesce) {
   3952		pr_warn("md: %s: %s does not support online personality change\n",
   3953			mdname(mddev), mddev->pers->name);
   3954		goto out_unlock;
   3955	}
   3956
   3957	/* Now find the new personality */
   3958	strncpy(clevel, buf, slen);
   3959	if (clevel[slen-1] == '\n')
   3960		slen--;
   3961	clevel[slen] = 0;
   3962	if (kstrtol(clevel, 10, &level))
   3963		level = LEVEL_NONE;
   3964
   3965	if (request_module("md-%s", clevel) != 0)
   3966		request_module("md-level-%s", clevel);
   3967	spin_lock(&pers_lock);
   3968	pers = find_pers(level, clevel);
   3969	if (!pers || !try_module_get(pers->owner)) {
   3970		spin_unlock(&pers_lock);
   3971		pr_warn("md: personality %s not loaded\n", clevel);
   3972		rv = -EINVAL;
   3973		goto out_unlock;
   3974	}
   3975	spin_unlock(&pers_lock);
   3976
   3977	if (pers == mddev->pers) {
   3978		/* Nothing to do! */
   3979		module_put(pers->owner);
   3980		rv = len;
   3981		goto out_unlock;
   3982	}
   3983	if (!pers->takeover) {
   3984		module_put(pers->owner);
   3985		pr_warn("md: %s: %s does not support personality takeover\n",
   3986			mdname(mddev), clevel);
   3987		rv = -EINVAL;
   3988		goto out_unlock;
   3989	}
   3990
   3991	rdev_for_each(rdev, mddev)
   3992		rdev->new_raid_disk = rdev->raid_disk;
   3993
   3994	/* ->takeover must set new_* and/or delta_disks
   3995	 * if it succeeds, and may set them when it fails.
   3996	 */
   3997	priv = pers->takeover(mddev);
   3998	if (IS_ERR(priv)) {
   3999		mddev->new_level = mddev->level;
   4000		mddev->new_layout = mddev->layout;
   4001		mddev->new_chunk_sectors = mddev->chunk_sectors;
   4002		mddev->raid_disks -= mddev->delta_disks;
   4003		mddev->delta_disks = 0;
   4004		mddev->reshape_backwards = 0;
   4005		module_put(pers->owner);
   4006		pr_warn("md: %s: %s would not accept array\n",
   4007			mdname(mddev), clevel);
   4008		rv = PTR_ERR(priv);
   4009		goto out_unlock;
   4010	}
   4011
   4012	/* Looks like we have a winner */
   4013	mddev_suspend(mddev);
   4014	mddev_detach(mddev);
   4015
   4016	spin_lock(&mddev->lock);
   4017	oldpers = mddev->pers;
   4018	oldpriv = mddev->private;
   4019	mddev->pers = pers;
   4020	mddev->private = priv;
   4021	strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
   4022	mddev->level = mddev->new_level;
   4023	mddev->layout = mddev->new_layout;
   4024	mddev->chunk_sectors = mddev->new_chunk_sectors;
   4025	mddev->delta_disks = 0;
   4026	mddev->reshape_backwards = 0;
   4027	mddev->degraded = 0;
   4028	spin_unlock(&mddev->lock);
   4029
   4030	if (oldpers->sync_request == NULL &&
   4031	    mddev->external) {
   4032		/* We are converting from a no-redundancy array
   4033		 * to a redundancy array and metadata is managed
   4034		 * externally so we need to be sure that writes
   4035		 * won't block due to a need to transition
   4036		 *      clean->dirty
   4037		 * until external management is started.
   4038		 */
   4039		mddev->in_sync = 0;
   4040		mddev->safemode_delay = 0;
   4041		mddev->safemode = 0;
   4042	}
   4043
   4044	oldpers->free(mddev, oldpriv);
   4045
   4046	if (oldpers->sync_request == NULL &&
   4047	    pers->sync_request != NULL) {
   4048		/* need to add the md_redundancy_group */
   4049		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
   4050			pr_warn("md: cannot register extra attributes for %s\n",
   4051				mdname(mddev));
   4052		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
   4053		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
   4054		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
   4055	}
   4056	if (oldpers->sync_request != NULL &&
   4057	    pers->sync_request == NULL) {
   4058		/* need to remove the md_redundancy_group */
   4059		if (mddev->to_remove == NULL)
   4060			mddev->to_remove = &md_redundancy_group;
   4061	}
   4062
   4063	module_put(oldpers->owner);
   4064
   4065	rdev_for_each(rdev, mddev) {
   4066		if (rdev->raid_disk < 0)
   4067			continue;
   4068		if (rdev->new_raid_disk >= mddev->raid_disks)
   4069			rdev->new_raid_disk = -1;
   4070		if (rdev->new_raid_disk == rdev->raid_disk)
   4071			continue;
   4072		sysfs_unlink_rdev(mddev, rdev);
   4073	}
   4074	rdev_for_each(rdev, mddev) {
   4075		if (rdev->raid_disk < 0)
   4076			continue;
   4077		if (rdev->new_raid_disk == rdev->raid_disk)
   4078			continue;
   4079		rdev->raid_disk = rdev->new_raid_disk;
   4080		if (rdev->raid_disk < 0)
   4081			clear_bit(In_sync, &rdev->flags);
   4082		else {
   4083			if (sysfs_link_rdev(mddev, rdev))
   4084				pr_warn("md: cannot register rd%d for %s after level change\n",
   4085					rdev->raid_disk, mdname(mddev));
   4086		}
   4087	}
   4088
   4089	if (pers->sync_request == NULL) {
   4090		/* this is now an array without redundancy, so
   4091		 * it must always be in_sync
   4092		 */
   4093		mddev->in_sync = 1;
   4094		del_timer_sync(&mddev->safemode_timer);
   4095	}
   4096	blk_set_stacking_limits(&mddev->queue->limits);
   4097	pers->run(mddev);
   4098	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   4099	mddev_resume(mddev);
   4100	if (!mddev->thread)
   4101		md_update_sb(mddev, 1);
   4102	sysfs_notify_dirent_safe(mddev->sysfs_level);
   4103	md_new_event();
   4104	rv = len;
   4105out_unlock:
   4106	mddev_unlock(mddev);
   4107	return rv;
   4108}
   4109
   4110static struct md_sysfs_entry md_level =
   4111__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
   4112
   4113static ssize_t
   4114layout_show(struct mddev *mddev, char *page)
   4115{
   4116	/* just a number, not meaningful for all levels */
   4117	if (mddev->reshape_position != MaxSector &&
   4118	    mddev->layout != mddev->new_layout)
   4119		return sprintf(page, "%d (%d)\n",
   4120			       mddev->new_layout, mddev->layout);
   4121	return sprintf(page, "%d\n", mddev->layout);
   4122}
   4123
   4124static ssize_t
   4125layout_store(struct mddev *mddev, const char *buf, size_t len)
   4126{
   4127	unsigned int n;
   4128	int err;
   4129
   4130	err = kstrtouint(buf, 10, &n);
   4131	if (err < 0)
   4132		return err;
   4133	err = mddev_lock(mddev);
   4134	if (err)
   4135		return err;
   4136
   4137	if (mddev->pers) {
   4138		if (mddev->pers->check_reshape == NULL)
   4139			err = -EBUSY;
   4140		else if (mddev->ro)
   4141			err = -EROFS;
   4142		else {
   4143			mddev->new_layout = n;
   4144			err = mddev->pers->check_reshape(mddev);
   4145			if (err)
   4146				mddev->new_layout = mddev->layout;
   4147		}
   4148	} else {
   4149		mddev->new_layout = n;
   4150		if (mddev->reshape_position == MaxSector)
   4151			mddev->layout = n;
   4152	}
   4153	mddev_unlock(mddev);
   4154	return err ?: len;
   4155}
   4156static struct md_sysfs_entry md_layout =
   4157__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
   4158
   4159static ssize_t
   4160raid_disks_show(struct mddev *mddev, char *page)
   4161{
   4162	if (mddev->raid_disks == 0)
   4163		return 0;
   4164	if (mddev->reshape_position != MaxSector &&
   4165	    mddev->delta_disks != 0)
   4166		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
   4167			       mddev->raid_disks - mddev->delta_disks);
   4168	return sprintf(page, "%d\n", mddev->raid_disks);
   4169}
   4170
   4171static int update_raid_disks(struct mddev *mddev, int raid_disks);
   4172
   4173static ssize_t
   4174raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
   4175{
   4176	unsigned int n;
   4177	int err;
   4178
   4179	err = kstrtouint(buf, 10, &n);
   4180	if (err < 0)
   4181		return err;
   4182
   4183	err = mddev_lock(mddev);
   4184	if (err)
   4185		return err;
   4186	if (mddev->pers)
   4187		err = update_raid_disks(mddev, n);
   4188	else if (mddev->reshape_position != MaxSector) {
   4189		struct md_rdev *rdev;
   4190		int olddisks = mddev->raid_disks - mddev->delta_disks;
   4191
   4192		err = -EINVAL;
   4193		rdev_for_each(rdev, mddev) {
   4194			if (olddisks < n &&
   4195			    rdev->data_offset < rdev->new_data_offset)
   4196				goto out_unlock;
   4197			if (olddisks > n &&
   4198			    rdev->data_offset > rdev->new_data_offset)
   4199				goto out_unlock;
   4200		}
   4201		err = 0;
   4202		mddev->delta_disks = n - olddisks;
   4203		mddev->raid_disks = n;
   4204		mddev->reshape_backwards = (mddev->delta_disks < 0);
   4205	} else
   4206		mddev->raid_disks = n;
   4207out_unlock:
   4208	mddev_unlock(mddev);
   4209	return err ? err : len;
   4210}
   4211static struct md_sysfs_entry md_raid_disks =
   4212__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
   4213
   4214static ssize_t
   4215uuid_show(struct mddev *mddev, char *page)
   4216{
   4217	return sprintf(page, "%pU\n", mddev->uuid);
   4218}
   4219static struct md_sysfs_entry md_uuid =
   4220__ATTR(uuid, S_IRUGO, uuid_show, NULL);
   4221
   4222static ssize_t
   4223chunk_size_show(struct mddev *mddev, char *page)
   4224{
   4225	if (mddev->reshape_position != MaxSector &&
   4226	    mddev->chunk_sectors != mddev->new_chunk_sectors)
   4227		return sprintf(page, "%d (%d)\n",
   4228			       mddev->new_chunk_sectors << 9,
   4229			       mddev->chunk_sectors << 9);
   4230	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
   4231}
   4232
   4233static ssize_t
   4234chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
   4235{
   4236	unsigned long n;
   4237	int err;
   4238
   4239	err = kstrtoul(buf, 10, &n);
   4240	if (err < 0)
   4241		return err;
   4242
   4243	err = mddev_lock(mddev);
   4244	if (err)
   4245		return err;
   4246	if (mddev->pers) {
   4247		if (mddev->pers->check_reshape == NULL)
   4248			err = -EBUSY;
   4249		else if (mddev->ro)
   4250			err = -EROFS;
   4251		else {
   4252			mddev->new_chunk_sectors = n >> 9;
   4253			err = mddev->pers->check_reshape(mddev);
   4254			if (err)
   4255				mddev->new_chunk_sectors = mddev->chunk_sectors;
   4256		}
   4257	} else {
   4258		mddev->new_chunk_sectors = n >> 9;
   4259		if (mddev->reshape_position == MaxSector)
   4260			mddev->chunk_sectors = n >> 9;
   4261	}
   4262	mddev_unlock(mddev);
   4263	return err ?: len;
   4264}
   4265static struct md_sysfs_entry md_chunk_size =
   4266__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
   4267
   4268static ssize_t
   4269resync_start_show(struct mddev *mddev, char *page)
   4270{
   4271	if (mddev->recovery_cp == MaxSector)
   4272		return sprintf(page, "none\n");
   4273	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
   4274}
   4275
   4276static ssize_t
   4277resync_start_store(struct mddev *mddev, const char *buf, size_t len)
   4278{
   4279	unsigned long long n;
   4280	int err;
   4281
   4282	if (cmd_match(buf, "none"))
   4283		n = MaxSector;
   4284	else {
   4285		err = kstrtoull(buf, 10, &n);
   4286		if (err < 0)
   4287			return err;
   4288		if (n != (sector_t)n)
   4289			return -EINVAL;
   4290	}
   4291
   4292	err = mddev_lock(mddev);
   4293	if (err)
   4294		return err;
   4295	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
   4296		err = -EBUSY;
   4297
   4298	if (!err) {
   4299		mddev->recovery_cp = n;
   4300		if (mddev->pers)
   4301			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
   4302	}
   4303	mddev_unlock(mddev);
   4304	return err ?: len;
   4305}
   4306static struct md_sysfs_entry md_resync_start =
   4307__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
   4308		resync_start_show, resync_start_store);
   4309
   4310/*
   4311 * The array state can be:
   4312 *
   4313 * clear
   4314 *     No devices, no size, no level
   4315 *     Equivalent to STOP_ARRAY ioctl
   4316 * inactive
   4317 *     May have some settings, but array is not active
   4318 *        all IO results in error
   4319 *     When written, doesn't tear down array, but just stops it
   4320 * suspended (not supported yet)
   4321 *     All IO requests will block. The array can be reconfigured.
   4322 *     Writing this, if accepted, will block until array is quiescent
   4323 * readonly
   4324 *     no resync can happen.  no superblocks get written.
   4325 *     write requests fail
   4326 * read-auto
   4327 *     like readonly, but behaves like 'clean' on a write request.
   4328 *
   4329 * clean - no pending writes, but otherwise active.
   4330 *     When written to inactive array, starts without resync
   4331 *     If a write request arrives then
   4332 *       if metadata is known, mark 'dirty' and switch to 'active'.
   4333 *       if not known, block and switch to write-pending
   4334 *     If written to an active array that has pending writes, then fails.
   4335 * active
   4336 *     fully active: IO and resync can be happening.
   4337 *     When written to inactive array, starts with resync
   4338 *
   4339 * write-pending
   4340 *     clean, but writes are blocked waiting for 'active' to be written.
   4341 *
   4342 * active-idle
   4343 *     like active, but no writes have been seen for a while (100msec).
   4344 *
   4345 * broken
   4346 *     Array is failed. It's useful because mounted arrays aren't stopped
   4347 *     when the array fails, so this state will at least alert the user that
   4348 *     something is wrong.
   4349 */
   4350enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
   4351		   write_pending, active_idle, broken, bad_word};
   4352static char *array_states[] = {
   4353	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
   4354	"write-pending", "active-idle", "broken", NULL };
   4355
   4356static int match_word(const char *word, char **list)
   4357{
   4358	int n;
   4359	for (n=0; list[n]; n++)
   4360		if (cmd_match(word, list[n]))
   4361			break;
   4362	return n;
   4363}
   4364
   4365static ssize_t
   4366array_state_show(struct mddev *mddev, char *page)
   4367{
   4368	enum array_state st = inactive;
   4369
   4370	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
   4371		switch(mddev->ro) {
   4372		case 1:
   4373			st = readonly;
   4374			break;
   4375		case 2:
   4376			st = read_auto;
   4377			break;
   4378		case 0:
   4379			spin_lock(&mddev->lock);
   4380			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
   4381				st = write_pending;
   4382			else if (mddev->in_sync)
   4383				st = clean;
   4384			else if (mddev->safemode)
   4385				st = active_idle;
   4386			else
   4387				st = active;
   4388			spin_unlock(&mddev->lock);
   4389		}
   4390
   4391		if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
   4392			st = broken;
   4393	} else {
   4394		if (list_empty(&mddev->disks) &&
   4395		    mddev->raid_disks == 0 &&
   4396		    mddev->dev_sectors == 0)
   4397			st = clear;
   4398		else
   4399			st = inactive;
   4400	}
   4401	return sprintf(page, "%s\n", array_states[st]);
   4402}
   4403
   4404static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
   4405static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
   4406static int restart_array(struct mddev *mddev);
   4407
   4408static ssize_t
   4409array_state_store(struct mddev *mddev, const char *buf, size_t len)
   4410{
   4411	int err = 0;
   4412	enum array_state st = match_word(buf, array_states);
   4413
   4414	if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
   4415		/* don't take reconfig_mutex when toggling between
   4416		 * clean and active
   4417		 */
   4418		spin_lock(&mddev->lock);
   4419		if (st == active) {
   4420			restart_array(mddev);
   4421			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
   4422			md_wakeup_thread(mddev->thread);
   4423			wake_up(&mddev->sb_wait);
   4424		} else /* st == clean */ {
   4425			restart_array(mddev);
   4426			if (!set_in_sync(mddev))
   4427				err = -EBUSY;
   4428		}
   4429		if (!err)
   4430			sysfs_notify_dirent_safe(mddev->sysfs_state);
   4431		spin_unlock(&mddev->lock);
   4432		return err ?: len;
   4433	}
   4434	err = mddev_lock(mddev);
   4435	if (err)
   4436		return err;
   4437	err = -EINVAL;
   4438	switch(st) {
   4439	case bad_word:
   4440		break;
   4441	case clear:
   4442		/* stopping an active array */
   4443		err = do_md_stop(mddev, 0, NULL);
   4444		break;
   4445	case inactive:
   4446		/* stopping an active array */
   4447		if (mddev->pers)
   4448			err = do_md_stop(mddev, 2, NULL);
   4449		else
   4450			err = 0; /* already inactive */
   4451		break;
   4452	case suspended:
   4453		break; /* not supported yet */
   4454	case readonly:
   4455		if (mddev->pers)
   4456			err = md_set_readonly(mddev, NULL);
   4457		else {
   4458			mddev->ro = 1;
   4459			set_disk_ro(mddev->gendisk, 1);
   4460			err = do_md_run(mddev);
   4461		}
   4462		break;
   4463	case read_auto:
   4464		if (mddev->pers) {
   4465			if (mddev->ro == 0)
   4466				err = md_set_readonly(mddev, NULL);
   4467			else if (mddev->ro == 1)
   4468				err = restart_array(mddev);
   4469			if (err == 0) {
   4470				mddev->ro = 2;
   4471				set_disk_ro(mddev->gendisk, 0);
   4472			}
   4473		} else {
   4474			mddev->ro = 2;
   4475			err = do_md_run(mddev);
   4476		}
   4477		break;
   4478	case clean:
   4479		if (mddev->pers) {
   4480			err = restart_array(mddev);
   4481			if (err)
   4482				break;
   4483			spin_lock(&mddev->lock);
   4484			if (!set_in_sync(mddev))
   4485				err = -EBUSY;
   4486			spin_unlock(&mddev->lock);
   4487		} else
   4488			err = -EINVAL;
   4489		break;
   4490	case active:
   4491		if (mddev->pers) {
   4492			err = restart_array(mddev);
   4493			if (err)
   4494				break;
   4495			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
   4496			wake_up(&mddev->sb_wait);
   4497			err = 0;
   4498		} else {
   4499			mddev->ro = 0;
   4500			set_disk_ro(mddev->gendisk, 0);
   4501			err = do_md_run(mddev);
   4502		}
   4503		break;
   4504	case write_pending:
   4505	case active_idle:
   4506	case broken:
   4507		/* these cannot be set */
   4508		break;
   4509	}
   4510
   4511	if (!err) {
   4512		if (mddev->hold_active == UNTIL_IOCTL)
   4513			mddev->hold_active = 0;
   4514		sysfs_notify_dirent_safe(mddev->sysfs_state);
   4515	}
   4516	mddev_unlock(mddev);
   4517	return err ?: len;
   4518}
   4519static struct md_sysfs_entry md_array_state =
   4520__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
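/*
 * Illustrative userspace sketch (not part of the driver): the states
 * listed above are reported by the 'array_state' attribute, and writing
 * one of the settable names (e.g. "clean" or "readonly") to the same file
 * goes through array_state_store() above.  The path is an assumption for
 * the example (array md0).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char state[32] = "";
 *		int fd = open("/sys/block/md0/md/array_state", O_RDONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (read(fd, state, sizeof(state) - 1) > 0)
 *			printf("array_state: %s", state);	// e.g. "clean\n" or "active\n"
 *		close(fd);
 *		return 0;
 *	}
 */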
   4521
   4522static ssize_t
   4523max_corrected_read_errors_show(struct mddev *mddev, char *page) {
   4524	return sprintf(page, "%d\n",
   4525		       atomic_read(&mddev->max_corr_read_errors));
   4526}
   4527
   4528static ssize_t
   4529max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
   4530{
   4531	unsigned int n;
   4532	int rv;
   4533
   4534	rv = kstrtouint(buf, 10, &n);
   4535	if (rv < 0)
   4536		return rv;
   4537	atomic_set(&mddev->max_corr_read_errors, n);
   4538	return len;
   4539}
   4540
   4541static struct md_sysfs_entry max_corr_read_errors =
   4542__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
   4543	max_corrected_read_errors_store);
   4544
   4545static ssize_t
   4546null_show(struct mddev *mddev, char *page)
   4547{
   4548	return -EINVAL;
   4549}
   4550
   4551/* need to ensure rdev_delayed_delete() has completed */
   4552static void flush_rdev_wq(struct mddev *mddev)
   4553{
   4554	struct md_rdev *rdev;
   4555
   4556	rcu_read_lock();
   4557	rdev_for_each_rcu(rdev, mddev)
   4558		if (work_pending(&rdev->del_work)) {
   4559			flush_workqueue(md_rdev_misc_wq);
   4560			break;
   4561		}
   4562	rcu_read_unlock();
   4563}
   4564
   4565static ssize_t
   4566new_dev_store(struct mddev *mddev, const char *buf, size_t len)
   4567{
   4568	/* buf must be "%d:%d", optionally newline-terminated, giving major and minor numbers */
   4569	/* The new device is added to the array.
   4570	 * If the array has a persistent superblock, we read the
   4571	 * superblock to initialise info and check validity.
   4572	 * Otherwise, only checking done is that in bind_rdev_to_array,
   4573	 * which mainly checks size.
   4574	 */
   4575	char *e;
   4576	int major = simple_strtoul(buf, &e, 10);
   4577	int minor;
   4578	dev_t dev;
   4579	struct md_rdev *rdev;
   4580	int err;
   4581
   4582	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
   4583		return -EINVAL;
   4584	minor = simple_strtoul(e+1, &e, 10);
   4585	if (*e && *e != '\n')
   4586		return -EINVAL;
   4587	dev = MKDEV(major, minor);
   4588	if (major != MAJOR(dev) ||
   4589	    minor != MINOR(dev))
   4590		return -EOVERFLOW;
   4591
   4592	flush_rdev_wq(mddev);
   4593	err = mddev_lock(mddev);
   4594	if (err)
   4595		return err;
   4596	if (mddev->persistent) {
   4597		rdev = md_import_device(dev, mddev->major_version,
   4598					mddev->minor_version);
   4599		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
   4600			struct md_rdev *rdev0
   4601				= list_entry(mddev->disks.next,
   4602					     struct md_rdev, same_set);
   4603			err = super_types[mddev->major_version]
   4604				.load_super(rdev, rdev0, mddev->minor_version);
   4605			if (err < 0)
   4606				goto out;
   4607		}
   4608	} else if (mddev->external)
   4609		rdev = md_import_device(dev, -2, -1);
   4610	else
   4611		rdev = md_import_device(dev, -1, -1);
   4612
   4613	if (IS_ERR(rdev)) {
   4614		mddev_unlock(mddev);
   4615		return PTR_ERR(rdev);
   4616	}
   4617	err = bind_rdev_to_array(rdev, mddev);
   4618 out:
   4619	if (err)
   4620		export_rdev(rdev);
   4621	mddev_unlock(mddev);
   4622	if (!err)
   4623		md_new_event();
   4624	return err ? err : len;
   4625}
   4626
   4627static struct md_sysfs_entry md_new_device =
   4628__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
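/*
 * Illustrative userspace sketch (not part of the driver): adding a device
 * to an array by writing "major:minor" to 'new_dev', as parsed by
 * new_dev_store() above.  The device numbers and path are assumptions for
 * the example (8:16 is typically /dev/sdb).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *dev = "8:16\n";
 *		int fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (write(fd, dev, strlen(dev)) < 0)	// bind_rdev_to_array() errors come back here
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */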
   4629
   4630static ssize_t
   4631bitmap_store(struct mddev *mddev, const char *buf, size_t len)
   4632{
   4633	char *end;
   4634	unsigned long chunk, end_chunk;
   4635	int err;
   4636
   4637	err = mddev_lock(mddev);
   4638	if (err)
   4639		return err;
   4640	if (!mddev->bitmap)
   4641		goto out;
   4642	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
   4643	while (*buf) {
   4644		chunk = end_chunk = simple_strtoul(buf, &end, 0);
   4645		if (buf == end) break;
   4646		if (*end == '-') { /* range */
   4647			buf = end + 1;
   4648			end_chunk = simple_strtoul(buf, &end, 0);
   4649			if (buf == end) break;
   4650		}
   4651		if (*end && !isspace(*end)) break;
   4652		md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
   4653		buf = skip_spaces(end);
   4654	}
   4655	md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
   4656out:
   4657	mddev_unlock(mddev);
   4658	return len;
   4659}
   4660
   4661static struct md_sysfs_entry md_bitmap =
   4662__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
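/*
 * Illustrative example (not part of the driver): bitmap_store() accepts
 * whitespace-separated chunk numbers or chunk ranges, so writing
 *
 *	"64 128-255\n"
 *
 * to 'bitmap_set_bits' dirties chunk 64 and chunks 128 through 255, then
 * flushes the bitmap to disk.
 */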
   4663
   4664static ssize_t
   4665size_show(struct mddev *mddev, char *page)
   4666{
   4667	return sprintf(page, "%llu\n",
   4668		(unsigned long long)mddev->dev_sectors / 2);
   4669}
   4670
   4671static int update_size(struct mddev *mddev, sector_t num_sectors);
   4672
   4673static ssize_t
   4674size_store(struct mddev *mddev, const char *buf, size_t len)
   4675{
   4676	/* If array is inactive, we can reduce the component size, but
   4677	 * not increase it (except from 0).
   4678	 * If array is active, we can try an on-line resize
   4679	 */
   4680	sector_t sectors;
   4681	int err = strict_blocks_to_sectors(buf, &sectors);
   4682
   4683	if (err < 0)
   4684		return err;
   4685	err = mddev_lock(mddev);
   4686	if (err)
   4687		return err;
   4688	if (mddev->pers) {
   4689		err = update_size(mddev, sectors);
   4690		if (err == 0)
   4691			md_update_sb(mddev, 1);
   4692	} else {
   4693		if (mddev->dev_sectors == 0 ||
   4694		    mddev->dev_sectors > sectors)
   4695			mddev->dev_sectors = sectors;
   4696		else
   4697			err = -ENOSPC;
   4698	}
   4699	mddev_unlock(mddev);
   4700	return err ? err : len;
   4701}
   4702
   4703static struct md_sysfs_entry md_size =
   4704__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
   4705
   4706/* Metadata version.
   4707 * This is one of
   4708 *   'none' for arrays with no metadata (good luck...)
   4709 *   'external' for arrays with externally managed metadata,
   4710 * or N.M for internally known formats
   4711 */
   4712static ssize_t
   4713metadata_show(struct mddev *mddev, char *page)
   4714{
   4715	if (mddev->persistent)
   4716		return sprintf(page, "%d.%d\n",
   4717			       mddev->major_version, mddev->minor_version);
   4718	else if (mddev->external)
   4719		return sprintf(page, "external:%s\n", mddev->metadata_type);
   4720	else
   4721		return sprintf(page, "none\n");
   4722}
   4723
   4724static ssize_t
   4725metadata_store(struct mddev *mddev, const char *buf, size_t len)
   4726{
   4727	int major, minor;
   4728	char *e;
   4729	int err;
   4730	/* Changing the details of 'external' metadata is
   4731	 * always permitted.  Otherwise there must be
   4732	 * no devices attached to the array.
   4733	 */
   4734
   4735	err = mddev_lock(mddev);
   4736	if (err)
   4737		return err;
   4738	err = -EBUSY;
   4739	if (mddev->external && strncmp(buf, "external:", 9) == 0)
   4740		;
   4741	else if (!list_empty(&mddev->disks))
   4742		goto out_unlock;
   4743
   4744	err = 0;
   4745	if (cmd_match(buf, "none")) {
   4746		mddev->persistent = 0;
   4747		mddev->external = 0;
   4748		mddev->major_version = 0;
   4749		mddev->minor_version = 90;
   4750		goto out_unlock;
   4751	}
   4752	if (strncmp(buf, "external:", 9) == 0) {
   4753		size_t namelen = len-9;
   4754		if (namelen >= sizeof(mddev->metadata_type))
   4755			namelen = sizeof(mddev->metadata_type)-1;
   4756		strncpy(mddev->metadata_type, buf+9, namelen);
   4757		mddev->metadata_type[namelen] = 0;
   4758		if (namelen && mddev->metadata_type[namelen-1] == '\n')
   4759			mddev->metadata_type[--namelen] = 0;
   4760		mddev->persistent = 0;
   4761		mddev->external = 1;
   4762		mddev->major_version = 0;
   4763		mddev->minor_version = 90;
   4764		goto out_unlock;
   4765	}
   4766	major = simple_strtoul(buf, &e, 10);
   4767	err = -EINVAL;
   4768	if (e==buf || *e != '.')
   4769		goto out_unlock;
   4770	buf = e+1;
   4771	minor = simple_strtoul(buf, &e, 10);
   4772	if (e==buf || (*e && *e != '\n') )
   4773		goto out_unlock;
   4774	err = -ENOENT;
   4775	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
   4776		goto out_unlock;
   4777	mddev->major_version = major;
   4778	mddev->minor_version = minor;
   4779	mddev->persistent = 1;
   4780	mddev->external = 0;
   4781	err = 0;
   4782out_unlock:
   4783	mddev_unlock(mddev);
   4784	return err ?: len;
   4785}
   4786
   4787static struct md_sysfs_entry md_metadata =
   4788__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
   4789
   4790static ssize_t
   4791action_show(struct mddev *mddev, char *page)
   4792{
   4793	char *type = "idle";
   4794	unsigned long recovery = mddev->recovery;
   4795	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
   4796		type = "frozen";
   4797	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
   4798	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
   4799		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
   4800			type = "reshape";
   4801		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
   4802			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
   4803				type = "resync";
   4804			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
   4805				type = "check";
   4806			else
   4807				type = "repair";
   4808		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
   4809			type = "recover";
   4810		else if (mddev->reshape_position != MaxSector)
   4811			type = "reshape";
   4812	}
   4813	return sprintf(page, "%s\n", type);
   4814}
   4815
   4816static ssize_t
   4817action_store(struct mddev *mddev, const char *page, size_t len)
   4818{
   4819	if (!mddev->pers || !mddev->pers->sync_request)
   4820		return -EINVAL;
   4821
   4822
   4823	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
   4824		if (cmd_match(page, "frozen"))
   4825			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   4826		else
   4827			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   4828		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
   4829		    mddev_lock(mddev) == 0) {
   4830			if (work_pending(&mddev->del_work))
   4831				flush_workqueue(md_misc_wq);
   4832			if (mddev->sync_thread) {
   4833				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   4834				md_reap_sync_thread(mddev);
   4835			}
   4836			mddev_unlock(mddev);
   4837		}
   4838	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   4839		return -EBUSY;
   4840	else if (cmd_match(page, "resync"))
   4841		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   4842	else if (cmd_match(page, "recover")) {
   4843		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   4844		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   4845	} else if (cmd_match(page, "reshape")) {
   4846		int err;
   4847		if (mddev->pers->start_reshape == NULL)
   4848			return -EINVAL;
   4849		err = mddev_lock(mddev);
   4850		if (!err) {
   4851			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   4852				err =  -EBUSY;
   4853			else {
   4854				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   4855				err = mddev->pers->start_reshape(mddev);
   4856			}
   4857			mddev_unlock(mddev);
   4858		}
   4859		if (err)
   4860			return err;
   4861		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
   4862	} else {
   4863		if (cmd_match(page, "check"))
   4864			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
   4865		else if (!cmd_match(page, "repair"))
   4866			return -EINVAL;
   4867		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   4868		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
   4869		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
   4870	}
   4871	if (mddev->ro == 2) {
   4872		/* A write to sync_action is enough to justify
   4873		 * canceling read-auto mode
   4874		 */
   4875		mddev->ro = 0;
   4876		md_wakeup_thread(mddev->sync_thread);
   4877	}
   4878	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   4879	md_wakeup_thread(mddev->thread);
   4880	sysfs_notify_dirent_safe(mddev->sysfs_action);
   4881	return len;
   4882}
   4883
   4884static struct md_sysfs_entry md_scan_mode =
   4885__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
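/*
 * Illustrative userspace sketch (not part of the driver): requesting a
 * scrub by writing "check" to 'sync_action', as handled by action_store()
 * above; progress can then be read from 'sync_completed' and any detected
 * mismatches from 'mismatch_cnt'.  The path is an assumption (array md0).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (write(fd, "check", 5) < 0)	// -EBUSY if a resync is already running
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */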
   4886
   4887static ssize_t
   4888last_sync_action_show(struct mddev *mddev, char *page)
   4889{
   4890	return sprintf(page, "%s\n", mddev->last_sync_action);
   4891}
   4892
   4893static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
   4894
   4895static ssize_t
   4896mismatch_cnt_show(struct mddev *mddev, char *page)
   4897{
   4898	return sprintf(page, "%llu\n",
   4899		       (unsigned long long)
   4900		       atomic64_read(&mddev->resync_mismatches));
   4901}
   4902
   4903static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
   4904
   4905static ssize_t
   4906sync_min_show(struct mddev *mddev, char *page)
   4907{
   4908	return sprintf(page, "%d (%s)\n", speed_min(mddev),
   4909		       mddev->sync_speed_min ? "local": "system");
   4910}
   4911
   4912static ssize_t
   4913sync_min_store(struct mddev *mddev, const char *buf, size_t len)
   4914{
   4915	unsigned int min;
   4916	int rv;
   4917
   4918	if (strncmp(buf, "system", 6)==0) {
   4919		min = 0;
   4920	} else {
   4921		rv = kstrtouint(buf, 10, &min);
   4922		if (rv < 0)
   4923			return rv;
   4924		if (min == 0)
   4925			return -EINVAL;
   4926	}
   4927	mddev->sync_speed_min = min;
   4928	return len;
   4929}
   4930
   4931static struct md_sysfs_entry md_sync_min =
   4932__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
   4933
   4934static ssize_t
   4935sync_max_show(struct mddev *mddev, char *page)
   4936{
   4937	return sprintf(page, "%d (%s)\n", speed_max(mddev),
   4938		       mddev->sync_speed_max ? "local": "system");
   4939}
   4940
   4941static ssize_t
   4942sync_max_store(struct mddev *mddev, const char *buf, size_t len)
   4943{
   4944	unsigned int max;
   4945	int rv;
   4946
   4947	if (strncmp(buf, "system", 6)==0) {
   4948		max = 0;
   4949	} else {
   4950		rv = kstrtouint(buf, 10, &max);
   4951		if (rv < 0)
   4952			return rv;
   4953		if (max == 0)
   4954			return -EINVAL;
   4955	}
   4956	mddev->sync_speed_max = max;
   4957	return len;
   4958}
   4959
   4960static struct md_sysfs_entry md_sync_max =
   4961__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
   4962
   4963static ssize_t
   4964degraded_show(struct mddev *mddev, char *page)
   4965{
   4966	return sprintf(page, "%d\n", mddev->degraded);
   4967}
   4968static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
   4969
   4970static ssize_t
   4971sync_force_parallel_show(struct mddev *mddev, char *page)
   4972{
   4973	return sprintf(page, "%d\n", mddev->parallel_resync);
   4974}
   4975
   4976static ssize_t
   4977sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
   4978{
   4979	long n;
   4980
   4981	if (kstrtol(buf, 10, &n))
   4982		return -EINVAL;
   4983
   4984	if (n != 0 && n != 1)
   4985		return -EINVAL;
   4986
   4987	mddev->parallel_resync = n;
   4988
   4989	if (mddev->sync_thread)
   4990		wake_up(&resync_wait);
   4991
   4992	return len;
   4993}
   4994
   4995/* force parallel resync, even with shared block devices */
   4996static struct md_sysfs_entry md_sync_force_parallel =
   4997__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
   4998       sync_force_parallel_show, sync_force_parallel_store);
   4999
   5000static ssize_t
   5001sync_speed_show(struct mddev *mddev, char *page)
   5002{
   5003	unsigned long resync, dt, db;
   5004	if (mddev->curr_resync == 0)
   5005		return sprintf(page, "none\n");
   5006	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
   5007	dt = (jiffies - mddev->resync_mark) / HZ;
   5008	if (!dt) dt++;
   5009	db = resync - mddev->resync_mark_cnt;
   5010	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
   5011}
   5012
   5013static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
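/*
 * Worked example (illustrative only) of the arithmetic in sync_speed_show()
 * above: if the resync marks are 30 seconds apart (dt = 30) and
 * db = 3,000,000 sectors were handled in that window, the attribute reports
 * db/dt/2 = 3,000,000 / 30 / 2 = 50,000, i.e. 50,000 KiB/sec, because two
 * 512-byte sectors make up one KiB.
 */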
   5014
   5015static ssize_t
   5016sync_completed_show(struct mddev *mddev, char *page)
   5017{
   5018	unsigned long long max_sectors, resync;
   5019
   5020	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   5021		return sprintf(page, "none\n");
   5022
   5023	if (mddev->curr_resync == 1 ||
   5024	    mddev->curr_resync == 2)
   5025		return sprintf(page, "delayed\n");
   5026
   5027	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
   5028	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
   5029		max_sectors = mddev->resync_max_sectors;
   5030	else
   5031		max_sectors = mddev->dev_sectors;
   5032
   5033	resync = mddev->curr_resync_completed;
   5034	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
   5035}
   5036
   5037static struct md_sysfs_entry md_sync_completed =
   5038	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
   5039
   5040static ssize_t
   5041min_sync_show(struct mddev *mddev, char *page)
   5042{
   5043	return sprintf(page, "%llu\n",
   5044		       (unsigned long long)mddev->resync_min);
   5045}
   5046static ssize_t
   5047min_sync_store(struct mddev *mddev, const char *buf, size_t len)
   5048{
   5049	unsigned long long min;
   5050	int err;
   5051
   5052	if (kstrtoull(buf, 10, &min))
   5053		return -EINVAL;
   5054
   5055	spin_lock(&mddev->lock);
   5056	err = -EINVAL;
   5057	if (min > mddev->resync_max)
   5058		goto out_unlock;
   5059
   5060	err = -EBUSY;
   5061	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   5062		goto out_unlock;
   5063
   5064	/* Round down to multiple of 4K for safety */
    5065	mddev->resync_min = round_down(min, 8); /* 8 sectors == 4K */
   5066	err = 0;
   5067
   5068out_unlock:
   5069	spin_unlock(&mddev->lock);
   5070	return err ?: len;
   5071}
   5072
   5073static struct md_sysfs_entry md_min_sync =
   5074__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
   5075
   5076static ssize_t
   5077max_sync_show(struct mddev *mddev, char *page)
   5078{
   5079	if (mddev->resync_max == MaxSector)
   5080		return sprintf(page, "max\n");
   5081	else
   5082		return sprintf(page, "%llu\n",
   5083			       (unsigned long long)mddev->resync_max);
   5084}
   5085static ssize_t
   5086max_sync_store(struct mddev *mddev, const char *buf, size_t len)
   5087{
   5088	int err;
   5089	spin_lock(&mddev->lock);
   5090	if (strncmp(buf, "max", 3) == 0)
   5091		mddev->resync_max = MaxSector;
   5092	else {
   5093		unsigned long long max;
   5094		int chunk;
   5095
   5096		err = -EINVAL;
   5097		if (kstrtoull(buf, 10, &max))
   5098			goto out_unlock;
   5099		if (max < mddev->resync_min)
   5100			goto out_unlock;
   5101
   5102		err = -EBUSY;
   5103		if (max < mddev->resync_max &&
   5104		    mddev->ro == 0 &&
   5105		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   5106			goto out_unlock;
   5107
   5108		/* Must be a multiple of chunk_size */
   5109		chunk = mddev->chunk_sectors;
   5110		if (chunk) {
   5111			sector_t temp = max;
   5112
   5113			err = -EINVAL;
   5114			if (sector_div(temp, chunk))
   5115				goto out_unlock;
   5116		}
   5117		mddev->resync_max = max;
   5118	}
   5119	wake_up(&mddev->recovery_wait);
   5120	err = 0;
   5121out_unlock:
   5122	spin_unlock(&mddev->lock);
   5123	return err ?: len;
   5124}
   5125
   5126static struct md_sysfs_entry md_max_sync =
   5127__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
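/*
 * Illustrative sketch only: sync_min and sync_max above bound the resync
 * window in 512-byte sectors (array name md0 is hypothetical).
 * min_sync_store() rounds the value down to a multiple of 8 sectors
 * (8 * 512 B = 4 KiB), so writing 1001 stores 1000; a value other than
 * "max" written to sync_max must be a multiple of the chunk size, as
 * enforced by the sector_div() check above.
 *
 *     echo 1001 > /sys/block/md0/md/sync_min   # stored as 1000
 *     echo max  > /sys/block/md0/md/sync_max   # remove the upper bound
 */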
   5128
   5129static ssize_t
   5130suspend_lo_show(struct mddev *mddev, char *page)
   5131{
   5132	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
   5133}
   5134
   5135static ssize_t
   5136suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
   5137{
   5138	unsigned long long new;
   5139	int err;
   5140
   5141	err = kstrtoull(buf, 10, &new);
   5142	if (err < 0)
   5143		return err;
   5144	if (new != (sector_t)new)
   5145		return -EINVAL;
   5146
   5147	err = mddev_lock(mddev);
   5148	if (err)
   5149		return err;
   5150	err = -EINVAL;
   5151	if (mddev->pers == NULL ||
   5152	    mddev->pers->quiesce == NULL)
   5153		goto unlock;
   5154	mddev_suspend(mddev);
   5155	mddev->suspend_lo = new;
   5156	mddev_resume(mddev);
   5157
   5158	err = 0;
   5159unlock:
   5160	mddev_unlock(mddev);
   5161	return err ?: len;
   5162}
   5163static struct md_sysfs_entry md_suspend_lo =
   5164__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
   5165
   5166static ssize_t
   5167suspend_hi_show(struct mddev *mddev, char *page)
   5168{
   5169	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
   5170}
   5171
   5172static ssize_t
   5173suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
   5174{
   5175	unsigned long long new;
   5176	int err;
   5177
   5178	err = kstrtoull(buf, 10, &new);
   5179	if (err < 0)
   5180		return err;
   5181	if (new != (sector_t)new)
   5182		return -EINVAL;
   5183
   5184	err = mddev_lock(mddev);
   5185	if (err)
   5186		return err;
   5187	err = -EINVAL;
   5188	if (mddev->pers == NULL)
   5189		goto unlock;
   5190
   5191	mddev_suspend(mddev);
   5192	mddev->suspend_hi = new;
   5193	mddev_resume(mddev);
   5194
   5195	err = 0;
   5196unlock:
   5197	mddev_unlock(mddev);
   5198	return err ?: len;
   5199}
   5200static struct md_sysfs_entry md_suspend_hi =
   5201__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
   5202
   5203static ssize_t
   5204reshape_position_show(struct mddev *mddev, char *page)
   5205{
   5206	if (mddev->reshape_position != MaxSector)
   5207		return sprintf(page, "%llu\n",
   5208			       (unsigned long long)mddev->reshape_position);
   5209	strcpy(page, "none\n");
   5210	return 5;
   5211}
   5212
   5213static ssize_t
   5214reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
   5215{
   5216	struct md_rdev *rdev;
   5217	unsigned long long new;
   5218	int err;
   5219
   5220	err = kstrtoull(buf, 10, &new);
   5221	if (err < 0)
   5222		return err;
   5223	if (new != (sector_t)new)
   5224		return -EINVAL;
   5225	err = mddev_lock(mddev);
   5226	if (err)
   5227		return err;
   5228	err = -EBUSY;
   5229	if (mddev->pers)
   5230		goto unlock;
   5231	mddev->reshape_position = new;
   5232	mddev->delta_disks = 0;
   5233	mddev->reshape_backwards = 0;
   5234	mddev->new_level = mddev->level;
   5235	mddev->new_layout = mddev->layout;
   5236	mddev->new_chunk_sectors = mddev->chunk_sectors;
   5237	rdev_for_each(rdev, mddev)
   5238		rdev->new_data_offset = rdev->data_offset;
   5239	err = 0;
   5240unlock:
   5241	mddev_unlock(mddev);
   5242	return err ?: len;
   5243}
   5244
   5245static struct md_sysfs_entry md_reshape_position =
   5246__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
   5247       reshape_position_store);
   5248
   5249static ssize_t
   5250reshape_direction_show(struct mddev *mddev, char *page)
   5251{
   5252	return sprintf(page, "%s\n",
   5253		       mddev->reshape_backwards ? "backwards" : "forwards");
   5254}
   5255
   5256static ssize_t
   5257reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
   5258{
   5259	int backwards = 0;
   5260	int err;
   5261
   5262	if (cmd_match(buf, "forwards"))
   5263		backwards = 0;
   5264	else if (cmd_match(buf, "backwards"))
   5265		backwards = 1;
   5266	else
   5267		return -EINVAL;
   5268	if (mddev->reshape_backwards == backwards)
   5269		return len;
   5270
   5271	err = mddev_lock(mddev);
   5272	if (err)
   5273		return err;
   5274	/* check if we are allowed to change */
   5275	if (mddev->delta_disks)
   5276		err = -EBUSY;
   5277	else if (mddev->persistent &&
   5278	    mddev->major_version == 0)
   5279		err =  -EINVAL;
   5280	else
   5281		mddev->reshape_backwards = backwards;
   5282	mddev_unlock(mddev);
   5283	return err ?: len;
   5284}
   5285
   5286static struct md_sysfs_entry md_reshape_direction =
   5287__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
   5288       reshape_direction_store);
   5289
   5290static ssize_t
   5291array_size_show(struct mddev *mddev, char *page)
   5292{
   5293	if (mddev->external_size)
   5294		return sprintf(page, "%llu\n",
   5295			       (unsigned long long)mddev->array_sectors/2);
   5296	else
   5297		return sprintf(page, "default\n");
   5298}
   5299
   5300static ssize_t
   5301array_size_store(struct mddev *mddev, const char *buf, size_t len)
   5302{
   5303	sector_t sectors;
   5304	int err;
   5305
   5306	err = mddev_lock(mddev);
   5307	if (err)
   5308		return err;
   5309
    5310	/* cluster raid doesn't support changing array_sectors */
   5311	if (mddev_is_clustered(mddev)) {
   5312		mddev_unlock(mddev);
   5313		return -EINVAL;
   5314	}
   5315
   5316	if (strncmp(buf, "default", 7) == 0) {
   5317		if (mddev->pers)
   5318			sectors = mddev->pers->size(mddev, 0, 0);
   5319		else
   5320			sectors = mddev->array_sectors;
   5321
   5322		mddev->external_size = 0;
   5323	} else {
   5324		if (strict_blocks_to_sectors(buf, &sectors) < 0)
   5325			err = -EINVAL;
   5326		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
   5327			err = -E2BIG;
   5328		else
   5329			mddev->external_size = 1;
   5330	}
   5331
   5332	if (!err) {
   5333		mddev->array_sectors = sectors;
   5334		if (mddev->pers)
   5335			set_capacity_and_notify(mddev->gendisk,
   5336						mddev->array_sectors);
   5337	}
   5338	mddev_unlock(mddev);
   5339	return err ?: len;
   5340}
   5341
   5342static struct md_sysfs_entry md_array_size =
   5343__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
   5344       array_size_store);
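/*
 * Illustrative sketch only: array_size above reads back either "default" or
 * the exported size in KiB (array_sectors / 2), and accepts "default" or a
 * size in KiB that array_size_store() converts to sectors.  Array name md0
 * is hypothetical.
 *
 *     cat /sys/block/md0/md/array_size              # "default" or a KiB count
 *     echo 1048576 > /sys/block/md0/md/array_size   # clamp to 1 GiB
 *     echo default > /sys/block/md0/md/array_size   # follow the personality
 */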
   5345
   5346static ssize_t
   5347consistency_policy_show(struct mddev *mddev, char *page)
   5348{
   5349	int ret;
   5350
   5351	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
   5352		ret = sprintf(page, "journal\n");
   5353	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
   5354		ret = sprintf(page, "ppl\n");
   5355	} else if (mddev->bitmap) {
   5356		ret = sprintf(page, "bitmap\n");
   5357	} else if (mddev->pers) {
   5358		if (mddev->pers->sync_request)
   5359			ret = sprintf(page, "resync\n");
   5360		else
   5361			ret = sprintf(page, "none\n");
   5362	} else {
   5363		ret = sprintf(page, "unknown\n");
   5364	}
   5365
   5366	return ret;
   5367}
   5368
   5369static ssize_t
   5370consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
   5371{
   5372	int err = 0;
   5373
   5374	if (mddev->pers) {
   5375		if (mddev->pers->change_consistency_policy)
   5376			err = mddev->pers->change_consistency_policy(mddev, buf);
   5377		else
   5378			err = -EBUSY;
   5379	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
   5380		set_bit(MD_HAS_PPL, &mddev->flags);
   5381	} else {
   5382		err = -EINVAL;
   5383	}
   5384
   5385	return err ? err : len;
   5386}
   5387
   5388static struct md_sysfs_entry md_consistency_policy =
   5389__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
   5390       consistency_policy_store);
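/*
 * Illustrative sketch only (array name md0 is hypothetical): reading
 * consistency_policy reports one of journal/ppl/bitmap/resync/none/unknown
 * as computed in consistency_policy_show(); a write is either forwarded to
 * the personality's ->change_consistency_policy() on a running array or,
 * for a not-yet-running array with external metadata, "ppl" sets the
 * MD_HAS_PPL flag directly.
 *
 *     cat /sys/block/md0/md/consistency_policy   # e.g. "bitmap"
 *     echo ppl > /sys/block/md0/md/consistency_policy
 */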
   5391
   5392static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
   5393{
   5394	return sprintf(page, "%d\n", mddev->fail_last_dev);
   5395}
   5396
   5397/*
    5398 * Setting fail_last_dev to true allows the last device to be forcibly
    5399 * removed from RAID1/RAID10.
   5400 */
   5401static ssize_t
   5402fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
   5403{
   5404	int ret;
   5405	bool value;
   5406
   5407	ret = kstrtobool(buf, &value);
   5408	if (ret)
   5409		return ret;
   5410
   5411	if (value != mddev->fail_last_dev)
   5412		mddev->fail_last_dev = value;
   5413
   5414	return len;
   5415}
   5416static struct md_sysfs_entry md_fail_last_dev =
   5417__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
   5418       fail_last_dev_store);
   5419
   5420static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
   5421{
   5422	if (mddev->pers == NULL || (mddev->pers->level != 1))
   5423		return sprintf(page, "n/a\n");
   5424	else
   5425		return sprintf(page, "%d\n", mddev->serialize_policy);
   5426}
   5427
   5428/*
    5429 * Setting serialize_policy to true enforces that write IO is not reordered
    5430 * for raid1.
   5431 */
   5432static ssize_t
   5433serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
   5434{
   5435	int err;
   5436	bool value;
   5437
   5438	err = kstrtobool(buf, &value);
   5439	if (err)
   5440		return err;
   5441
   5442	if (value == mddev->serialize_policy)
   5443		return len;
   5444
   5445	err = mddev_lock(mddev);
   5446	if (err)
   5447		return err;
   5448	if (mddev->pers == NULL || (mddev->pers->level != 1)) {
   5449		pr_err("md: serialize_policy is only effective for raid1\n");
   5450		err = -EINVAL;
   5451		goto unlock;
   5452	}
   5453
   5454	mddev_suspend(mddev);
   5455	if (value)
   5456		mddev_create_serial_pool(mddev, NULL, true);
   5457	else
   5458		mddev_destroy_serial_pool(mddev, NULL, true);
   5459	mddev->serialize_policy = value;
   5460	mddev_resume(mddev);
   5461unlock:
   5462	mddev_unlock(mddev);
   5463	return err ?: len;
   5464}
   5465
   5466static struct md_sysfs_entry md_serialize_policy =
   5467__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
   5468       serialize_policy_store);
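/*
 * Illustrative sketch only (array name md0 is hypothetical): serialize_policy
 * above takes a boolean and is only accepted for raid1; the store function
 * suspends the array while it creates or destroys the serial pool.
 *
 *     echo 1 > /sys/block/md0/md/serialize_policy   # keep write IO ordered
 *     echo 0 > /sys/block/md0/md/serialize_policy
 */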
   5469
   5470
   5471static struct attribute *md_default_attrs[] = {
   5472	&md_level.attr,
   5473	&md_layout.attr,
   5474	&md_raid_disks.attr,
   5475	&md_uuid.attr,
   5476	&md_chunk_size.attr,
   5477	&md_size.attr,
   5478	&md_resync_start.attr,
   5479	&md_metadata.attr,
   5480	&md_new_device.attr,
   5481	&md_safe_delay.attr,
   5482	&md_array_state.attr,
   5483	&md_reshape_position.attr,
   5484	&md_reshape_direction.attr,
   5485	&md_array_size.attr,
   5486	&max_corr_read_errors.attr,
   5487	&md_consistency_policy.attr,
   5488	&md_fail_last_dev.attr,
   5489	&md_serialize_policy.attr,
   5490	NULL,
   5491};
   5492
   5493static const struct attribute_group md_default_group = {
   5494	.attrs = md_default_attrs,
   5495};
   5496
   5497static struct attribute *md_redundancy_attrs[] = {
   5498	&md_scan_mode.attr,
   5499	&md_last_scan_mode.attr,
   5500	&md_mismatches.attr,
   5501	&md_sync_min.attr,
   5502	&md_sync_max.attr,
   5503	&md_sync_speed.attr,
   5504	&md_sync_force_parallel.attr,
   5505	&md_sync_completed.attr,
   5506	&md_min_sync.attr,
   5507	&md_max_sync.attr,
   5508	&md_suspend_lo.attr,
   5509	&md_suspend_hi.attr,
   5510	&md_bitmap.attr,
   5511	&md_degraded.attr,
   5512	NULL,
   5513};
   5514static const struct attribute_group md_redundancy_group = {
   5515	.name = NULL,
   5516	.attrs = md_redundancy_attrs,
   5517};
   5518
   5519static const struct attribute_group *md_attr_groups[] = {
   5520	&md_default_group,
   5521	&md_bitmap_group,
   5522	NULL,
   5523};
   5524
   5525static ssize_t
   5526md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
   5527{
   5528	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
   5529	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
   5530	ssize_t rv;
   5531
   5532	if (!entry->show)
   5533		return -EIO;
   5534	spin_lock(&all_mddevs_lock);
   5535	if (list_empty(&mddev->all_mddevs)) {
   5536		spin_unlock(&all_mddevs_lock);
   5537		return -EBUSY;
   5538	}
   5539	mddev_get(mddev);
   5540	spin_unlock(&all_mddevs_lock);
   5541
   5542	rv = entry->show(mddev, page);
   5543	mddev_put(mddev);
   5544	return rv;
   5545}
   5546
   5547static ssize_t
   5548md_attr_store(struct kobject *kobj, struct attribute *attr,
   5549	      const char *page, size_t length)
   5550{
   5551	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
   5552	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
   5553	ssize_t rv;
   5554
   5555	if (!entry->store)
   5556		return -EIO;
   5557	if (!capable(CAP_SYS_ADMIN))
   5558		return -EACCES;
   5559	spin_lock(&all_mddevs_lock);
   5560	if (list_empty(&mddev->all_mddevs)) {
   5561		spin_unlock(&all_mddevs_lock);
   5562		return -EBUSY;
   5563	}
   5564	mddev_get(mddev);
   5565	spin_unlock(&all_mddevs_lock);
   5566	rv = entry->store(mddev, page, length);
   5567	mddev_put(mddev);
   5568	return rv;
   5569}
   5570
   5571static void md_free(struct kobject *ko)
   5572{
   5573	struct mddev *mddev = container_of(ko, struct mddev, kobj);
   5574
   5575	if (mddev->sysfs_state)
   5576		sysfs_put(mddev->sysfs_state);
   5577	if (mddev->sysfs_level)
   5578		sysfs_put(mddev->sysfs_level);
   5579
   5580	if (mddev->gendisk) {
   5581		del_gendisk(mddev->gendisk);
   5582		blk_cleanup_disk(mddev->gendisk);
   5583	}
   5584	percpu_ref_exit(&mddev->writes_pending);
   5585
   5586	bioset_exit(&mddev->bio_set);
   5587	bioset_exit(&mddev->sync_set);
   5588	kfree(mddev);
   5589}
   5590
   5591static const struct sysfs_ops md_sysfs_ops = {
   5592	.show	= md_attr_show,
   5593	.store	= md_attr_store,
   5594};
   5595static struct kobj_type md_ktype = {
   5596	.release	= md_free,
   5597	.sysfs_ops	= &md_sysfs_ops,
   5598	.default_groups	= md_attr_groups,
   5599};
   5600
   5601int mdp_major = 0;
   5602
   5603static void mddev_delayed_delete(struct work_struct *ws)
   5604{
   5605	struct mddev *mddev = container_of(ws, struct mddev, del_work);
   5606
   5607	kobject_del(&mddev->kobj);
   5608	kobject_put(&mddev->kobj);
   5609}
   5610
   5611static void no_op(struct percpu_ref *r) {}
   5612
   5613int mddev_init_writes_pending(struct mddev *mddev)
   5614{
   5615	if (mddev->writes_pending.percpu_count_ptr)
   5616		return 0;
   5617	if (percpu_ref_init(&mddev->writes_pending, no_op,
   5618			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
   5619		return -ENOMEM;
   5620	/* We want to start with the refcount at zero */
   5621	percpu_ref_put(&mddev->writes_pending);
   5622	return 0;
   5623}
   5624EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
   5625
   5626static int md_alloc(dev_t dev, char *name)
   5627{
   5628	/*
   5629	 * If dev is zero, name is the name of a device to allocate with
   5630	 * an arbitrary minor number.  It will be "md_???"
   5631	 * If dev is non-zero it must be a device number with a MAJOR of
   5632	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
   5633	 * the device is being created by opening a node in /dev.
   5634	 * If "name" is not NULL, the device is being created by
   5635	 * writing to /sys/module/md_mod/parameters/new_array.
   5636	 */
   5637	static DEFINE_MUTEX(disks_mutex);
   5638	struct mddev *mddev;
   5639	struct gendisk *disk;
   5640	int partitioned;
   5641	int shift;
   5642	int unit;
    5643	int error;
   5644
   5645	/*
   5646	 * Wait for any previous instance of this device to be completely
   5647	 * removed (mddev_delayed_delete).
   5648	 */
   5649	flush_workqueue(md_misc_wq);
   5650
   5651	mutex_lock(&disks_mutex);
   5652	mddev = mddev_alloc(dev);
   5653	if (IS_ERR(mddev)) {
   5654		mutex_unlock(&disks_mutex);
   5655		return PTR_ERR(mddev);
   5656	}
   5657
   5658	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
   5659	shift = partitioned ? MdpMinorShift : 0;
   5660	unit = MINOR(mddev->unit) >> shift;
   5661
   5662	if (name && !dev) {
   5663		/* Need to ensure that 'name' is not a duplicate.
   5664		 */
   5665		struct mddev *mddev2;
   5666		spin_lock(&all_mddevs_lock);
   5667
   5668		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
   5669			if (mddev2->gendisk &&
   5670			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
   5671				spin_unlock(&all_mddevs_lock);
   5672				error = -EEXIST;
   5673				goto out_unlock_disks_mutex;
   5674			}
   5675		spin_unlock(&all_mddevs_lock);
   5676	}
   5677	if (name && dev)
   5678		/*
    5679		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
   5680		 */
   5681		mddev->hold_active = UNTIL_STOP;
   5682
   5683	error = -ENOMEM;
   5684	disk = blk_alloc_disk(NUMA_NO_NODE);
   5685	if (!disk)
   5686		goto out_unlock_disks_mutex;
   5687
   5688	disk->major = MAJOR(mddev->unit);
   5689	disk->first_minor = unit << shift;
   5690	disk->minors = 1 << shift;
   5691	if (name)
   5692		strcpy(disk->disk_name, name);
   5693	else if (partitioned)
   5694		sprintf(disk->disk_name, "md_d%d", unit);
   5695	else
   5696		sprintf(disk->disk_name, "md%d", unit);
   5697	disk->fops = &md_fops;
   5698	disk->private_data = mddev;
   5699
   5700	mddev->queue = disk->queue;
   5701	blk_set_stacking_limits(&mddev->queue->limits);
   5702	blk_queue_write_cache(mddev->queue, true, true);
   5703	disk->events |= DISK_EVENT_MEDIA_CHANGE;
   5704	mddev->gendisk = disk;
   5705	error = add_disk(disk);
   5706	if (error)
   5707		goto out_cleanup_disk;
   5708
   5709	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
   5710	if (error)
   5711		goto out_del_gendisk;
   5712
   5713	kobject_uevent(&mddev->kobj, KOBJ_ADD);
   5714	mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
   5715	mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
   5716	goto out_unlock_disks_mutex;
   5717
   5718out_del_gendisk:
   5719	del_gendisk(disk);
   5720out_cleanup_disk:
   5721	blk_cleanup_disk(disk);
   5722out_unlock_disks_mutex:
   5723	mutex_unlock(&disks_mutex);
   5724	mddev_put(mddev);
   5725	return error;
   5726}
   5727
   5728static void md_probe(dev_t dev)
   5729{
   5730	if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
   5731		return;
   5732	if (create_on_open)
   5733		md_alloc(dev, NULL);
   5734}
   5735
   5736static int add_named_array(const char *val, const struct kernel_param *kp)
   5737{
   5738	/*
   5739	 * val must be "md_*" or "mdNNN".
   5740	 * For "md_*" we allocate an array with a large free minor number, and
   5741	 * set the name to val.  val must not already be an active name.
   5742	 * For "mdNNN" we allocate an array with the minor number NNN
   5743	 * which must not already be in use.
   5744	 */
   5745	int len = strlen(val);
   5746	char buf[DISK_NAME_LEN];
   5747	unsigned long devnum;
   5748
   5749	while (len && val[len-1] == '\n')
   5750		len--;
   5751	if (len >= DISK_NAME_LEN)
   5752		return -E2BIG;
   5753	strscpy(buf, val, len+1);
   5754	if (strncmp(buf, "md_", 3) == 0)
   5755		return md_alloc(0, buf);
   5756	if (strncmp(buf, "md", 2) == 0 &&
   5757	    isdigit(buf[2]) &&
   5758	    kstrtoul(buf+2, 10, &devnum) == 0 &&
   5759	    devnum <= MINORMASK)
   5760		return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
   5761
   5762	return -EINVAL;
   5763}
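/*
 * Illustrative sketch only: the new_array module parameter handled by
 * add_named_array() above accepts either "mdNNN" (explicit minor) or an
 * "md_*" name; both example names below are hypothetical.
 *
 *     echo md127   > /sys/module/md_mod/parameters/new_array   # /dev/md127
 *     echo md_home > /sys/module/md_mod/parameters/new_array   # named array
 */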
   5764
   5765static void md_safemode_timeout(struct timer_list *t)
   5766{
   5767	struct mddev *mddev = from_timer(mddev, t, safemode_timer);
   5768
   5769	mddev->safemode = 1;
   5770	if (mddev->external)
   5771		sysfs_notify_dirent_safe(mddev->sysfs_state);
   5772
   5773	md_wakeup_thread(mddev->thread);
   5774}
   5775
   5776static int start_dirty_degraded;
   5777
   5778int md_run(struct mddev *mddev)
   5779{
   5780	int err;
   5781	struct md_rdev *rdev;
   5782	struct md_personality *pers;
   5783	bool nowait = true;
   5784
   5785	if (list_empty(&mddev->disks))
    5786		/* cannot run an array with no devices. */
   5787		return -EINVAL;
   5788
   5789	if (mddev->pers)
   5790		return -EBUSY;
   5791	/* Cannot run until previous stop completes properly */
   5792	if (mddev->sysfs_active)
   5793		return -EBUSY;
   5794
   5795	/*
   5796	 * Analyze all RAID superblock(s)
   5797	 */
   5798	if (!mddev->raid_disks) {
   5799		if (!mddev->persistent)
   5800			return -EINVAL;
   5801		err = analyze_sbs(mddev);
   5802		if (err)
   5803			return -EINVAL;
   5804	}
   5805
   5806	if (mddev->level != LEVEL_NONE)
   5807		request_module("md-level-%d", mddev->level);
   5808	else if (mddev->clevel[0])
   5809		request_module("md-%s", mddev->clevel);
   5810
   5811	/*
   5812	 * Drop all container device buffers, from now on
   5813	 * the only valid external interface is through the md
   5814	 * device.
   5815	 */
   5816	mddev->has_superblocks = false;
   5817	rdev_for_each(rdev, mddev) {
   5818		if (test_bit(Faulty, &rdev->flags))
   5819			continue;
   5820		sync_blockdev(rdev->bdev);
   5821		invalidate_bdev(rdev->bdev);
   5822		if (mddev->ro != 1 && rdev_read_only(rdev)) {
   5823			mddev->ro = 1;
   5824			if (mddev->gendisk)
   5825				set_disk_ro(mddev->gendisk, 1);
   5826		}
   5827
   5828		if (rdev->sb_page)
   5829			mddev->has_superblocks = true;
   5830
   5831		/* perform some consistency tests on the device.
    5832		 * We don't want the data to overlap the metadata;
    5833		 * internal bitmap issues have been handled elsewhere.
   5834		 */
   5835		if (rdev->meta_bdev) {
   5836			/* Nothing to check */;
   5837		} else if (rdev->data_offset < rdev->sb_start) {
   5838			if (mddev->dev_sectors &&
   5839			    rdev->data_offset + mddev->dev_sectors
   5840			    > rdev->sb_start) {
   5841				pr_warn("md: %s: data overlaps metadata\n",
   5842					mdname(mddev));
   5843				return -EINVAL;
   5844			}
   5845		} else {
   5846			if (rdev->sb_start + rdev->sb_size/512
   5847			    > rdev->data_offset) {
   5848				pr_warn("md: %s: metadata overlaps data\n",
   5849					mdname(mddev));
   5850				return -EINVAL;
   5851			}
   5852		}
   5853		sysfs_notify_dirent_safe(rdev->sysfs_state);
   5854		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
   5855	}
   5856
   5857	if (!bioset_initialized(&mddev->bio_set)) {
   5858		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
   5859		if (err)
   5860			return err;
   5861	}
   5862	if (!bioset_initialized(&mddev->sync_set)) {
   5863		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
   5864		if (err)
   5865			goto exit_bio_set;
   5866	}
   5867
   5868	spin_lock(&pers_lock);
   5869	pers = find_pers(mddev->level, mddev->clevel);
   5870	if (!pers || !try_module_get(pers->owner)) {
   5871		spin_unlock(&pers_lock);
   5872		if (mddev->level != LEVEL_NONE)
   5873			pr_warn("md: personality for level %d is not loaded!\n",
   5874				mddev->level);
   5875		else
   5876			pr_warn("md: personality for level %s is not loaded!\n",
   5877				mddev->clevel);
   5878		err = -EINVAL;
   5879		goto abort;
   5880	}
   5881	spin_unlock(&pers_lock);
   5882	if (mddev->level != pers->level) {
   5883		mddev->level = pers->level;
   5884		mddev->new_level = pers->level;
   5885	}
   5886	strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
   5887
   5888	if (mddev->reshape_position != MaxSector &&
   5889	    pers->start_reshape == NULL) {
   5890		/* This personality cannot handle reshaping... */
   5891		module_put(pers->owner);
   5892		err = -EINVAL;
   5893		goto abort;
   5894	}
   5895
   5896	if (pers->sync_request) {
   5897		/* Warn if this is a potentially silly
   5898		 * configuration.
   5899		 */
   5900		struct md_rdev *rdev2;
   5901		int warned = 0;
   5902
   5903		rdev_for_each(rdev, mddev)
   5904			rdev_for_each(rdev2, mddev) {
   5905				if (rdev < rdev2 &&
   5906				    rdev->bdev->bd_disk ==
   5907				    rdev2->bdev->bd_disk) {
   5908					pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
   5909						mdname(mddev),
   5910						rdev->bdev,
   5911						rdev2->bdev);
   5912					warned = 1;
   5913				}
   5914			}
   5915
   5916		if (warned)
   5917			pr_warn("True protection against single-disk failure might be compromised.\n");
   5918	}
   5919
   5920	mddev->recovery = 0;
    5921	/* may be overridden by personality */
   5922	mddev->resync_max_sectors = mddev->dev_sectors;
   5923
   5924	mddev->ok_start_degraded = start_dirty_degraded;
   5925
   5926	if (start_readonly && mddev->ro == 0)
   5927		mddev->ro = 2; /* read-only, but switch on first write */
   5928
   5929	err = pers->run(mddev);
   5930	if (err)
   5931		pr_warn("md: pers->run() failed ...\n");
   5932	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
   5933		WARN_ONCE(!mddev->external_size,
   5934			  "%s: default size too small, but 'external_size' not in effect?\n",
   5935			  __func__);
   5936		pr_warn("md: invalid array_size %llu > default size %llu\n",
   5937			(unsigned long long)mddev->array_sectors / 2,
   5938			(unsigned long long)pers->size(mddev, 0, 0) / 2);
   5939		err = -EINVAL;
   5940	}
   5941	if (err == 0 && pers->sync_request &&
   5942	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
   5943		struct bitmap *bitmap;
   5944
   5945		bitmap = md_bitmap_create(mddev, -1);
   5946		if (IS_ERR(bitmap)) {
   5947			err = PTR_ERR(bitmap);
   5948			pr_warn("%s: failed to create bitmap (%d)\n",
   5949				mdname(mddev), err);
   5950		} else
   5951			mddev->bitmap = bitmap;
   5952
   5953	}
   5954	if (err)
   5955		goto bitmap_abort;
   5956
   5957	if (mddev->bitmap_info.max_write_behind > 0) {
   5958		bool create_pool = false;
   5959
   5960		rdev_for_each(rdev, mddev) {
   5961			if (test_bit(WriteMostly, &rdev->flags) &&
   5962			    rdev_init_serial(rdev))
   5963				create_pool = true;
   5964		}
   5965		if (create_pool && mddev->serial_info_pool == NULL) {
   5966			mddev->serial_info_pool =
   5967				mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
   5968						    sizeof(struct serial_info));
   5969			if (!mddev->serial_info_pool) {
   5970				err = -ENOMEM;
   5971				goto bitmap_abort;
   5972			}
   5973		}
   5974	}
   5975
   5976	if (mddev->queue) {
   5977		bool nonrot = true;
   5978
   5979		rdev_for_each(rdev, mddev) {
   5980			if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
   5981				nonrot = false;
   5982				break;
   5983			}
   5984		}
   5985		if (mddev->degraded)
   5986			nonrot = false;
   5987		if (nonrot)
   5988			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
   5989		else
   5990			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
   5991		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
   5992
   5993		/* Set the NOWAIT flags if all underlying devices support it */
   5994		if (nowait)
   5995			blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
   5996	}
   5997	if (pers->sync_request) {
   5998		if (mddev->kobj.sd &&
   5999		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
   6000			pr_warn("md: cannot register extra attributes for %s\n",
   6001				mdname(mddev));
   6002		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
   6003		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
   6004		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
   6005	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
   6006		mddev->ro = 0;
   6007
   6008	atomic_set(&mddev->max_corr_read_errors,
   6009		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
   6010	mddev->safemode = 0;
   6011	if (mddev_is_clustered(mddev))
   6012		mddev->safemode_delay = 0;
   6013	else
   6014		mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
   6015	mddev->in_sync = 1;
   6016	smp_wmb();
   6017	spin_lock(&mddev->lock);
   6018	mddev->pers = pers;
   6019	spin_unlock(&mddev->lock);
   6020	rdev_for_each(rdev, mddev)
   6021		if (rdev->raid_disk >= 0)
   6022			sysfs_link_rdev(mddev, rdev); /* failure here is OK */
   6023
   6024	if (mddev->degraded && !mddev->ro)
   6025		/* This ensures that recovering status is reported immediately
   6026		 * via sysfs - until a lack of spares is confirmed.
   6027		 */
   6028		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   6029	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   6030
   6031	if (mddev->sb_flags)
   6032		md_update_sb(mddev, 0);
   6033
   6034	md_new_event();
   6035	return 0;
   6036
   6037bitmap_abort:
   6038	mddev_detach(mddev);
   6039	if (mddev->private)
   6040		pers->free(mddev, mddev->private);
   6041	mddev->private = NULL;
   6042	module_put(pers->owner);
   6043	md_bitmap_destroy(mddev);
   6044abort:
   6045	bioset_exit(&mddev->sync_set);
   6046exit_bio_set:
   6047	bioset_exit(&mddev->bio_set);
   6048	return err;
   6049}
   6050EXPORT_SYMBOL_GPL(md_run);
   6051
   6052int do_md_run(struct mddev *mddev)
   6053{
   6054	int err;
   6055
   6056	set_bit(MD_NOT_READY, &mddev->flags);
   6057	err = md_run(mddev);
   6058	if (err)
   6059		goto out;
   6060	err = md_bitmap_load(mddev);
   6061	if (err) {
   6062		md_bitmap_destroy(mddev);
   6063		goto out;
   6064	}
   6065
   6066	if (mddev_is_clustered(mddev))
   6067		md_allow_write(mddev);
   6068
   6069	/* run start up tasks that require md_thread */
   6070	md_start(mddev);
   6071
   6072	md_wakeup_thread(mddev->thread);
   6073	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
   6074
   6075	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
   6076	clear_bit(MD_NOT_READY, &mddev->flags);
   6077	mddev->changed = 1;
   6078	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
   6079	sysfs_notify_dirent_safe(mddev->sysfs_state);
   6080	sysfs_notify_dirent_safe(mddev->sysfs_action);
   6081	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
   6082out:
   6083	clear_bit(MD_NOT_READY, &mddev->flags);
   6084	return err;
   6085}
   6086
   6087int md_start(struct mddev *mddev)
   6088{
   6089	int ret = 0;
   6090
   6091	if (mddev->pers->start) {
   6092		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
   6093		md_wakeup_thread(mddev->thread);
   6094		ret = mddev->pers->start(mddev);
   6095		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
   6096		md_wakeup_thread(mddev->sync_thread);
   6097	}
   6098	return ret;
   6099}
   6100EXPORT_SYMBOL_GPL(md_start);
   6101
   6102static int restart_array(struct mddev *mddev)
   6103{
   6104	struct gendisk *disk = mddev->gendisk;
   6105	struct md_rdev *rdev;
   6106	bool has_journal = false;
   6107	bool has_readonly = false;
   6108
   6109	/* Complain if it has no devices */
   6110	if (list_empty(&mddev->disks))
   6111		return -ENXIO;
   6112	if (!mddev->pers)
   6113		return -EINVAL;
   6114	if (!mddev->ro)
   6115		return -EBUSY;
   6116
   6117	rcu_read_lock();
   6118	rdev_for_each_rcu(rdev, mddev) {
   6119		if (test_bit(Journal, &rdev->flags) &&
   6120		    !test_bit(Faulty, &rdev->flags))
   6121			has_journal = true;
   6122		if (rdev_read_only(rdev))
   6123			has_readonly = true;
   6124	}
   6125	rcu_read_unlock();
   6126	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
   6127		/* Don't restart rw with journal missing/faulty */
   6128			return -EINVAL;
   6129	if (has_readonly)
   6130		return -EROFS;
   6131
   6132	mddev->safemode = 0;
   6133	mddev->ro = 0;
   6134	set_disk_ro(disk, 0);
   6135	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
   6136	/* Kick recovery or resync if necessary */
   6137	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   6138	md_wakeup_thread(mddev->thread);
   6139	md_wakeup_thread(mddev->sync_thread);
   6140	sysfs_notify_dirent_safe(mddev->sysfs_state);
   6141	return 0;
   6142}
   6143
   6144static void md_clean(struct mddev *mddev)
   6145{
   6146	mddev->array_sectors = 0;
   6147	mddev->external_size = 0;
   6148	mddev->dev_sectors = 0;
   6149	mddev->raid_disks = 0;
   6150	mddev->recovery_cp = 0;
   6151	mddev->resync_min = 0;
   6152	mddev->resync_max = MaxSector;
   6153	mddev->reshape_position = MaxSector;
   6154	mddev->external = 0;
   6155	mddev->persistent = 0;
   6156	mddev->level = LEVEL_NONE;
   6157	mddev->clevel[0] = 0;
   6158	mddev->flags = 0;
   6159	mddev->sb_flags = 0;
   6160	mddev->ro = 0;
   6161	mddev->metadata_type[0] = 0;
   6162	mddev->chunk_sectors = 0;
   6163	mddev->ctime = mddev->utime = 0;
   6164	mddev->layout = 0;
   6165	mddev->max_disks = 0;
   6166	mddev->events = 0;
   6167	mddev->can_decrease_events = 0;
   6168	mddev->delta_disks = 0;
   6169	mddev->reshape_backwards = 0;
   6170	mddev->new_level = LEVEL_NONE;
   6171	mddev->new_layout = 0;
   6172	mddev->new_chunk_sectors = 0;
   6173	mddev->curr_resync = 0;
   6174	atomic64_set(&mddev->resync_mismatches, 0);
   6175	mddev->suspend_lo = mddev->suspend_hi = 0;
   6176	mddev->sync_speed_min = mddev->sync_speed_max = 0;
   6177	mddev->recovery = 0;
   6178	mddev->in_sync = 0;
   6179	mddev->changed = 0;
   6180	mddev->degraded = 0;
   6181	mddev->safemode = 0;
   6182	mddev->private = NULL;
   6183	mddev->cluster_info = NULL;
   6184	mddev->bitmap_info.offset = 0;
   6185	mddev->bitmap_info.default_offset = 0;
   6186	mddev->bitmap_info.default_space = 0;
   6187	mddev->bitmap_info.chunksize = 0;
   6188	mddev->bitmap_info.daemon_sleep = 0;
   6189	mddev->bitmap_info.max_write_behind = 0;
   6190	mddev->bitmap_info.nodes = 0;
   6191}
   6192
   6193static void __md_stop_writes(struct mddev *mddev)
   6194{
   6195	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6196	if (work_pending(&mddev->del_work))
   6197		flush_workqueue(md_misc_wq);
   6198	if (mddev->sync_thread) {
   6199		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   6200		md_reap_sync_thread(mddev);
   6201	}
   6202
   6203	del_timer_sync(&mddev->safemode_timer);
   6204
   6205	if (mddev->pers && mddev->pers->quiesce) {
   6206		mddev->pers->quiesce(mddev, 1);
   6207		mddev->pers->quiesce(mddev, 0);
   6208	}
   6209	md_bitmap_flush(mddev);
   6210
   6211	if (mddev->ro == 0 &&
   6212	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
   6213	     mddev->sb_flags)) {
   6214		/* mark array as shutdown cleanly */
   6215		if (!mddev_is_clustered(mddev))
   6216			mddev->in_sync = 1;
   6217		md_update_sb(mddev, 1);
   6218	}
   6219	/* disable policy to guarantee rdevs free resources for serialization */
   6220	mddev->serialize_policy = 0;
   6221	mddev_destroy_serial_pool(mddev, NULL, true);
   6222}
   6223
   6224void md_stop_writes(struct mddev *mddev)
   6225{
   6226	mddev_lock_nointr(mddev);
   6227	__md_stop_writes(mddev);
   6228	mddev_unlock(mddev);
   6229}
   6230EXPORT_SYMBOL_GPL(md_stop_writes);
   6231
   6232static void mddev_detach(struct mddev *mddev)
   6233{
   6234	md_bitmap_wait_behind_writes(mddev);
   6235	if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
   6236		mddev->pers->quiesce(mddev, 1);
   6237		mddev->pers->quiesce(mddev, 0);
   6238	}
   6239	md_unregister_thread(&mddev->thread);
   6240	if (mddev->queue)
   6241		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
   6242}
   6243
   6244static void __md_stop(struct mddev *mddev)
   6245{
   6246	struct md_personality *pers = mddev->pers;
   6247	md_bitmap_destroy(mddev);
   6248	mddev_detach(mddev);
   6249	/* Ensure ->event_work is done */
   6250	if (mddev->event_work.func)
   6251		flush_workqueue(md_misc_wq);
   6252	spin_lock(&mddev->lock);
   6253	mddev->pers = NULL;
   6254	spin_unlock(&mddev->lock);
   6255	if (mddev->private)
   6256		pers->free(mddev, mddev->private);
   6257	mddev->private = NULL;
   6258	if (pers->sync_request && mddev->to_remove == NULL)
   6259		mddev->to_remove = &md_redundancy_group;
   6260	module_put(pers->owner);
   6261	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6262}
   6263
   6264void md_stop(struct mddev *mddev)
   6265{
    6266	/* stop the array and free any attached data structures.
    6267	 * This is called from dm-raid.
   6268	 */
   6269	__md_stop(mddev);
   6270	bioset_exit(&mddev->bio_set);
   6271	bioset_exit(&mddev->sync_set);
   6272}
   6273
   6274EXPORT_SYMBOL_GPL(md_stop);
   6275
   6276static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
   6277{
   6278	int err = 0;
   6279	int did_freeze = 0;
   6280
   6281	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
   6282		did_freeze = 1;
   6283		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6284		md_wakeup_thread(mddev->thread);
   6285	}
   6286	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   6287		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   6288	if (mddev->sync_thread)
   6289		/* Thread might be blocked waiting for metadata update
   6290		 * which will now never happen */
   6291		wake_up_process(mddev->sync_thread->tsk);
   6292
   6293	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
   6294		return -EBUSY;
   6295	mddev_unlock(mddev);
   6296	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
   6297					  &mddev->recovery));
   6298	wait_event(mddev->sb_wait,
   6299		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
   6300	mddev_lock_nointr(mddev);
   6301
   6302	mutex_lock(&mddev->open_mutex);
   6303	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
   6304	    mddev->sync_thread ||
   6305	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
    6306		pr_warn("md: %s still in use.\n", mdname(mddev));
   6307		if (did_freeze) {
   6308			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6309			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   6310			md_wakeup_thread(mddev->thread);
   6311		}
   6312		err = -EBUSY;
   6313		goto out;
   6314	}
   6315	if (mddev->pers) {
   6316		__md_stop_writes(mddev);
   6317
    6318		err = -ENXIO;
    6319		if (mddev->ro == 1)
   6320			goto out;
   6321		mddev->ro = 1;
   6322		set_disk_ro(mddev->gendisk, 1);
   6323		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6324		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   6325		md_wakeup_thread(mddev->thread);
   6326		sysfs_notify_dirent_safe(mddev->sysfs_state);
   6327		err = 0;
   6328	}
   6329out:
   6330	mutex_unlock(&mddev->open_mutex);
   6331	return err;
   6332}
   6333
   6334/* mode:
   6335 *   0 - completely stop and dis-assemble array
   6336 *   2 - stop but do not disassemble array
   6337 */
   6338static int do_md_stop(struct mddev *mddev, int mode,
   6339		      struct block_device *bdev)
   6340{
   6341	struct gendisk *disk = mddev->gendisk;
   6342	struct md_rdev *rdev;
   6343	int did_freeze = 0;
   6344
   6345	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
   6346		did_freeze = 1;
   6347		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6348		md_wakeup_thread(mddev->thread);
   6349	}
   6350	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   6351		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   6352	if (mddev->sync_thread)
   6353		/* Thread might be blocked waiting for metadata update
   6354		 * which will now never happen */
   6355		wake_up_process(mddev->sync_thread->tsk);
   6356
   6357	mddev_unlock(mddev);
   6358	wait_event(resync_wait, (mddev->sync_thread == NULL &&
   6359				 !test_bit(MD_RECOVERY_RUNNING,
   6360					   &mddev->recovery)));
   6361	mddev_lock_nointr(mddev);
   6362
   6363	mutex_lock(&mddev->open_mutex);
   6364	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
   6365	    mddev->sysfs_active ||
   6366	    mddev->sync_thread ||
   6367	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
    6368		pr_warn("md: %s still in use.\n", mdname(mddev));
   6369		mutex_unlock(&mddev->open_mutex);
   6370		if (did_freeze) {
   6371			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
   6372			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   6373			md_wakeup_thread(mddev->thread);
   6374		}
   6375		return -EBUSY;
   6376	}
   6377	if (mddev->pers) {
   6378		if (mddev->ro)
   6379			set_disk_ro(disk, 0);
   6380
   6381		__md_stop_writes(mddev);
   6382		__md_stop(mddev);
   6383
   6384		/* tell userspace to handle 'inactive' */
   6385		sysfs_notify_dirent_safe(mddev->sysfs_state);
   6386
   6387		rdev_for_each(rdev, mddev)
   6388			if (rdev->raid_disk >= 0)
   6389				sysfs_unlink_rdev(mddev, rdev);
   6390
   6391		set_capacity_and_notify(disk, 0);
   6392		mutex_unlock(&mddev->open_mutex);
   6393		mddev->changed = 1;
   6394
   6395		if (mddev->ro)
   6396			mddev->ro = 0;
   6397	} else
   6398		mutex_unlock(&mddev->open_mutex);
   6399	/*
   6400	 * Free resources if final stop
   6401	 */
   6402	if (mode == 0) {
   6403		pr_info("md: %s stopped.\n", mdname(mddev));
   6404
   6405		if (mddev->bitmap_info.file) {
   6406			struct file *f = mddev->bitmap_info.file;
   6407			spin_lock(&mddev->lock);
   6408			mddev->bitmap_info.file = NULL;
   6409			spin_unlock(&mddev->lock);
   6410			fput(f);
   6411		}
   6412		mddev->bitmap_info.offset = 0;
   6413
   6414		export_array(mddev);
   6415
   6416		md_clean(mddev);
   6417		if (mddev->hold_active == UNTIL_STOP)
   6418			mddev->hold_active = 0;
   6419	}
   6420	md_new_event();
   6421	sysfs_notify_dirent_safe(mddev->sysfs_state);
   6422	return 0;
   6423}
   6424
   6425#ifndef MODULE
   6426static void autorun_array(struct mddev *mddev)
   6427{
   6428	struct md_rdev *rdev;
   6429	int err;
   6430
   6431	if (list_empty(&mddev->disks))
   6432		return;
   6433
   6434	pr_info("md: running: ");
   6435
   6436	rdev_for_each(rdev, mddev) {
   6437		pr_cont("<%pg>", rdev->bdev);
   6438	}
   6439	pr_cont("\n");
   6440
   6441	err = do_md_run(mddev);
   6442	if (err) {
   6443		pr_warn("md: do_md_run() returned %d\n", err);
   6444		do_md_stop(mddev, 0, NULL);
   6445	}
   6446}
   6447
   6448/*
    6449 * let's try to run arrays based on all disks that have arrived
    6450 * until now (those are in pending_raid_disks).
   6451 *
   6452 * the method: pick the first pending disk, collect all disks with
   6453 * the same UUID, remove all from the pending list and put them into
   6454 * the 'same_array' list. Then order this list based on superblock
   6455 * update time (freshest comes first), kick out 'old' disks and
   6456 * compare superblocks. If everything's fine then run it.
   6457 *
   6458 * If "unit" is allocated, then bump its reference count
   6459 */
   6460static void autorun_devices(int part)
   6461{
   6462	struct md_rdev *rdev0, *rdev, *tmp;
   6463	struct mddev *mddev;
   6464
   6465	pr_info("md: autorun ...\n");
   6466	while (!list_empty(&pending_raid_disks)) {
   6467		int unit;
   6468		dev_t dev;
   6469		LIST_HEAD(candidates);
   6470		rdev0 = list_entry(pending_raid_disks.next,
   6471					 struct md_rdev, same_set);
   6472
   6473		pr_debug("md: considering %pg ...\n", rdev0->bdev);
   6474		INIT_LIST_HEAD(&candidates);
   6475		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
   6476			if (super_90_load(rdev, rdev0, 0) >= 0) {
   6477				pr_debug("md:  adding %pg ...\n",
   6478					 rdev->bdev);
   6479				list_move(&rdev->same_set, &candidates);
   6480			}
   6481		/*
   6482		 * now we have a set of devices, with all of them having
   6483		 * mostly sane superblocks. It's time to allocate the
   6484		 * mddev.
   6485		 */
   6486		if (part) {
   6487			dev = MKDEV(mdp_major,
   6488				    rdev0->preferred_minor << MdpMinorShift);
   6489			unit = MINOR(dev) >> MdpMinorShift;
   6490		} else {
   6491			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
   6492			unit = MINOR(dev);
   6493		}
   6494		if (rdev0->preferred_minor != unit) {
   6495			pr_warn("md: unit number in %pg is bad: %d\n",
   6496				rdev0->bdev, rdev0->preferred_minor);
   6497			break;
   6498		}
   6499
   6500		md_probe(dev);
   6501		mddev = mddev_find(dev);
   6502		if (!mddev)
   6503			break;
   6504
   6505		if (mddev_lock(mddev))
   6506			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
   6507		else if (mddev->raid_disks || mddev->major_version
   6508			 || !list_empty(&mddev->disks)) {
   6509			pr_warn("md: %s already running, cannot run %pg\n",
   6510				mdname(mddev), rdev0->bdev);
   6511			mddev_unlock(mddev);
   6512		} else {
   6513			pr_debug("md: created %s\n", mdname(mddev));
   6514			mddev->persistent = 1;
   6515			rdev_for_each_list(rdev, tmp, &candidates) {
   6516				list_del_init(&rdev->same_set);
   6517				if (bind_rdev_to_array(rdev, mddev))
   6518					export_rdev(rdev);
   6519			}
   6520			autorun_array(mddev);
   6521			mddev_unlock(mddev);
   6522		}
    6523		/* on success, candidates will be empty; on error
    6524		 * it won't be...
   6525		 */
   6526		rdev_for_each_list(rdev, tmp, &candidates) {
   6527			list_del_init(&rdev->same_set);
   6528			export_rdev(rdev);
   6529		}
   6530		mddev_put(mddev);
   6531	}
   6532	pr_info("md: ... autorun DONE.\n");
   6533}
   6534#endif /* !MODULE */
   6535
   6536static int get_version(void __user *arg)
   6537{
   6538	mdu_version_t ver;
   6539
   6540	ver.major = MD_MAJOR_VERSION;
   6541	ver.minor = MD_MINOR_VERSION;
   6542	ver.patchlevel = MD_PATCHLEVEL_VERSION;
   6543
   6544	if (copy_to_user(arg, &ver, sizeof(ver)))
   6545		return -EFAULT;
   6546
   6547	return 0;
   6548}
   6549
   6550static int get_array_info(struct mddev *mddev, void __user *arg)
   6551{
   6552	mdu_array_info_t info;
    6553	int nr, working, insync, failed, spare;
   6554	struct md_rdev *rdev;
   6555
   6556	nr = working = insync = failed = spare = 0;
   6557	rcu_read_lock();
   6558	rdev_for_each_rcu(rdev, mddev) {
   6559		nr++;
   6560		if (test_bit(Faulty, &rdev->flags))
   6561			failed++;
   6562		else {
   6563			working++;
   6564			if (test_bit(In_sync, &rdev->flags))
   6565				insync++;
   6566			else if (test_bit(Journal, &rdev->flags))
   6567				/* TODO: add journal count to md_u.h */
   6568				;
   6569			else
   6570				spare++;
   6571		}
   6572	}
   6573	rcu_read_unlock();
   6574
   6575	info.major_version = mddev->major_version;
   6576	info.minor_version = mddev->minor_version;
   6577	info.patch_version = MD_PATCHLEVEL_VERSION;
   6578	info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
   6579	info.level         = mddev->level;
   6580	info.size          = mddev->dev_sectors / 2;
   6581	if (info.size != mddev->dev_sectors / 2) /* overflow */
   6582		info.size = -1;
   6583	info.nr_disks      = nr;
   6584	info.raid_disks    = mddev->raid_disks;
   6585	info.md_minor      = mddev->md_minor;
    6586	info.not_persistent = !mddev->persistent;
   6587
   6588	info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
   6589	info.state         = 0;
   6590	if (mddev->in_sync)
   6591		info.state = (1<<MD_SB_CLEAN);
   6592	if (mddev->bitmap && mddev->bitmap_info.offset)
   6593		info.state |= (1<<MD_SB_BITMAP_PRESENT);
   6594	if (mddev_is_clustered(mddev))
   6595		info.state |= (1<<MD_SB_CLUSTERED);
   6596	info.active_disks  = insync;
   6597	info.working_disks = working;
   6598	info.failed_disks  = failed;
   6599	info.spare_disks   = spare;
   6600
   6601	info.layout        = mddev->layout;
   6602	info.chunk_size    = mddev->chunk_sectors << 9;
   6603
   6604	if (copy_to_user(arg, &info, sizeof(info)))
   6605		return -EFAULT;
   6606
   6607	return 0;
   6608}
   6609
   6610static int get_bitmap_file(struct mddev *mddev, void __user * arg)
   6611{
   6612	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
   6613	char *ptr;
   6614	int err;
   6615
   6616	file = kzalloc(sizeof(*file), GFP_NOIO);
   6617	if (!file)
   6618		return -ENOMEM;
   6619
   6620	err = 0;
   6621	spin_lock(&mddev->lock);
   6622	/* bitmap enabled */
   6623	if (mddev->bitmap_info.file) {
   6624		ptr = file_path(mddev->bitmap_info.file, file->pathname,
   6625				sizeof(file->pathname));
   6626		if (IS_ERR(ptr))
   6627			err = PTR_ERR(ptr);
   6628		else
   6629			memmove(file->pathname, ptr,
   6630				sizeof(file->pathname)-(ptr-file->pathname));
   6631	}
   6632	spin_unlock(&mddev->lock);
   6633
   6634	if (err == 0 &&
   6635	    copy_to_user(arg, file, sizeof(*file)))
   6636		err = -EFAULT;
   6637
   6638	kfree(file);
   6639	return err;
   6640}
   6641
   6642static int get_disk_info(struct mddev *mddev, void __user * arg)
   6643{
   6644	mdu_disk_info_t info;
   6645	struct md_rdev *rdev;
   6646
   6647	if (copy_from_user(&info, arg, sizeof(info)))
   6648		return -EFAULT;
   6649
   6650	rcu_read_lock();
   6651	rdev = md_find_rdev_nr_rcu(mddev, info.number);
   6652	if (rdev) {
   6653		info.major = MAJOR(rdev->bdev->bd_dev);
   6654		info.minor = MINOR(rdev->bdev->bd_dev);
   6655		info.raid_disk = rdev->raid_disk;
   6656		info.state = 0;
   6657		if (test_bit(Faulty, &rdev->flags))
   6658			info.state |= (1<<MD_DISK_FAULTY);
   6659		else if (test_bit(In_sync, &rdev->flags)) {
   6660			info.state |= (1<<MD_DISK_ACTIVE);
   6661			info.state |= (1<<MD_DISK_SYNC);
   6662		}
   6663		if (test_bit(Journal, &rdev->flags))
   6664			info.state |= (1<<MD_DISK_JOURNAL);
   6665		if (test_bit(WriteMostly, &rdev->flags))
   6666			info.state |= (1<<MD_DISK_WRITEMOSTLY);
   6667		if (test_bit(FailFast, &rdev->flags))
   6668			info.state |= (1<<MD_DISK_FAILFAST);
   6669	} else {
   6670		info.major = info.minor = 0;
   6671		info.raid_disk = -1;
   6672		info.state = (1<<MD_DISK_REMOVED);
   6673	}
   6674	rcu_read_unlock();
   6675
   6676	if (copy_to_user(arg, &info, sizeof(info)))
   6677		return -EFAULT;
   6678
   6679	return 0;
   6680}
   6681
   6682int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
   6683{
   6684	struct md_rdev *rdev;
    6685	dev_t dev = MKDEV(info->major, info->minor);
   6686
   6687	if (mddev_is_clustered(mddev) &&
   6688		!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
   6689		pr_warn("%s: Cannot add to clustered mddev.\n",
   6690			mdname(mddev));
   6691		return -EINVAL;
   6692	}
   6693
   6694	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
   6695		return -EOVERFLOW;
   6696
   6697	if (!mddev->raid_disks) {
   6698		int err;
   6699		/* expecting a device which has a superblock */
   6700		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
   6701		if (IS_ERR(rdev)) {
   6702			pr_warn("md: md_import_device returned %ld\n",
   6703				PTR_ERR(rdev));
   6704			return PTR_ERR(rdev);
   6705		}
   6706		if (!list_empty(&mddev->disks)) {
   6707			struct md_rdev *rdev0
   6708				= list_entry(mddev->disks.next,
   6709					     struct md_rdev, same_set);
   6710			err = super_types[mddev->major_version]
   6711				.load_super(rdev, rdev0, mddev->minor_version);
   6712			if (err < 0) {
   6713				pr_warn("md: %pg has different UUID to %pg\n",
   6714					rdev->bdev,
   6715					rdev0->bdev);
   6716				export_rdev(rdev);
   6717				return -EINVAL;
   6718			}
   6719		}
   6720		err = bind_rdev_to_array(rdev, mddev);
   6721		if (err)
   6722			export_rdev(rdev);
   6723		return err;
   6724	}
   6725
   6726	/*
   6727	 * md_add_new_disk can be used once the array is assembled
   6728	 * to add "hot spares".  They must already have a superblock
   6729	 * written
    6730	 * written.
   6731	if (mddev->pers) {
   6732		int err;
   6733		if (!mddev->pers->hot_add_disk) {
   6734			pr_warn("%s: personality does not support diskops!\n",
   6735				mdname(mddev));
   6736			return -EINVAL;
   6737		}
   6738		if (mddev->persistent)
   6739			rdev = md_import_device(dev, mddev->major_version,
   6740						mddev->minor_version);
   6741		else
   6742			rdev = md_import_device(dev, -1, -1);
   6743		if (IS_ERR(rdev)) {
   6744			pr_warn("md: md_import_device returned %ld\n",
   6745				PTR_ERR(rdev));
   6746			return PTR_ERR(rdev);
   6747		}
   6748		/* set saved_raid_disk if appropriate */
   6749		if (!mddev->persistent) {
   6750			if (info->state & (1<<MD_DISK_SYNC)  &&
   6751			    info->raid_disk < mddev->raid_disks) {
   6752				rdev->raid_disk = info->raid_disk;
   6753				set_bit(In_sync, &rdev->flags);
   6754				clear_bit(Bitmap_sync, &rdev->flags);
   6755			} else
   6756				rdev->raid_disk = -1;
   6757			rdev->saved_raid_disk = rdev->raid_disk;
   6758		} else
   6759			super_types[mddev->major_version].
   6760				validate_super(mddev, rdev);
   6761		if ((info->state & (1<<MD_DISK_SYNC)) &&
   6762		     rdev->raid_disk != info->raid_disk) {
   6763			/* This was a hot-add request, but events doesn't
   6764			 * match, so reject it.
   6765			 */
   6766			export_rdev(rdev);
   6767			return -EINVAL;
   6768		}
   6769
   6770		clear_bit(In_sync, &rdev->flags); /* just to be sure */
   6771		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
   6772			set_bit(WriteMostly, &rdev->flags);
   6773		else
   6774			clear_bit(WriteMostly, &rdev->flags);
   6775		if (info->state & (1<<MD_DISK_FAILFAST))
   6776			set_bit(FailFast, &rdev->flags);
   6777		else
   6778			clear_bit(FailFast, &rdev->flags);
   6779
   6780		if (info->state & (1<<MD_DISK_JOURNAL)) {
   6781			struct md_rdev *rdev2;
   6782			bool has_journal = false;
   6783
    6784			/* make sure there is no existing journal disk */
   6785			rdev_for_each(rdev2, mddev) {
   6786				if (test_bit(Journal, &rdev2->flags)) {
   6787					has_journal = true;
   6788					break;
   6789				}
   6790			}
   6791			if (has_journal || mddev->bitmap) {
   6792				export_rdev(rdev);
   6793				return -EBUSY;
   6794			}
   6795			set_bit(Journal, &rdev->flags);
   6796		}
   6797		/*
   6798		 * check whether the device shows up in other nodes
   6799		 */
   6800		if (mddev_is_clustered(mddev)) {
   6801			if (info->state & (1 << MD_DISK_CANDIDATE))
   6802				set_bit(Candidate, &rdev->flags);
   6803			else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
   6804				/* --add initiated by this node */
   6805				err = md_cluster_ops->add_new_disk(mddev, rdev);
   6806				if (err) {
   6807					export_rdev(rdev);
   6808					return err;
   6809				}
   6810			}
   6811		}
   6812
   6813		rdev->raid_disk = -1;
   6814		err = bind_rdev_to_array(rdev, mddev);
   6815
   6816		if (err)
   6817			export_rdev(rdev);
   6818
   6819		if (mddev_is_clustered(mddev)) {
   6820			if (info->state & (1 << MD_DISK_CANDIDATE)) {
   6821				if (!err) {
   6822					err = md_cluster_ops->new_disk_ack(mddev,
   6823						err == 0);
   6824					if (err)
   6825						md_kick_rdev_from_array(rdev);
   6826				}
   6827			} else {
   6828				if (err)
   6829					md_cluster_ops->add_new_disk_cancel(mddev);
   6830				else
   6831					err = add_bound_rdev(rdev);
   6832			}
   6833
   6834		} else if (!err)
   6835			err = add_bound_rdev(rdev);
   6836
   6837		return err;
   6838	}
   6839
   6840	/* otherwise, md_add_new_disk is only allowed
   6841	 * for major_version==0 superblocks
   6842	 */
   6843	if (mddev->major_version != 0) {
   6844		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
   6845		return -EINVAL;
   6846	}
   6847
   6848	if (!(info->state & (1<<MD_DISK_FAULTY))) {
   6849		int err;
   6850		rdev = md_import_device(dev, -1, 0);
   6851		if (IS_ERR(rdev)) {
   6852			pr_warn("md: error, md_import_device() returned %ld\n",
   6853				PTR_ERR(rdev));
   6854			return PTR_ERR(rdev);
   6855		}
   6856		rdev->desc_nr = info->number;
   6857		if (info->raid_disk < mddev->raid_disks)
   6858			rdev->raid_disk = info->raid_disk;
   6859		else
   6860			rdev->raid_disk = -1;
   6861
   6862		if (rdev->raid_disk < mddev->raid_disks)
   6863			if (info->state & (1<<MD_DISK_SYNC))
   6864				set_bit(In_sync, &rdev->flags);
   6865
   6866		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
   6867			set_bit(WriteMostly, &rdev->flags);
   6868		if (info->state & (1<<MD_DISK_FAILFAST))
   6869			set_bit(FailFast, &rdev->flags);
   6870
   6871		if (!mddev->persistent) {
   6872			pr_debug("md: nonpersistent superblock ...\n");
   6873			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
   6874		} else
   6875			rdev->sb_start = calc_dev_sboffset(rdev);
   6876		rdev->sectors = rdev->sb_start;
   6877
   6878		err = bind_rdev_to_array(rdev, mddev);
   6879		if (err) {
   6880			export_rdev(rdev);
   6881			return err;
   6882		}
   6883	}
   6884
   6885	return 0;
   6886}
   6887
   6888static int hot_remove_disk(struct mddev *mddev, dev_t dev)
   6889{
   6890	struct md_rdev *rdev;
   6891
   6892	if (!mddev->pers)
   6893		return -ENODEV;
   6894
   6895	rdev = find_rdev(mddev, dev);
   6896	if (!rdev)
   6897		return -ENXIO;
   6898
   6899	if (rdev->raid_disk < 0)
   6900		goto kick_rdev;
   6901
   6902	clear_bit(Blocked, &rdev->flags);
   6903	remove_and_add_spares(mddev, rdev);
   6904
   6905	if (rdev->raid_disk >= 0)
   6906		goto busy;
   6907
   6908kick_rdev:
   6909	if (mddev_is_clustered(mddev)) {
   6910		if (md_cluster_ops->remove_disk(mddev, rdev))
   6911			goto busy;
   6912	}
   6913
   6914	md_kick_rdev_from_array(rdev);
   6915	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   6916	if (mddev->thread)
   6917		md_wakeup_thread(mddev->thread);
   6918	else
   6919		md_update_sb(mddev, 1);
   6920	md_new_event();
   6921
   6922	return 0;
   6923busy:
   6924	pr_debug("md: cannot remove active disk %pg from %s ...\n",
   6925		 rdev->bdev, mdname(mddev));
   6926	return -EBUSY;
   6927}
   6928
   6929static int hot_add_disk(struct mddev *mddev, dev_t dev)
   6930{
   6931	int err;
   6932	struct md_rdev *rdev;
   6933
   6934	if (!mddev->pers)
   6935		return -ENODEV;
   6936
   6937	if (mddev->major_version != 0) {
   6938		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
   6939			mdname(mddev));
   6940		return -EINVAL;
   6941	}
   6942	if (!mddev->pers->hot_add_disk) {
   6943		pr_warn("%s: personality does not support diskops!\n",
   6944			mdname(mddev));
   6945		return -EINVAL;
   6946	}
   6947
   6948	rdev = md_import_device(dev, -1, 0);
   6949	if (IS_ERR(rdev)) {
   6950		pr_warn("md: error, md_import_device() returned %ld\n",
   6951			PTR_ERR(rdev));
   6952		return -EINVAL;
   6953	}
   6954
   6955	if (mddev->persistent)
   6956		rdev->sb_start = calc_dev_sboffset(rdev);
   6957	else
   6958		rdev->sb_start = bdev_nr_sectors(rdev->bdev);
   6959
   6960	rdev->sectors = rdev->sb_start;
   6961
   6962	if (test_bit(Faulty, &rdev->flags)) {
   6963		pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
   6964			rdev->bdev, mdname(mddev));
   6965		err = -EINVAL;
   6966		goto abort_export;
   6967	}
   6968
   6969	clear_bit(In_sync, &rdev->flags);
   6970	rdev->desc_nr = -1;
   6971	rdev->saved_raid_disk = -1;
   6972	err = bind_rdev_to_array(rdev, mddev);
   6973	if (err)
   6974		goto abort_export;
   6975
   6976	/*
   6977	 * The rest should better be atomic, we can have disk failures
   6978	 * noticed in interrupt contexts ...
   6979	 */
   6980
   6981	rdev->raid_disk = -1;
   6982
   6983	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   6984	if (!mddev->thread)
   6985		md_update_sb(mddev, 1);
   6986	/*
   6987	 * If the new disk does not support REQ_NOWAIT,
   6988	 * disable on the whole MD.
   6989	 */
   6990	if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
   6991		pr_info("%s: Disabling nowait because %pg does not support nowait\n",
   6992			mdname(mddev), rdev->bdev);
   6993		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
   6994	}
   6995	/*
   6996	 * Kick recovery, maybe this spare has to be added to the
   6997	 * array immediately.
   6998	 */
   6999	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   7000	md_wakeup_thread(mddev->thread);
   7001	md_new_event();
   7002	return 0;
   7003
   7004abort_export:
   7005	export_rdev(rdev);
   7006	return err;
   7007}
   7008
   7009static int set_bitmap_file(struct mddev *mddev, int fd)
   7010{
   7011	int err = 0;
   7012
   7013	if (mddev->pers) {
   7014		if (!mddev->pers->quiesce || !mddev->thread)
   7015			return -EBUSY;
   7016		if (mddev->recovery || mddev->sync_thread)
   7017			return -EBUSY;
   7018		/* we should be able to change the bitmap.. */
   7019	}
   7020
   7021	if (fd >= 0) {
   7022		struct inode *inode;
   7023		struct file *f;
   7024
   7025		if (mddev->bitmap || mddev->bitmap_info.file)
   7026			return -EEXIST; /* cannot add when bitmap is present */
   7027		f = fget(fd);
   7028
   7029		if (f == NULL) {
   7030			pr_warn("%s: error: failed to get bitmap file\n",
   7031				mdname(mddev));
   7032			return -EBADF;
   7033		}
   7034
   7035		inode = f->f_mapping->host;
   7036		if (!S_ISREG(inode->i_mode)) {
   7037			pr_warn("%s: error: bitmap file must be a regular file\n",
   7038				mdname(mddev));
   7039			err = -EBADF;
   7040		} else if (!(f->f_mode & FMODE_WRITE)) {
    7041			pr_warn("%s: error: bitmap file must be opened for write\n",
   7042				mdname(mddev));
   7043			err = -EBADF;
   7044		} else if (atomic_read(&inode->i_writecount) != 1) {
   7045			pr_warn("%s: error: bitmap file is already in use\n",
   7046				mdname(mddev));
   7047			err = -EBUSY;
   7048		}
   7049		if (err) {
   7050			fput(f);
   7051			return err;
   7052		}
   7053		mddev->bitmap_info.file = f;
   7054		mddev->bitmap_info.offset = 0; /* file overrides offset */
   7055	} else if (mddev->bitmap == NULL)
   7056		return -ENOENT; /* cannot remove what isn't there */
   7057	err = 0;
   7058	if (mddev->pers) {
   7059		if (fd >= 0) {
   7060			struct bitmap *bitmap;
   7061
   7062			bitmap = md_bitmap_create(mddev, -1);
   7063			mddev_suspend(mddev);
   7064			if (!IS_ERR(bitmap)) {
   7065				mddev->bitmap = bitmap;
   7066				err = md_bitmap_load(mddev);
   7067			} else
   7068				err = PTR_ERR(bitmap);
   7069			if (err) {
   7070				md_bitmap_destroy(mddev);
   7071				fd = -1;
   7072			}
   7073			mddev_resume(mddev);
   7074		} else if (fd < 0) {
   7075			mddev_suspend(mddev);
   7076			md_bitmap_destroy(mddev);
   7077			mddev_resume(mddev);
   7078		}
   7079	}
   7080	if (fd < 0) {
   7081		struct file *f = mddev->bitmap_info.file;
   7082		if (f) {
   7083			spin_lock(&mddev->lock);
   7084			mddev->bitmap_info.file = NULL;
   7085			spin_unlock(&mddev->lock);
   7086			fput(f);
   7087		}
   7088	}
   7089
   7090	return err;
   7091}
   7092
   7093/*
    7094 * md_set_array_info is used in two different ways.
    7095 * The original usage is when creating a new array.
    7096 * In this usage, raid_disks is > 0 and, together with
    7097 *  level, size, not_persistent, layout and chunksize, it determines
    7098 *  the shape of the array.
    7099 *  This will always create an array with a type-0.90.0 superblock.
    7100 * The newer usage is when assembling an array.
    7101 *  In this case raid_disks will be 0, and the major_version field is
    7102 *  used to determine which style of superblock is to be found on the devices.
    7103 *  The minor and patch _version numbers are also kept in case the
    7104 *  super_block handler wishes to interpret them.
   7105 */
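        /*
         * A rough sketch of the two ioctl call patterns (hypothetical example
         * values; not an exhaustive field list):
         *
         *   assemble:  info.raid_disks = 0;
         *              info.major_version = 1;    (selects a super_types[] entry)
         *   create:    info.raid_disks = 2;       (> 0 selects creation)
         *              info.level = 1; info.size = ...;      (size is in KiB)
         *              info.chunk_size = ...;                (in bytes)
         */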
   7106int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
   7107{
   7108	if (info->raid_disks == 0) {
   7109		/* just setting version number for superblock loading */
   7110		if (info->major_version < 0 ||
   7111		    info->major_version >= ARRAY_SIZE(super_types) ||
   7112		    super_types[info->major_version].name == NULL) {
   7113			/* maybe try to auto-load a module? */
   7114			pr_warn("md: superblock version %d not known\n",
   7115				info->major_version);
   7116			return -EINVAL;
   7117		}
   7118		mddev->major_version = info->major_version;
   7119		mddev->minor_version = info->minor_version;
   7120		mddev->patch_version = info->patch_version;
   7121		mddev->persistent = !info->not_persistent;
   7122		/* ensure mddev_put doesn't delete this now that there
   7123		 * is some minimal configuration.
   7124		 */
   7125		mddev->ctime         = ktime_get_real_seconds();
   7126		return 0;
   7127	}
   7128	mddev->major_version = MD_MAJOR_VERSION;
   7129	mddev->minor_version = MD_MINOR_VERSION;
   7130	mddev->patch_version = MD_PATCHLEVEL_VERSION;
   7131	mddev->ctime         = ktime_get_real_seconds();
   7132
   7133	mddev->level         = info->level;
   7134	mddev->clevel[0]     = 0;
   7135	mddev->dev_sectors   = 2 * (sector_t)info->size;
   7136	mddev->raid_disks    = info->raid_disks;
   7137	/* don't set md_minor, it is determined by which /dev/md* was
    7138	 * opened
   7139	 */
   7140	if (info->state & (1<<MD_SB_CLEAN))
   7141		mddev->recovery_cp = MaxSector;
   7142	else
   7143		mddev->recovery_cp = 0;
   7144	mddev->persistent    = ! info->not_persistent;
   7145	mddev->external	     = 0;
   7146
   7147	mddev->layout        = info->layout;
   7148	if (mddev->level == 0)
   7149		/* Cannot trust RAID0 layout info here */
   7150		mddev->layout = -1;
   7151	mddev->chunk_sectors = info->chunk_size >> 9;
   7152
   7153	if (mddev->persistent) {
   7154		mddev->max_disks = MD_SB_DISKS;
   7155		mddev->flags = 0;
   7156		mddev->sb_flags = 0;
   7157	}
   7158	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   7159
   7160	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
   7161	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
   7162	mddev->bitmap_info.offset = 0;
   7163
   7164	mddev->reshape_position = MaxSector;
   7165
   7166	/*
   7167	 * Generate a 128 bit UUID
   7168	 */
   7169	get_random_bytes(mddev->uuid, 16);
   7170
   7171	mddev->new_level = mddev->level;
   7172	mddev->new_chunk_sectors = mddev->chunk_sectors;
   7173	mddev->new_layout = mddev->layout;
   7174	mddev->delta_disks = 0;
   7175	mddev->reshape_backwards = 0;
   7176
   7177	return 0;
   7178}
   7179
   7180void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
   7181{
   7182	lockdep_assert_held(&mddev->reconfig_mutex);
   7183
   7184	if (mddev->external_size)
   7185		return;
   7186
   7187	mddev->array_sectors = array_sectors;
   7188}
   7189EXPORT_SYMBOL(md_set_array_sectors);
   7190
   7191static int update_size(struct mddev *mddev, sector_t num_sectors)
   7192{
   7193	struct md_rdev *rdev;
   7194	int rv;
   7195	int fit = (num_sectors == 0);
   7196	sector_t old_dev_sectors = mddev->dev_sectors;
   7197
   7198	if (mddev->pers->resize == NULL)
   7199		return -EINVAL;
   7200	/* The "num_sectors" is the number of sectors of each device that
   7201	 * is used.  This can only make sense for arrays with redundancy.
   7202	 * linear and raid0 always use whatever space is available. We can only
   7203	 * consider changing this number if no resync or reconstruction is
   7204	 * happening, and if the new size is acceptable. It must fit before the
   7205	 * sb_start or, if that is <data_offset, it must fit before the size
   7206	 * of each device.  If num_sectors is zero, we find the largest size
   7207	 * that fits.
   7208	 */
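	/*
	 * For example (sketch): with components offering 1000, 1200 and
	 * 1100 usable sectors, a num_sectors of 0 ends up as 1000, the
	 * smallest available size, before ->resize() is called.
	 */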
   7209	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
   7210	    mddev->sync_thread)
   7211		return -EBUSY;
   7212	if (mddev->ro)
   7213		return -EROFS;
   7214
   7215	rdev_for_each(rdev, mddev) {
   7216		sector_t avail = rdev->sectors;
   7217
   7218		if (fit && (num_sectors == 0 || num_sectors > avail))
   7219			num_sectors = avail;
   7220		if (avail < num_sectors)
   7221			return -ENOSPC;
   7222	}
   7223	rv = mddev->pers->resize(mddev, num_sectors);
   7224	if (!rv) {
   7225		if (mddev_is_clustered(mddev))
   7226			md_cluster_ops->update_size(mddev, old_dev_sectors);
   7227		else if (mddev->queue) {
   7228			set_capacity_and_notify(mddev->gendisk,
   7229						mddev->array_sectors);
   7230		}
   7231	}
   7232	return rv;
   7233}
   7234
   7235static int update_raid_disks(struct mddev *mddev, int raid_disks)
   7236{
   7237	int rv;
   7238	struct md_rdev *rdev;
   7239	/* change the number of raid disks */
   7240	if (mddev->pers->check_reshape == NULL)
   7241		return -EINVAL;
   7242	if (mddev->ro)
   7243		return -EROFS;
   7244	if (raid_disks <= 0 ||
   7245	    (mddev->max_disks && raid_disks >= mddev->max_disks))
   7246		return -EINVAL;
   7247	if (mddev->sync_thread ||
   7248	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
   7249	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
   7250	    mddev->reshape_position != MaxSector)
   7251		return -EBUSY;
   7252
   7253	rdev_for_each(rdev, mddev) {
   7254		if (mddev->raid_disks < raid_disks &&
   7255		    rdev->data_offset < rdev->new_data_offset)
   7256			return -EINVAL;
   7257		if (mddev->raid_disks > raid_disks &&
   7258		    rdev->data_offset > rdev->new_data_offset)
   7259			return -EINVAL;
   7260	}
   7261
   7262	mddev->delta_disks = raid_disks - mddev->raid_disks;
   7263	if (mddev->delta_disks < 0)
   7264		mddev->reshape_backwards = 1;
   7265	else if (mddev->delta_disks > 0)
   7266		mddev->reshape_backwards = 0;
   7267
   7268	rv = mddev->pers->check_reshape(mddev);
   7269	if (rv < 0) {
   7270		mddev->delta_disks = 0;
   7271		mddev->reshape_backwards = 0;
   7272	}
   7273	return rv;
   7274}
   7275
   7276/*
   7277 * update_array_info is used to change the configuration of an
   7278 * on-line array.
    7279 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
   7280 * fields in the info are checked against the array.
   7281 * Any differences that cannot be handled will cause an error.
   7282 * Normally, only one change can be managed at a time.
   7283 */
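        /*
         * For example, a single call that changes both 'size' and 'layout'
         * bumps 'cnt' past one below and is rejected with -EINVAL; such
         * changes have to be applied one ioctl at a time.
         */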
   7284static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
   7285{
   7286	int rv = 0;
   7287	int cnt = 0;
   7288	int state = 0;
   7289
    7290	/* calculate expected state, ignoring low bits */
   7291	if (mddev->bitmap && mddev->bitmap_info.offset)
   7292		state |= (1 << MD_SB_BITMAP_PRESENT);
   7293
   7294	if (mddev->major_version != info->major_version ||
   7295	    mddev->minor_version != info->minor_version ||
   7296/*	    mddev->patch_version != info->patch_version || */
   7297	    mddev->ctime         != info->ctime         ||
   7298	    mddev->level         != info->level         ||
   7299/*	    mddev->layout        != info->layout        || */
   7300	    mddev->persistent	 != !info->not_persistent ||
   7301	    mddev->chunk_sectors != info->chunk_size >> 9 ||
   7302	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
   7303	    ((state^info->state) & 0xfffffe00)
   7304		)
   7305		return -EINVAL;
   7306	/* Check there is only one change */
   7307	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
   7308		cnt++;
   7309	if (mddev->raid_disks != info->raid_disks)
   7310		cnt++;
   7311	if (mddev->layout != info->layout)
   7312		cnt++;
   7313	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
   7314		cnt++;
   7315	if (cnt == 0)
   7316		return 0;
   7317	if (cnt > 1)
   7318		return -EINVAL;
   7319
   7320	if (mddev->layout != info->layout) {
   7321		/* Change layout
   7322		 * we don't need to do anything at the md level, the
   7323		 * personality will take care of it all.
   7324		 */
   7325		if (mddev->pers->check_reshape == NULL)
   7326			return -EINVAL;
   7327		else {
   7328			mddev->new_layout = info->layout;
   7329			rv = mddev->pers->check_reshape(mddev);
   7330			if (rv)
   7331				mddev->new_layout = mddev->layout;
   7332			return rv;
   7333		}
   7334	}
   7335	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
   7336		rv = update_size(mddev, (sector_t)info->size * 2);
   7337
   7338	if (mddev->raid_disks    != info->raid_disks)
   7339		rv = update_raid_disks(mddev, info->raid_disks);
   7340
   7341	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
   7342		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
   7343			rv = -EINVAL;
   7344			goto err;
   7345		}
   7346		if (mddev->recovery || mddev->sync_thread) {
   7347			rv = -EBUSY;
   7348			goto err;
   7349		}
   7350		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
   7351			struct bitmap *bitmap;
   7352			/* add the bitmap */
   7353			if (mddev->bitmap) {
   7354				rv = -EEXIST;
   7355				goto err;
   7356			}
   7357			if (mddev->bitmap_info.default_offset == 0) {
   7358				rv = -EINVAL;
   7359				goto err;
   7360			}
   7361			mddev->bitmap_info.offset =
   7362				mddev->bitmap_info.default_offset;
   7363			mddev->bitmap_info.space =
   7364				mddev->bitmap_info.default_space;
   7365			bitmap = md_bitmap_create(mddev, -1);
   7366			mddev_suspend(mddev);
   7367			if (!IS_ERR(bitmap)) {
   7368				mddev->bitmap = bitmap;
   7369				rv = md_bitmap_load(mddev);
   7370			} else
   7371				rv = PTR_ERR(bitmap);
   7372			if (rv)
   7373				md_bitmap_destroy(mddev);
   7374			mddev_resume(mddev);
   7375		} else {
   7376			/* remove the bitmap */
   7377			if (!mddev->bitmap) {
   7378				rv = -ENOENT;
   7379				goto err;
   7380			}
   7381			if (mddev->bitmap->storage.file) {
   7382				rv = -EINVAL;
   7383				goto err;
   7384			}
   7385			if (mddev->bitmap_info.nodes) {
    7386				/* hold PW on all the bitmap locks */
   7387				if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
   7388					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
   7389					rv = -EPERM;
   7390					md_cluster_ops->unlock_all_bitmaps(mddev);
   7391					goto err;
   7392				}
   7393
   7394				mddev->bitmap_info.nodes = 0;
   7395				md_cluster_ops->leave(mddev);
   7396				module_put(md_cluster_mod);
   7397				mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
   7398			}
   7399			mddev_suspend(mddev);
   7400			md_bitmap_destroy(mddev);
   7401			mddev_resume(mddev);
   7402			mddev->bitmap_info.offset = 0;
   7403		}
   7404	}
   7405	md_update_sb(mddev, 1);
   7406	return rv;
   7407err:
   7408	return rv;
   7409}
   7410
   7411static int set_disk_faulty(struct mddev *mddev, dev_t dev)
   7412{
   7413	struct md_rdev *rdev;
   7414	int err = 0;
   7415
   7416	if (mddev->pers == NULL)
   7417		return -ENODEV;
   7418
   7419	rcu_read_lock();
   7420	rdev = md_find_rdev_rcu(mddev, dev);
   7421	if (!rdev)
   7422		err =  -ENODEV;
   7423	else {
   7424		md_error(mddev, rdev);
   7425		if (test_bit(MD_BROKEN, &mddev->flags))
   7426			err = -EBUSY;
   7427	}
   7428	rcu_read_unlock();
   7429	return err;
   7430}
   7431
   7432/*
    7433 * We have a problem here: there is no easy way to give a CHS
    7434 * virtual geometry. We currently pretend that we have 2 heads and
    7435 * 4 sectors (with a BIG number of cylinders...). This drives
   7436 * dosfs just mad... ;-)
   7437 */
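        /*
         * With 2 heads x 4 sectors per track, the cylinder count is simply
         * array_sectors / 8; e.g. a 1 TiB array (2147483648 sectors) reports
         * 268435456 cylinders.
         */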
   7438static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
   7439{
   7440	struct mddev *mddev = bdev->bd_disk->private_data;
   7441
   7442	geo->heads = 2;
   7443	geo->sectors = 4;
   7444	geo->cylinders = mddev->array_sectors / 8;
   7445	return 0;
   7446}
   7447
   7448static inline bool md_ioctl_valid(unsigned int cmd)
   7449{
   7450	switch (cmd) {
   7451	case ADD_NEW_DISK:
   7452	case GET_ARRAY_INFO:
   7453	case GET_BITMAP_FILE:
   7454	case GET_DISK_INFO:
   7455	case HOT_ADD_DISK:
   7456	case HOT_REMOVE_DISK:
   7457	case RAID_VERSION:
   7458	case RESTART_ARRAY_RW:
   7459	case RUN_ARRAY:
   7460	case SET_ARRAY_INFO:
   7461	case SET_BITMAP_FILE:
   7462	case SET_DISK_FAULTY:
   7463	case STOP_ARRAY:
   7464	case STOP_ARRAY_RO:
   7465	case CLUSTERED_DISK_NACK:
   7466		return true;
   7467	default:
   7468		return false;
   7469	}
   7470}
   7471
   7472static int md_ioctl(struct block_device *bdev, fmode_t mode,
   7473			unsigned int cmd, unsigned long arg)
   7474{
   7475	int err = 0;
   7476	void __user *argp = (void __user *)arg;
   7477	struct mddev *mddev = NULL;
   7478	bool did_set_md_closing = false;
   7479
   7480	if (!md_ioctl_valid(cmd))
   7481		return -ENOTTY;
   7482
   7483	switch (cmd) {
   7484	case RAID_VERSION:
   7485	case GET_ARRAY_INFO:
   7486	case GET_DISK_INFO:
   7487		break;
   7488	default:
   7489		if (!capable(CAP_SYS_ADMIN))
   7490			return -EACCES;
   7491	}
   7492
   7493	/*
   7494	 * Commands dealing with the RAID driver but not any
   7495	 * particular array:
   7496	 */
   7497	switch (cmd) {
   7498	case RAID_VERSION:
   7499		err = get_version(argp);
   7500		goto out;
   7501	default:;
   7502	}
   7503
   7504	/*
   7505	 * Commands creating/starting a new array:
   7506	 */
   7507
   7508	mddev = bdev->bd_disk->private_data;
   7509
   7510	if (!mddev) {
   7511		BUG();
   7512		goto out;
   7513	}
   7514
    7515	/* Some actions do not require the mutex */
   7516	switch (cmd) {
   7517	case GET_ARRAY_INFO:
   7518		if (!mddev->raid_disks && !mddev->external)
   7519			err = -ENODEV;
   7520		else
   7521			err = get_array_info(mddev, argp);
   7522		goto out;
   7523
   7524	case GET_DISK_INFO:
   7525		if (!mddev->raid_disks && !mddev->external)
   7526			err = -ENODEV;
   7527		else
   7528			err = get_disk_info(mddev, argp);
   7529		goto out;
   7530
   7531	case SET_DISK_FAULTY:
   7532		err = set_disk_faulty(mddev, new_decode_dev(arg));
   7533		goto out;
   7534
   7535	case GET_BITMAP_FILE:
   7536		err = get_bitmap_file(mddev, argp);
   7537		goto out;
   7538
   7539	}
   7540
   7541	if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
   7542		flush_rdev_wq(mddev);
   7543
   7544	if (cmd == HOT_REMOVE_DISK)
   7545		/* need to ensure recovery thread has run */
   7546		wait_event_interruptible_timeout(mddev->sb_wait,
   7547						 !test_bit(MD_RECOVERY_NEEDED,
   7548							   &mddev->recovery),
   7549						 msecs_to_jiffies(5000));
   7550	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
   7551		/* Need to flush page cache, and ensure no-one else opens
   7552		 * and writes
   7553		 */
   7554		mutex_lock(&mddev->open_mutex);
   7555		if (mddev->pers && atomic_read(&mddev->openers) > 1) {
   7556			mutex_unlock(&mddev->open_mutex);
   7557			err = -EBUSY;
   7558			goto out;
   7559		}
   7560		if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
   7561			mutex_unlock(&mddev->open_mutex);
   7562			err = -EBUSY;
   7563			goto out;
   7564		}
   7565		did_set_md_closing = true;
   7566		mutex_unlock(&mddev->open_mutex);
   7567		sync_blockdev(bdev);
   7568	}
   7569	err = mddev_lock(mddev);
   7570	if (err) {
   7571		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
   7572			 err, cmd);
   7573		goto out;
   7574	}
   7575
   7576	if (cmd == SET_ARRAY_INFO) {
   7577		mdu_array_info_t info;
   7578		if (!arg)
   7579			memset(&info, 0, sizeof(info));
   7580		else if (copy_from_user(&info, argp, sizeof(info))) {
   7581			err = -EFAULT;
   7582			goto unlock;
   7583		}
   7584		if (mddev->pers) {
   7585			err = update_array_info(mddev, &info);
   7586			if (err) {
   7587				pr_warn("md: couldn't update array info. %d\n", err);
   7588				goto unlock;
   7589			}
   7590			goto unlock;
   7591		}
   7592		if (!list_empty(&mddev->disks)) {
   7593			pr_warn("md: array %s already has disks!\n", mdname(mddev));
   7594			err = -EBUSY;
   7595			goto unlock;
   7596		}
   7597		if (mddev->raid_disks) {
   7598			pr_warn("md: array %s already initialised!\n", mdname(mddev));
   7599			err = -EBUSY;
   7600			goto unlock;
   7601		}
   7602		err = md_set_array_info(mddev, &info);
   7603		if (err) {
   7604			pr_warn("md: couldn't set array info. %d\n", err);
   7605			goto unlock;
   7606		}
   7607		goto unlock;
   7608	}
   7609
   7610	/*
   7611	 * Commands querying/configuring an existing array:
   7612	 */
   7613	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
   7614	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
   7615	if ((!mddev->raid_disks && !mddev->external)
   7616	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
   7617	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
   7618	    && cmd != GET_BITMAP_FILE) {
   7619		err = -ENODEV;
   7620		goto unlock;
   7621	}
   7622
   7623	/*
   7624	 * Commands even a read-only array can execute:
   7625	 */
   7626	switch (cmd) {
   7627	case RESTART_ARRAY_RW:
   7628		err = restart_array(mddev);
   7629		goto unlock;
   7630
   7631	case STOP_ARRAY:
   7632		err = do_md_stop(mddev, 0, bdev);
   7633		goto unlock;
   7634
   7635	case STOP_ARRAY_RO:
   7636		err = md_set_readonly(mddev, bdev);
   7637		goto unlock;
   7638
   7639	case HOT_REMOVE_DISK:
   7640		err = hot_remove_disk(mddev, new_decode_dev(arg));
   7641		goto unlock;
   7642
   7643	case ADD_NEW_DISK:
   7644		/* We can support ADD_NEW_DISK on read-only arrays
   7645		 * only if we are re-adding a preexisting device.
   7646		 * So require mddev->pers and MD_DISK_SYNC.
   7647		 */
   7648		if (mddev->pers) {
   7649			mdu_disk_info_t info;
   7650			if (copy_from_user(&info, argp, sizeof(info)))
   7651				err = -EFAULT;
   7652			else if (!(info.state & (1<<MD_DISK_SYNC)))
   7653				/* Need to clear read-only for this */
   7654				break;
   7655			else
   7656				err = md_add_new_disk(mddev, &info);
   7657			goto unlock;
   7658		}
   7659		break;
   7660	}
   7661
   7662	/*
   7663	 * The remaining ioctls are changing the state of the
   7664	 * superblock, so we do not allow them on read-only arrays.
   7665	 */
   7666	if (mddev->ro && mddev->pers) {
   7667		if (mddev->ro == 2) {
   7668			mddev->ro = 0;
   7669			sysfs_notify_dirent_safe(mddev->sysfs_state);
   7670			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   7671			/* mddev_unlock will wake thread */
   7672			/* If a device failed while we were read-only, we
   7673			 * need to make sure the metadata is updated now.
   7674			 */
   7675			if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
   7676				mddev_unlock(mddev);
   7677				wait_event(mddev->sb_wait,
   7678					   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
   7679					   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
   7680				mddev_lock_nointr(mddev);
   7681			}
   7682		} else {
   7683			err = -EROFS;
   7684			goto unlock;
   7685		}
   7686	}
   7687
   7688	switch (cmd) {
   7689	case ADD_NEW_DISK:
   7690	{
   7691		mdu_disk_info_t info;
   7692		if (copy_from_user(&info, argp, sizeof(info)))
   7693			err = -EFAULT;
   7694		else
   7695			err = md_add_new_disk(mddev, &info);
   7696		goto unlock;
   7697	}
   7698
   7699	case CLUSTERED_DISK_NACK:
   7700		if (mddev_is_clustered(mddev))
   7701			md_cluster_ops->new_disk_ack(mddev, false);
   7702		else
   7703			err = -EINVAL;
   7704		goto unlock;
   7705
   7706	case HOT_ADD_DISK:
   7707		err = hot_add_disk(mddev, new_decode_dev(arg));
   7708		goto unlock;
   7709
   7710	case RUN_ARRAY:
   7711		err = do_md_run(mddev);
   7712		goto unlock;
   7713
   7714	case SET_BITMAP_FILE:
   7715		err = set_bitmap_file(mddev, (int)arg);
   7716		goto unlock;
   7717
   7718	default:
   7719		err = -EINVAL;
   7720		goto unlock;
   7721	}
   7722
   7723unlock:
   7724	if (mddev->hold_active == UNTIL_IOCTL &&
   7725	    err != -EINVAL)
   7726		mddev->hold_active = 0;
   7727	mddev_unlock(mddev);
   7728out:
    7729	if (did_set_md_closing)
   7730		clear_bit(MD_CLOSING, &mddev->flags);
   7731	return err;
   7732}
   7733#ifdef CONFIG_COMPAT
   7734static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
   7735		    unsigned int cmd, unsigned long arg)
   7736{
   7737	switch (cmd) {
   7738	case HOT_REMOVE_DISK:
   7739	case HOT_ADD_DISK:
   7740	case SET_DISK_FAULTY:
   7741	case SET_BITMAP_FILE:
   7742		/* These take in integer arg, do not convert */
   7743		break;
   7744	default:
   7745		arg = (unsigned long)compat_ptr(arg);
   7746		break;
   7747	}
   7748
   7749	return md_ioctl(bdev, mode, cmd, arg);
   7750}
   7751#endif /* CONFIG_COMPAT */
   7752
   7753static int md_set_read_only(struct block_device *bdev, bool ro)
   7754{
   7755	struct mddev *mddev = bdev->bd_disk->private_data;
   7756	int err;
   7757
   7758	err = mddev_lock(mddev);
   7759	if (err)
   7760		return err;
   7761
   7762	if (!mddev->raid_disks && !mddev->external) {
   7763		err = -ENODEV;
   7764		goto out_unlock;
   7765	}
   7766
   7767	/*
   7768	 * Transitioning to read-auto need only happen for arrays that call
   7769	 * md_write_start and which are not ready for writes yet.
   7770	 */
   7771	if (!ro && mddev->ro == 1 && mddev->pers) {
   7772		err = restart_array(mddev);
   7773		if (err)
   7774			goto out_unlock;
   7775		mddev->ro = 2;
   7776	}
   7777
   7778out_unlock:
   7779	mddev_unlock(mddev);
   7780	return err;
   7781}
   7782
   7783static int md_open(struct block_device *bdev, fmode_t mode)
   7784{
   7785	/*
   7786	 * Succeed if we can lock the mddev, which confirms that
   7787	 * it isn't being stopped right now.
   7788	 */
   7789	struct mddev *mddev = mddev_find(bdev->bd_dev);
   7790	int err;
   7791
   7792	if (!mddev)
   7793		return -ENODEV;
   7794
   7795	if (mddev->gendisk != bdev->bd_disk) {
   7796		/* we are racing with mddev_put which is discarding this
   7797		 * bd_disk.
   7798		 */
   7799		mddev_put(mddev);
   7800		/* Wait until bdev->bd_disk is definitely gone */
   7801		if (work_pending(&mddev->del_work))
   7802			flush_workqueue(md_misc_wq);
   7803		return -EBUSY;
   7804	}
   7805	BUG_ON(mddev != bdev->bd_disk->private_data);
   7806
   7807	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
   7808		goto out;
   7809
   7810	if (test_bit(MD_CLOSING, &mddev->flags)) {
   7811		mutex_unlock(&mddev->open_mutex);
   7812		err = -ENODEV;
   7813		goto out;
   7814	}
   7815
   7816	err = 0;
   7817	atomic_inc(&mddev->openers);
   7818	mutex_unlock(&mddev->open_mutex);
   7819
   7820	bdev_check_media_change(bdev);
   7821 out:
   7822	if (err)
   7823		mddev_put(mddev);
   7824	return err;
   7825}
   7826
   7827static void md_release(struct gendisk *disk, fmode_t mode)
   7828{
   7829	struct mddev *mddev = disk->private_data;
   7830
   7831	BUG_ON(!mddev);
   7832	atomic_dec(&mddev->openers);
   7833	mddev_put(mddev);
   7834}
   7835
   7836static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
   7837{
   7838	struct mddev *mddev = disk->private_data;
   7839	unsigned int ret = 0;
   7840
   7841	if (mddev->changed)
   7842		ret = DISK_EVENT_MEDIA_CHANGE;
   7843	mddev->changed = 0;
   7844	return ret;
   7845}
   7846
   7847const struct block_device_operations md_fops =
   7848{
   7849	.owner		= THIS_MODULE,
   7850	.submit_bio	= md_submit_bio,
   7851	.open		= md_open,
   7852	.release	= md_release,
   7853	.ioctl		= md_ioctl,
   7854#ifdef CONFIG_COMPAT
   7855	.compat_ioctl	= md_compat_ioctl,
   7856#endif
   7857	.getgeo		= md_getgeo,
   7858	.check_events	= md_check_events,
   7859	.set_read_only	= md_set_read_only,
   7860};
   7861
   7862static int md_thread(void *arg)
   7863{
   7864	struct md_thread *thread = arg;
   7865
   7866	/*
    7867	 * md_thread is a 'system-thread', its priority should be very
   7868	 * high. We avoid resource deadlocks individually in each
   7869	 * raid personality. (RAID5 does preallocation) We also use RR and
   7870	 * the very same RT priority as kswapd, thus we will never get
   7871	 * into a priority inversion deadlock.
   7872	 *
   7873	 * we definitely have to have equal or higher priority than
   7874	 * bdflush, otherwise bdflush will deadlock if there are too
   7875	 * many dirty RAID5 blocks.
   7876	 */
   7877
   7878	allow_signal(SIGKILL);
   7879	while (!kthread_should_stop()) {
   7880
   7881		/* We need to wait INTERRUPTIBLE so that
   7882		 * we don't add to the load-average.
   7883		 * That means we need to be sure no signals are
   7884		 * pending
   7885		 */
   7886		if (signal_pending(current))
   7887			flush_signals(current);
   7888
   7889		wait_event_interruptible_timeout
   7890			(thread->wqueue,
   7891			 test_bit(THREAD_WAKEUP, &thread->flags)
   7892			 || kthread_should_stop() || kthread_should_park(),
   7893			 thread->timeout);
   7894
   7895		clear_bit(THREAD_WAKEUP, &thread->flags);
   7896		if (kthread_should_park())
   7897			kthread_parkme();
   7898		if (!kthread_should_stop())
   7899			thread->run(thread);
   7900	}
   7901
   7902	return 0;
   7903}
   7904
   7905void md_wakeup_thread(struct md_thread *thread)
   7906{
   7907	if (thread) {
   7908		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
   7909		set_bit(THREAD_WAKEUP, &thread->flags);
   7910		wake_up(&thread->wqueue);
   7911	}
   7912}
   7913EXPORT_SYMBOL(md_wakeup_thread);
   7914
   7915struct md_thread *md_register_thread(void (*run) (struct md_thread *),
   7916		struct mddev *mddev, const char *name)
   7917{
   7918	struct md_thread *thread;
   7919
   7920	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
   7921	if (!thread)
   7922		return NULL;
   7923
   7924	init_waitqueue_head(&thread->wqueue);
   7925
   7926	thread->run = run;
   7927	thread->mddev = mddev;
   7928	thread->timeout = MAX_SCHEDULE_TIMEOUT;
   7929	thread->tsk = kthread_run(md_thread, thread,
   7930				  "%s_%s",
   7931				  mdname(thread->mddev),
   7932				  name);
   7933	if (IS_ERR(thread->tsk)) {
   7934		kfree(thread);
   7935		return NULL;
   7936	}
   7937	return thread;
   7938}
   7939EXPORT_SYMBOL(md_register_thread);
   7940
   7941void md_unregister_thread(struct md_thread **threadp)
   7942{
   7943	struct md_thread *thread;
   7944
   7945	/*
   7946	 * Locking ensures that mddev_unlock does not wake_up a
   7947	 * non-existent thread
   7948	 */
   7949	spin_lock(&pers_lock);
   7950	thread = *threadp;
   7951	if (!thread) {
   7952		spin_unlock(&pers_lock);
   7953		return;
   7954	}
   7955	*threadp = NULL;
   7956	spin_unlock(&pers_lock);
   7957
   7958	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
   7959	kthread_stop(thread->tsk);
   7960	kfree(thread);
   7961}
   7962EXPORT_SYMBOL(md_unregister_thread);
   7963
   7964void md_error(struct mddev *mddev, struct md_rdev *rdev)
   7965{
   7966	if (!rdev || test_bit(Faulty, &rdev->flags))
   7967		return;
   7968
   7969	if (!mddev->pers || !mddev->pers->error_handler)
   7970		return;
   7971	mddev->pers->error_handler(mddev, rdev);
   7972
   7973	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
   7974		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   7975	sysfs_notify_dirent_safe(rdev->sysfs_state);
   7976	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   7977	if (!test_bit(MD_BROKEN, &mddev->flags)) {
   7978		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   7979		md_wakeup_thread(mddev->thread);
   7980	}
   7981	if (mddev->event_work.func)
   7982		queue_work(md_misc_wq, &mddev->event_work);
   7983	md_new_event();
   7984}
   7985EXPORT_SYMBOL(md_error);
   7986
   7987/* seq_file implementation /proc/mdstat */
   7988
   7989static void status_unused(struct seq_file *seq)
   7990{
   7991	int i = 0;
   7992	struct md_rdev *rdev;
   7993
   7994	seq_printf(seq, "unused devices: ");
   7995
   7996	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
   7997		i++;
   7998		seq_printf(seq, "%pg ", rdev->bdev);
   7999	}
   8000	if (!i)
   8001		seq_printf(seq, "<none>");
   8002
   8003	seq_printf(seq, "\n");
   8004}
   8005
   8006static int status_resync(struct seq_file *seq, struct mddev *mddev)
   8007{
   8008	sector_t max_sectors, resync, res;
   8009	unsigned long dt, db = 0;
   8010	sector_t rt, curr_mark_cnt, resync_mark_cnt;
   8011	int scale, recovery_active;
   8012	unsigned int per_milli;
   8013
   8014	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
   8015	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
   8016		max_sectors = mddev->resync_max_sectors;
   8017	else
   8018		max_sectors = mddev->dev_sectors;
   8019
   8020	resync = mddev->curr_resync;
   8021	if (resync <= 3) {
   8022		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
   8023			/* Still cleaning up */
   8024			resync = max_sectors;
   8025	} else if (resync > max_sectors)
   8026		resync = max_sectors;
   8027	else
   8028		resync -= atomic_read(&mddev->recovery_active);
   8029
   8030	if (resync == 0) {
   8031		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
   8032			struct md_rdev *rdev;
   8033
   8034			rdev_for_each(rdev, mddev)
   8035				if (rdev->raid_disk >= 0 &&
   8036				    !test_bit(Faulty, &rdev->flags) &&
   8037				    rdev->recovery_offset != MaxSector &&
   8038				    rdev->recovery_offset) {
   8039					seq_printf(seq, "\trecover=REMOTE");
   8040					return 1;
   8041				}
   8042			if (mddev->reshape_position != MaxSector)
   8043				seq_printf(seq, "\treshape=REMOTE");
   8044			else
   8045				seq_printf(seq, "\tresync=REMOTE");
   8046			return 1;
   8047		}
   8048		if (mddev->recovery_cp < MaxSector) {
   8049			seq_printf(seq, "\tresync=PENDING");
   8050			return 1;
   8051		}
   8052		return 0;
   8053	}
   8054	if (resync < 3) {
   8055		seq_printf(seq, "\tresync=DELAYED");
   8056		return 1;
   8057	}
   8058
   8059	WARN_ON(max_sectors == 0);
   8060	/* Pick 'scale' such that (resync>>scale)*1000 will fit
   8061	 * in a sector_t, and (max_sectors>>scale) will fit in a
   8062	 * u32, as those are the requirements for sector_div.
   8063	 * Thus 'scale' must be at least 10
   8064	 */
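	/*
	 * With scale == 10 the loop below only raises it for components
	 * beyond 2^43 sectors (4 PiB).  E.g. resync == 1000000 out of
	 * max_sectors == 4000000 gives res = (976 * 1000) / 3907 == 249,
	 * i.e. 24.9%.
	 */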
   8065	scale = 10;
   8066	if (sizeof(sector_t) > sizeof(unsigned long)) {
   8067		while ( max_sectors/2 > (1ULL<<(scale+32)))
   8068			scale++;
   8069	}
   8070	res = (resync>>scale)*1000;
   8071	sector_div(res, (u32)((max_sectors>>scale)+1));
   8072
   8073	per_milli = res;
   8074	{
   8075		int i, x = per_milli/50, y = 20-x;
   8076		seq_printf(seq, "[");
   8077		for (i = 0; i < x; i++)
   8078			seq_printf(seq, "=");
   8079		seq_printf(seq, ">");
   8080		for (i = 0; i < y; i++)
   8081			seq_printf(seq, ".");
   8082		seq_printf(seq, "] ");
   8083	}
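	/*
	 * e.g. per_milli == 437 draws 8 '=' characters, the '>' marker and
	 * 12 dots, and the format below then prints " 43.7%".
	 */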
   8084	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
   8085		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
   8086		    "reshape" :
   8087		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
   8088		     "check" :
   8089		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
   8090		      "resync" : "recovery"))),
   8091		   per_milli/10, per_milli % 10,
   8092		   (unsigned long long) resync/2,
   8093		   (unsigned long long) max_sectors/2);
   8094
   8095	/*
   8096	 * dt: time from mark until now
   8097	 * db: blocks written from mark until now
   8098	 * rt: remaining time
   8099	 *
   8100	 * rt is a sector_t, which is always 64bit now. We are keeping
   8101	 * the original algorithm, but it is not really necessary.
   8102	 *
   8103	 * Original algorithm:
   8104	 *   So we divide before multiply in case it is 32bit and close
   8105	 *   to the limit.
   8106	 *   We scale the divisor (db) by 32 to avoid losing precision
   8107	 *   near the end of resync when the number of remaining sectors
   8108	 *   is close to 'db'.
   8109	 *   We then divide rt by 32 after multiplying by db to compensate.
   8110	 *   The '+1' avoids division by zero if db is very small.
   8111	 */
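	/*
	 * Numeric sketch: 1000000 sectors remaining with db == 20000 sectors
	 * over dt == 10s gives rt = 1000000 / (20000/32 + 1) * 10 >> 5 == 499
	 * seconds, printed as "finish=8.3min".
	 */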
   8112	dt = ((jiffies - mddev->resync_mark) / HZ);
   8113	if (!dt) dt++;
   8114
   8115	curr_mark_cnt = mddev->curr_mark_cnt;
   8116	recovery_active = atomic_read(&mddev->recovery_active);
   8117	resync_mark_cnt = mddev->resync_mark_cnt;
   8118
   8119	if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
   8120		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
   8121
   8122	rt = max_sectors - resync;    /* number of remaining sectors */
   8123	rt = div64_u64(rt, db/32+1);
   8124	rt *= dt;
   8125	rt >>= 5;
   8126
   8127	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
   8128		   ((unsigned long)rt % 60)/6);
   8129
   8130	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
   8131	return 1;
   8132}
   8133
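        /*
         * The iterator below uses two sentinel cursors instead of real mddev
         * pointers: (void *)1 stands for the "Personalities" header line and
         * (void *)2 for the trailing "unused devices" line, while *pos ==
         * 0x10000 marks the position just past that tail entry.
         */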
   8134static void *md_seq_start(struct seq_file *seq, loff_t *pos)
   8135{
   8136	struct list_head *tmp;
   8137	loff_t l = *pos;
   8138	struct mddev *mddev;
   8139
   8140	if (l == 0x10000) {
   8141		++*pos;
   8142		return (void *)2;
   8143	}
   8144	if (l > 0x10000)
   8145		return NULL;
   8146	if (!l--)
   8147		/* header */
   8148		return (void*)1;
   8149
   8150	spin_lock(&all_mddevs_lock);
   8151	list_for_each(tmp,&all_mddevs)
   8152		if (!l--) {
   8153			mddev = list_entry(tmp, struct mddev, all_mddevs);
   8154			mddev_get(mddev);
   8155			spin_unlock(&all_mddevs_lock);
   8156			return mddev;
   8157		}
   8158	spin_unlock(&all_mddevs_lock);
   8159	if (!l--)
   8160		return (void*)2;/* tail */
   8161	return NULL;
   8162}
   8163
   8164static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
   8165{
   8166	struct list_head *tmp;
   8167	struct mddev *next_mddev, *mddev = v;
   8168
   8169	++*pos;
   8170	if (v == (void*)2)
   8171		return NULL;
   8172
   8173	spin_lock(&all_mddevs_lock);
   8174	if (v == (void*)1)
   8175		tmp = all_mddevs.next;
   8176	else
   8177		tmp = mddev->all_mddevs.next;
   8178	if (tmp != &all_mddevs)
   8179		next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
   8180	else {
   8181		next_mddev = (void*)2;
   8182		*pos = 0x10000;
   8183	}
   8184	spin_unlock(&all_mddevs_lock);
   8185
   8186	if (v != (void*)1)
   8187		mddev_put(mddev);
   8188	return next_mddev;
   8189
   8190}
   8191
   8192static void md_seq_stop(struct seq_file *seq, void *v)
   8193{
   8194	struct mddev *mddev = v;
   8195
   8196	if (mddev && v != (void*)1 && v != (void*)2)
   8197		mddev_put(mddev);
   8198}
   8199
   8200static int md_seq_show(struct seq_file *seq, void *v)
   8201{
   8202	struct mddev *mddev = v;
   8203	sector_t sectors;
   8204	struct md_rdev *rdev;
   8205
   8206	if (v == (void*)1) {
   8207		struct md_personality *pers;
   8208		seq_printf(seq, "Personalities : ");
   8209		spin_lock(&pers_lock);
   8210		list_for_each_entry(pers, &pers_list, list)
   8211			seq_printf(seq, "[%s] ", pers->name);
   8212
   8213		spin_unlock(&pers_lock);
   8214		seq_printf(seq, "\n");
   8215		seq->poll_event = atomic_read(&md_event_count);
   8216		return 0;
   8217	}
   8218	if (v == (void*)2) {
   8219		status_unused(seq);
   8220		return 0;
   8221	}
   8222
   8223	spin_lock(&mddev->lock);
   8224	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
   8225		seq_printf(seq, "%s : %sactive", mdname(mddev),
   8226						mddev->pers ? "" : "in");
   8227		if (mddev->pers) {
   8228			if (mddev->ro==1)
   8229				seq_printf(seq, " (read-only)");
   8230			if (mddev->ro==2)
   8231				seq_printf(seq, " (auto-read-only)");
   8232			seq_printf(seq, " %s", mddev->pers->name);
   8233		}
   8234
   8235		sectors = 0;
   8236		rcu_read_lock();
   8237		rdev_for_each_rcu(rdev, mddev) {
   8238			seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
   8239
   8240			if (test_bit(WriteMostly, &rdev->flags))
   8241				seq_printf(seq, "(W)");
   8242			if (test_bit(Journal, &rdev->flags))
   8243				seq_printf(seq, "(J)");
   8244			if (test_bit(Faulty, &rdev->flags)) {
   8245				seq_printf(seq, "(F)");
   8246				continue;
   8247			}
   8248			if (rdev->raid_disk < 0)
   8249				seq_printf(seq, "(S)"); /* spare */
   8250			if (test_bit(Replacement, &rdev->flags))
   8251				seq_printf(seq, "(R)");
   8252			sectors += rdev->sectors;
   8253		}
   8254		rcu_read_unlock();
   8255
   8256		if (!list_empty(&mddev->disks)) {
   8257			if (mddev->pers)
   8258				seq_printf(seq, "\n      %llu blocks",
   8259					   (unsigned long long)
   8260					   mddev->array_sectors / 2);
   8261			else
   8262				seq_printf(seq, "\n      %llu blocks",
   8263					   (unsigned long long)sectors / 2);
   8264		}
   8265		if (mddev->persistent) {
   8266			if (mddev->major_version != 0 ||
   8267			    mddev->minor_version != 90) {
   8268				seq_printf(seq," super %d.%d",
   8269					   mddev->major_version,
   8270					   mddev->minor_version);
   8271			}
   8272		} else if (mddev->external)
   8273			seq_printf(seq, " super external:%s",
   8274				   mddev->metadata_type);
   8275		else
   8276			seq_printf(seq, " super non-persistent");
   8277
   8278		if (mddev->pers) {
   8279			mddev->pers->status(seq, mddev);
   8280			seq_printf(seq, "\n      ");
   8281			if (mddev->pers->sync_request) {
   8282				if (status_resync(seq, mddev))
   8283					seq_printf(seq, "\n      ");
   8284			}
   8285		} else
   8286			seq_printf(seq, "\n       ");
   8287
   8288		md_bitmap_status(seq, mddev->bitmap);
   8289
   8290		seq_printf(seq, "\n");
   8291	}
   8292	spin_unlock(&mddev->lock);
   8293
   8294	return 0;
   8295}
   8296
   8297static const struct seq_operations md_seq_ops = {
   8298	.start  = md_seq_start,
   8299	.next   = md_seq_next,
   8300	.stop   = md_seq_stop,
   8301	.show   = md_seq_show,
   8302};
   8303
   8304static int md_seq_open(struct inode *inode, struct file *file)
   8305{
   8306	struct seq_file *seq;
   8307	int error;
   8308
   8309	error = seq_open(file, &md_seq_ops);
   8310	if (error)
   8311		return error;
   8312
   8313	seq = file->private_data;
   8314	seq->poll_event = atomic_read(&md_event_count);
   8315	return error;
   8316}
   8317
   8318static int md_unloading;
   8319static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
   8320{
   8321	struct seq_file *seq = filp->private_data;
   8322	__poll_t mask;
   8323
   8324	if (md_unloading)
   8325		return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
   8326	poll_wait(filp, &md_event_waiters, wait);
   8327
   8328	/* always allow read */
   8329	mask = EPOLLIN | EPOLLRDNORM;
   8330
   8331	if (seq->poll_event != atomic_read(&md_event_count))
   8332		mask |= EPOLLERR | EPOLLPRI;
   8333	return mask;
   8334}
   8335
   8336static const struct proc_ops mdstat_proc_ops = {
   8337	.proc_open	= md_seq_open,
   8338	.proc_read	= seq_read,
   8339	.proc_lseek	= seq_lseek,
   8340	.proc_release	= seq_release,
   8341	.proc_poll	= mdstat_poll,
   8342};
   8343
   8344int register_md_personality(struct md_personality *p)
   8345{
   8346	pr_debug("md: %s personality registered for level %d\n",
   8347		 p->name, p->level);
   8348	spin_lock(&pers_lock);
   8349	list_add_tail(&p->list, &pers_list);
   8350	spin_unlock(&pers_lock);
   8351	return 0;
   8352}
   8353EXPORT_SYMBOL(register_md_personality);
   8354
   8355int unregister_md_personality(struct md_personality *p)
   8356{
   8357	pr_debug("md: %s personality unregistered\n", p->name);
   8358	spin_lock(&pers_lock);
   8359	list_del_init(&p->list);
   8360	spin_unlock(&pers_lock);
   8361	return 0;
   8362}
   8363EXPORT_SYMBOL(unregister_md_personality);
   8364
   8365int register_md_cluster_operations(struct md_cluster_operations *ops,
   8366				   struct module *module)
   8367{
   8368	int ret = 0;
   8369	spin_lock(&pers_lock);
   8370	if (md_cluster_ops != NULL)
   8371		ret = -EALREADY;
   8372	else {
   8373		md_cluster_ops = ops;
   8374		md_cluster_mod = module;
   8375	}
   8376	spin_unlock(&pers_lock);
   8377	return ret;
   8378}
   8379EXPORT_SYMBOL(register_md_cluster_operations);
   8380
   8381int unregister_md_cluster_operations(void)
   8382{
   8383	spin_lock(&pers_lock);
   8384	md_cluster_ops = NULL;
   8385	spin_unlock(&pers_lock);
   8386	return 0;
   8387}
   8388EXPORT_SYMBOL(unregister_md_cluster_operations);
   8389
   8390int md_setup_cluster(struct mddev *mddev, int nodes)
   8391{
   8392	int ret;
   8393	if (!md_cluster_ops)
   8394		request_module("md-cluster");
   8395	spin_lock(&pers_lock);
   8396	/* ensure module won't be unloaded */
   8397	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
   8398		pr_warn("can't find md-cluster module or get its reference.\n");
   8399		spin_unlock(&pers_lock);
   8400		return -ENOENT;
   8401	}
   8402	spin_unlock(&pers_lock);
   8403
   8404	ret = md_cluster_ops->join(mddev, nodes);
   8405	if (!ret)
   8406		mddev->safemode_delay = 0;
   8407	return ret;
   8408}
   8409
   8410void md_cluster_stop(struct mddev *mddev)
   8411{
   8412	if (!md_cluster_ops)
   8413		return;
   8414	md_cluster_ops->leave(mddev);
   8415	module_put(md_cluster_mod);
   8416}
   8417
   8418static int is_mddev_idle(struct mddev *mddev, int init)
   8419{
   8420	struct md_rdev *rdev;
   8421	int idle;
   8422	int curr_events;
   8423
   8424	idle = 1;
   8425	rcu_read_lock();
   8426	rdev_for_each_rcu(rdev, mddev) {
   8427		struct gendisk *disk = rdev->bdev->bd_disk;
   8428		curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
   8429			      atomic_read(&disk->sync_io);
   8430		/* sync IO will cause sync_io to increase before the disk_stats
   8431		 * as sync_io is counted when a request starts, and
   8432		 * disk_stats is counted when it completes.
   8433		 * So resync activity will cause curr_events to be smaller than
   8434		 * when there was no such activity.
   8435		 * non-sync IO will cause disk_stat to increase without
   8436		 * increasing sync_io so curr_events will (eventually)
   8437		 * be larger than it was before.  Once it becomes
   8438		 * substantially larger, the test below will cause
   8439		 * the array to appear non-idle, and resync will slow
   8440		 * down.
   8441		 * If there is a lot of outstanding resync activity when
   8442		 * we set last_event to curr_events, then all that activity
   8443		 * completing might cause the array to appear non-idle
   8444		 * and resync will be slowed down even though there might
   8445		 * not have been non-resync activity.  This will only
   8446		 * happen once though.  'last_events' will soon reflect
   8447		 * the state where there is little or no outstanding
   8448		 * resync requests, and further resync activity will
   8449		 * always make curr_events less than last_events.
   8450		 *
   8451		 */
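		/*
		 * In other words, roughly 64 sectors (32KiB) of non-resync
		 * I/O since the previous check is enough to mark the array
		 * busy and let resync throttle back.
		 */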
   8452		if (init || curr_events - rdev->last_events > 64) {
   8453			rdev->last_events = curr_events;
   8454			idle = 0;
   8455		}
   8456	}
   8457	rcu_read_unlock();
   8458	return idle;
   8459}
   8460
   8461void md_done_sync(struct mddev *mddev, int blocks, int ok)
   8462{
   8463	/* another "blocks" (512byte) blocks have been synced */
   8464	atomic_sub(blocks, &mddev->recovery_active);
   8465	wake_up(&mddev->recovery_wait);
   8466	if (!ok) {
   8467		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   8468		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
   8469		md_wakeup_thread(mddev->thread);
   8470		// stop recovery, signal do_sync ....
   8471	}
   8472}
   8473EXPORT_SYMBOL(md_done_sync);
   8474
   8475/* md_write_start(mddev, bi)
   8476 * If we need to update some array metadata (e.g. 'active' flag
   8477 * in superblock) before writing, schedule a superblock update
   8478 * and wait for it to complete.
   8479 * A return value of 'false' means that the write wasn't recorded
    8480 * and cannot proceed as the array is being suspended.
   8481 */
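        /*
         * Typical caller pattern, as a sketch (every successful start is
         * paired with an md_write_end() once the write completes):
         *
         *	if (!md_write_start(mddev, bio))
         *		return;		(array is suspending; do not issue the write)
         *	...submit the write...
         *	md_write_end(mddev);
         */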
   8482bool md_write_start(struct mddev *mddev, struct bio *bi)
   8483{
   8484	int did_change = 0;
   8485
   8486	if (bio_data_dir(bi) != WRITE)
   8487		return true;
   8488
   8489	BUG_ON(mddev->ro == 1);
   8490	if (mddev->ro == 2) {
   8491		/* need to switch to read/write */
   8492		mddev->ro = 0;
   8493		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   8494		md_wakeup_thread(mddev->thread);
   8495		md_wakeup_thread(mddev->sync_thread);
   8496		did_change = 1;
   8497	}
   8498	rcu_read_lock();
   8499	percpu_ref_get(&mddev->writes_pending);
   8500	smp_mb(); /* Match smp_mb in set_in_sync() */
   8501	if (mddev->safemode == 1)
   8502		mddev->safemode = 0;
   8503	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
   8504	if (mddev->in_sync || mddev->sync_checkers) {
   8505		spin_lock(&mddev->lock);
   8506		if (mddev->in_sync) {
   8507			mddev->in_sync = 0;
   8508			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
   8509			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
   8510			md_wakeup_thread(mddev->thread);
   8511			did_change = 1;
   8512		}
   8513		spin_unlock(&mddev->lock);
   8514	}
   8515	rcu_read_unlock();
   8516	if (did_change)
   8517		sysfs_notify_dirent_safe(mddev->sysfs_state);
   8518	if (!mddev->has_superblocks)
   8519		return true;
   8520	wait_event(mddev->sb_wait,
   8521		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
   8522		   mddev->suspended);
   8523	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
   8524		percpu_ref_put(&mddev->writes_pending);
   8525		return false;
   8526	}
   8527	return true;
   8528}
   8529EXPORT_SYMBOL(md_write_start);
   8530
   8531/* md_write_inc can only be called when md_write_start() has
    8532 * already been called at least once for the current request.
   8533 * It increments the counter and is useful when a single request
   8534 * is split into several parts.  Each part causes an increment and
   8535 * so needs a matching md_write_end().
   8536 * Unlike md_write_start(), it is safe to call md_write_inc() inside
   8537 * a spinlocked region.
   8538 */
   8539void md_write_inc(struct mddev *mddev, struct bio *bi)
   8540{
   8541	if (bio_data_dir(bi) != WRITE)
   8542		return;
   8543	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
   8544	percpu_ref_get(&mddev->writes_pending);
   8545}
   8546EXPORT_SYMBOL(md_write_inc);
   8547
   8548void md_write_end(struct mddev *mddev)
   8549{
   8550	percpu_ref_put(&mddev->writes_pending);
   8551
   8552	if (mddev->safemode == 2)
   8553		md_wakeup_thread(mddev->thread);
   8554	else if (mddev->safemode_delay)
   8555		/* The roundup() ensures this only performs locking once
   8556		 * every ->safemode_delay jiffies
   8557		 */
   8558		mod_timer(&mddev->safemode_timer,
   8559			  roundup(jiffies, mddev->safemode_delay) +
   8560			  mddev->safemode_delay);
   8561}
   8562
   8563EXPORT_SYMBOL(md_write_end);
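
/*
 * Illustrative usage sketch (not from this file): the expected pairing of
 * md_write_start() / md_write_inc() / md_write_end() in a personality's
 * write path.  The functions named my_* are hypothetical and deliberately
 * minimal; a real personality drops the reference from its write-completion
 * path rather than immediately after submission.
 */
#if 0
static bool my_make_write_request(struct mddev *mddev, struct bio *bio)
{
	/* May block until the array is marked active; fails while suspended. */
	if (!md_write_start(mddev, bio))
		return false;

	/*
	 * If the request is split, each additional part takes a reference
	 * with md_write_inc() (safe under a spinlock) and releases it with
	 * its own md_write_end().
	 */
	my_submit_to_members(mddev, bio);	/* hypothetical */

	/* Matches the reference taken by md_write_start() above. */
	md_write_end(mddev);
	return true;
}
#endif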
   8564
   8565/* This is used by raid0 and raid10 */
   8566void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
   8567			struct bio *bio, sector_t start, sector_t size)
   8568{
   8569	struct bio *discard_bio = NULL;
   8570
   8571	if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
   8572			&discard_bio) || !discard_bio)
   8573		return;
   8574
   8575	bio_chain(discard_bio, bio);
   8576	bio_clone_blkg_association(discard_bio, bio);
   8577	if (mddev->gendisk)
   8578		trace_block_bio_remap(discard_bio,
   8579				disk_devt(mddev->gendisk),
   8580				bio->bi_iter.bi_sector);
   8581	submit_bio_noacct(discard_bio);
   8582}
   8583EXPORT_SYMBOL_GPL(md_submit_discard_bio);
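
/*
 * Illustrative usage sketch (not from this file): how a striping
 * personality might fan a DISCARD out to its members with
 * md_submit_discard_bio().  The per-device range helpers are hypothetical;
 * chaining per-device discards to the parent bio and then ending the parent
 * follows the pattern used by raid0/raid10.
 */
#if 0
static void my_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		sector_t dev_start = my_dev_discard_start(rdev, bio);	/* hypothetical */
		sector_t dev_size = my_dev_discard_size(rdev, bio);	/* hypothetical */

		if (dev_size)
			/* Chains a per-device discard bio to 'bio'. */
			md_submit_discard_bio(mddev, rdev, bio, dev_start, dev_size);
	}
	/* The parent completes once all chained discard bios have finished. */
	bio_endio(bio);
}
#endif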
   8584
   8585int acct_bioset_init(struct mddev *mddev)
   8586{
   8587	int err = 0;
   8588
   8589	if (!bioset_initialized(&mddev->io_acct_set))
   8590		err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
   8591			offsetof(struct md_io_acct, bio_clone), 0);
   8592	return err;
   8593}
   8594EXPORT_SYMBOL_GPL(acct_bioset_init);
   8595
   8596void acct_bioset_exit(struct mddev *mddev)
   8597{
   8598	bioset_exit(&mddev->io_acct_set);
   8599}
   8600EXPORT_SYMBOL_GPL(acct_bioset_exit);
   8601
   8602static void md_end_io_acct(struct bio *bio)
   8603{
   8604	struct md_io_acct *md_io_acct = bio->bi_private;
   8605	struct bio *orig_bio = md_io_acct->orig_bio;
   8606
   8607	orig_bio->bi_status = bio->bi_status;
   8608
   8609	bio_end_io_acct(orig_bio, md_io_acct->start_time);
   8610	bio_put(bio);
   8611	bio_endio(orig_bio);
   8612}
   8613
   8614/*
   8615 * Used by personalities that don't already clone the bio and thus can't
   8616 * easily add the timestamp to their extended bio structure.
   8617 */
   8618void md_account_bio(struct mddev *mddev, struct bio **bio)
   8619{
   8620	struct block_device *bdev = (*bio)->bi_bdev;
   8621	struct md_io_acct *md_io_acct;
   8622	struct bio *clone;
   8623
   8624	if (!blk_queue_io_stat(bdev->bd_disk->queue))
   8625		return;
   8626
   8627	clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
   8628	md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
   8629	md_io_acct->orig_bio = *bio;
   8630	md_io_acct->start_time = bio_start_io_acct(*bio);
   8631
   8632	clone->bi_end_io = md_end_io_acct;
   8633	clone->bi_private = md_io_acct;
   8634	*bio = clone;
   8635}
   8636EXPORT_SYMBOL_GPL(md_account_bio);
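
/*
 * Illustrative usage sketch (not from this file): a personality that wants
 * I/O accounting initializes the bioset once with acct_bioset_init() and
 * then passes each incoming bio through md_account_bio(), which may replace
 * it with an accounted clone.  The my_* functions are hypothetical.
 */
#if 0
static int my_run(struct mddev *mddev)
{
	/* Sets up mddev->io_acct_set; harmless if already initialized. */
	return acct_bioset_init(mddev);
}

static bool my_make_request(struct mddev *mddev, struct bio *bio)
{
	/* Replaces 'bio' with an accounted clone when io_stat is enabled. */
	md_account_bio(mddev, &bio);

	/* ... then remap and submit 'bio' to the member devices as usual. */
	my_map_and_submit(mddev, bio);	/* hypothetical */
	return true;
}
#endif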
   8637
   8638/* md_allow_write(mddev)
   8639 * Calling this ensures that the array is marked 'active' so that writes
   8640 * may proceed without blocking.  It is important to call this before
   8641 * attempting a GFP_KERNEL allocation while holding the mddev lock.
   8642 * Must be called with mddev_lock held.
   8643 */
   8644void md_allow_write(struct mddev *mddev)
   8645{
   8646	if (!mddev->pers)
   8647		return;
   8648	if (mddev->ro)
   8649		return;
   8650	if (!mddev->pers->sync_request)
   8651		return;
   8652
   8653	spin_lock(&mddev->lock);
   8654	if (mddev->in_sync) {
   8655		mddev->in_sync = 0;
   8656		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
   8657		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
   8658		if (mddev->safemode_delay &&
   8659		    mddev->safemode == 0)
   8660			mddev->safemode = 1;
   8661		spin_unlock(&mddev->lock);
   8662		md_update_sb(mddev, 0);
   8663		sysfs_notify_dirent_safe(mddev->sysfs_state);
   8664		/* wait for the dirty state to be recorded in the metadata */
   8665		wait_event(mddev->sb_wait,
   8666			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
   8667	} else
   8668		spin_unlock(&mddev->lock);
   8669}
   8670EXPORT_SYMBOL_GPL(md_allow_write);
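
/*
 * Illustrative usage sketch (not from this file): per the comment above,
 * md_allow_write() is called while holding the mddev lock, before a
 * GFP_KERNEL allocation, so the array is marked 'active' and writes may
 * proceed without blocking during the allocation.  The surrounding function
 * is hypothetical.
 */
#if 0
static int my_reconfigure(struct mddev *mddev)
{
	void *buf;

	/* Caller holds mddev_lock(); mark the array active first ... */
	md_allow_write(mddev);

	/* ... so writes may proceed while this allocation reclaims memory. */
	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(buf);
	return 0;
}
#endif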
   8671
   8672#define SYNC_MARKS	10
   8673#define	SYNC_MARK_STEP	(3*HZ)
   8674#define UPDATE_FREQUENCY (5*60*HZ)
   8675void md_do_sync(struct md_thread *thread)
   8676{
   8677	struct mddev *mddev = thread->mddev;
   8678	struct mddev *mddev2;
   8679	unsigned int currspeed = 0, window;
    8680	sector_t max_sectors, j, io_sectors, recovery_done;
   8681	unsigned long mark[SYNC_MARKS];
   8682	unsigned long update_time;
   8683	sector_t mark_cnt[SYNC_MARKS];
    8684	int last_mark, m;
   8685	struct list_head *tmp;
   8686	sector_t last_check;
   8687	int skipped = 0;
   8688	struct md_rdev *rdev;
   8689	char *desc, *action = NULL;
   8690	struct blk_plug plug;
   8691	int ret;
   8692
    8693	/* just in case the thread restarts... */
   8694	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
   8695	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
   8696		return;
   8697	if (mddev->ro) {/* never try to sync a read-only array */
   8698		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   8699		return;
   8700	}
   8701
   8702	if (mddev_is_clustered(mddev)) {
   8703		ret = md_cluster_ops->resync_start(mddev);
   8704		if (ret)
   8705			goto skip;
   8706
   8707		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
   8708		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
   8709			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
   8710			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
   8711		     && ((unsigned long long)mddev->curr_resync_completed
   8712			 < (unsigned long long)mddev->resync_max_sectors))
   8713			goto skip;
   8714	}
   8715
   8716	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
   8717		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
   8718			desc = "data-check";
   8719			action = "check";
   8720		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
   8721			desc = "requested-resync";
   8722			action = "repair";
   8723		} else
   8724			desc = "resync";
   8725	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
   8726		desc = "reshape";
   8727	else
   8728		desc = "recovery";
   8729
   8730	mddev->last_sync_action = action ?: desc;
   8731
   8732	/* we overload curr_resync somewhat here.
   8733	 * 0 == not engaged in resync at all
   8734	 * 2 == checking that there is no conflict with another sync
   8735	 * 1 == like 2, but have yielded to allow conflicting resync to
   8736	 *		commence
   8737	 * other == active in resync - this many blocks
   8738	 *
   8739	 * Before starting a resync we must have set curr_resync to
   8740	 * 2, and then checked that every "conflicting" array has curr_resync
   8741	 * less than ours.  When we find one that is the same or higher
   8742	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
   8743	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
   8744	 * This will mean we have to start checking from the beginning again.
   8745	 *
   8746	 */
   8747
   8748	do {
   8749		int mddev2_minor = -1;
   8750		mddev->curr_resync = 2;
   8751
   8752	try_again:
   8753		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
   8754			goto skip;
   8755		for_each_mddev(mddev2, tmp) {
   8756			if (mddev2 == mddev)
   8757				continue;
   8758			if (!mddev->parallel_resync
   8759			&&  mddev2->curr_resync
   8760			&&  match_mddev_units(mddev, mddev2)) {
   8761				DEFINE_WAIT(wq);
   8762				if (mddev < mddev2 && mddev->curr_resync == 2) {
   8763					/* arbitrarily yield */
   8764					mddev->curr_resync = 1;
   8765					wake_up(&resync_wait);
   8766				}
   8767				if (mddev > mddev2 && mddev->curr_resync == 1)
   8768					/* no need to wait here, we can wait the next
   8769					 * time 'round when curr_resync == 2
   8770					 */
   8771					continue;
   8772				/* We need to wait 'interruptible' so as not to
   8773				 * contribute to the load average, and not to
   8774				 * be caught by 'softlockup'
   8775				 */
   8776				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
   8777				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
   8778				    mddev2->curr_resync >= mddev->curr_resync) {
   8779					if (mddev2_minor != mddev2->md_minor) {
   8780						mddev2_minor = mddev2->md_minor;
   8781						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
   8782							desc, mdname(mddev),
   8783							mdname(mddev2));
   8784					}
   8785					mddev_put(mddev2);
   8786					if (signal_pending(current))
   8787						flush_signals(current);
   8788					schedule();
   8789					finish_wait(&resync_wait, &wq);
   8790					goto try_again;
   8791				}
   8792				finish_wait(&resync_wait, &wq);
   8793			}
   8794		}
   8795	} while (mddev->curr_resync < 2);
   8796
   8797	j = 0;
   8798	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
   8799		/* resync follows the size requested by the personality,
   8800		 * which defaults to physical size, but can be virtual size
   8801		 */
   8802		max_sectors = mddev->resync_max_sectors;
   8803		atomic64_set(&mddev->resync_mismatches, 0);
   8804		/* we don't use the checkpoint if there's a bitmap */
   8805		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
   8806			j = mddev->resync_min;
   8807		else if (!mddev->bitmap)
   8808			j = mddev->recovery_cp;
   8809
   8810	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
   8811		max_sectors = mddev->resync_max_sectors;
   8812		/*
   8813		 * If the original node aborts reshaping then we continue the
    8814		 * reshaping, so set j again to avoid restarting the reshape
    8815		 * from the very beginning
   8816		 */
   8817		if (mddev_is_clustered(mddev) &&
   8818		    mddev->reshape_position != MaxSector)
   8819			j = mddev->reshape_position;
   8820	} else {
   8821		/* recovery follows the physical size of devices */
   8822		max_sectors = mddev->dev_sectors;
   8823		j = MaxSector;
   8824		rcu_read_lock();
   8825		rdev_for_each_rcu(rdev, mddev)
   8826			if (rdev->raid_disk >= 0 &&
   8827			    !test_bit(Journal, &rdev->flags) &&
   8828			    !test_bit(Faulty, &rdev->flags) &&
   8829			    !test_bit(In_sync, &rdev->flags) &&
   8830			    rdev->recovery_offset < j)
   8831				j = rdev->recovery_offset;
   8832		rcu_read_unlock();
   8833
   8834		/* If there is a bitmap, we need to make sure all
   8835		 * writes that started before we added a spare
   8836		 * complete before we start doing a recovery.
   8837		 * Otherwise the write might complete and (via
   8838		 * bitmap_endwrite) set a bit in the bitmap after the
   8839		 * recovery has checked that bit and skipped that
   8840		 * region.
   8841		 */
   8842		if (mddev->bitmap) {
   8843			mddev->pers->quiesce(mddev, 1);
   8844			mddev->pers->quiesce(mddev, 0);
   8845		}
   8846	}
   8847
   8848	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
   8849	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
   8850	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
   8851		 speed_max(mddev), desc);
   8852
   8853	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
   8854
   8855	io_sectors = 0;
   8856	for (m = 0; m < SYNC_MARKS; m++) {
   8857		mark[m] = jiffies;
   8858		mark_cnt[m] = io_sectors;
   8859	}
   8860	last_mark = 0;
   8861	mddev->resync_mark = mark[last_mark];
   8862	mddev->resync_mark_cnt = mark_cnt[last_mark];
   8863
   8864	/*
   8865	 * Tune reconstruction:
   8866	 */
   8867	window = 32 * (PAGE_SIZE / 512);
   8868	pr_debug("md: using %dk window, over a total of %lluk.\n",
   8869		 window/2, (unsigned long long)max_sectors/2);
   8870
   8871	atomic_set(&mddev->recovery_active, 0);
   8872	last_check = 0;
   8873
   8874	if (j>2) {
   8875		pr_debug("md: resuming %s of %s from checkpoint.\n",
   8876			 desc, mdname(mddev));
   8877		mddev->curr_resync = j;
   8878	} else
   8879		mddev->curr_resync = 3; /* no longer delayed */
   8880	mddev->curr_resync_completed = j;
   8881	sysfs_notify_dirent_safe(mddev->sysfs_completed);
   8882	md_new_event();
   8883	update_time = jiffies;
   8884
   8885	blk_start_plug(&plug);
   8886	while (j < max_sectors) {
   8887		sector_t sectors;
   8888
   8889		skipped = 0;
   8890
   8891		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
   8892		    ((mddev->curr_resync > mddev->curr_resync_completed &&
   8893		      (mddev->curr_resync - mddev->curr_resync_completed)
   8894		      > (max_sectors >> 4)) ||
   8895		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
   8896		     (j - mddev->curr_resync_completed)*2
   8897		     >= mddev->resync_max - mddev->curr_resync_completed ||
   8898		     mddev->curr_resync_completed > mddev->resync_max
   8899			    )) {
   8900			/* time to update curr_resync_completed */
   8901			wait_event(mddev->recovery_wait,
   8902				   atomic_read(&mddev->recovery_active) == 0);
   8903			mddev->curr_resync_completed = j;
   8904			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
   8905			    j > mddev->recovery_cp)
   8906				mddev->recovery_cp = j;
   8907			update_time = jiffies;
   8908			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
   8909			sysfs_notify_dirent_safe(mddev->sysfs_completed);
   8910		}
   8911
   8912		while (j >= mddev->resync_max &&
   8913		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
   8914			/* As this condition is controlled by user-space,
   8915			 * we can block indefinitely, so use '_interruptible'
   8916			 * to avoid triggering warnings.
   8917			 */
   8918			flush_signals(current); /* just in case */
   8919			wait_event_interruptible(mddev->recovery_wait,
   8920						 mddev->resync_max > j
   8921						 || test_bit(MD_RECOVERY_INTR,
   8922							     &mddev->recovery));
   8923		}
   8924
   8925		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
   8926			break;
   8927
   8928		sectors = mddev->pers->sync_request(mddev, j, &skipped);
   8929		if (sectors == 0) {
   8930			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   8931			break;
   8932		}
   8933
   8934		if (!skipped) { /* actual IO requested */
   8935			io_sectors += sectors;
   8936			atomic_add(sectors, &mddev->recovery_active);
   8937		}
   8938
   8939		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
   8940			break;
   8941
   8942		j += sectors;
   8943		if (j > max_sectors)
   8944			/* when skipping, extra large numbers can be returned. */
   8945			j = max_sectors;
   8946		if (j > 2)
   8947			mddev->curr_resync = j;
   8948		mddev->curr_mark_cnt = io_sectors;
   8949		if (last_check == 0)
   8950			/* this is the earliest that rebuild will be
   8951			 * visible in /proc/mdstat
   8952			 */
   8953			md_new_event();
   8954
   8955		if (last_check + window > io_sectors || j == max_sectors)
   8956			continue;
   8957
   8958		last_check = io_sectors;
   8959	repeat:
   8960		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
   8961			/* step marks */
   8962			int next = (last_mark+1) % SYNC_MARKS;
   8963
   8964			mddev->resync_mark = mark[next];
   8965			mddev->resync_mark_cnt = mark_cnt[next];
   8966			mark[next] = jiffies;
   8967			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
   8968			last_mark = next;
   8969		}
   8970
   8971		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
   8972			break;
   8973
   8974		/*
    8975		 * this loop exits only when either we are slower than
   8976		 * the 'hard' speed limit, or the system was IO-idle for
   8977		 * a jiffy.
   8978		 * the system might be non-idle CPU-wise, but we only care
   8979		 * about not overloading the IO subsystem. (things like an
   8980		 * e2fsck being done on the RAID array should execute fast)
   8981		 */
   8982		cond_resched();
   8983
   8984		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
   8985		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
   8986			/((jiffies-mddev->resync_mark)/HZ +1) +1;
   8987
   8988		if (currspeed > speed_min(mddev)) {
   8989			if (currspeed > speed_max(mddev)) {
   8990				msleep(500);
   8991				goto repeat;
   8992			}
   8993			if (!is_mddev_idle(mddev, 0)) {
   8994				/*
   8995				 * Give other IO more of a chance.
   8996				 * The faster the devices, the less we wait.
   8997				 */
   8998				wait_event(mddev->recovery_wait,
   8999					   !atomic_read(&mddev->recovery_active));
   9000			}
   9001		}
   9002	}
    9003	pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
   9004		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
   9005		? "interrupted" : "done");
   9006	/*
   9007	 * this also signals 'finished resyncing' to md_stop
   9008	 */
   9009	blk_finish_plug(&plug);
   9010	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
   9011
   9012	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
   9013	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
   9014	    mddev->curr_resync > 3) {
   9015		mddev->curr_resync_completed = mddev->curr_resync;
   9016		sysfs_notify_dirent_safe(mddev->sysfs_completed);
   9017	}
   9018	mddev->pers->sync_request(mddev, max_sectors, &skipped);
   9019
   9020	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
   9021	    mddev->curr_resync > 3) {
   9022		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
   9023			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
   9024				if (mddev->curr_resync >= mddev->recovery_cp) {
   9025					pr_debug("md: checkpointing %s of %s.\n",
   9026						 desc, mdname(mddev));
   9027					if (test_bit(MD_RECOVERY_ERROR,
   9028						&mddev->recovery))
   9029						mddev->recovery_cp =
   9030							mddev->curr_resync_completed;
   9031					else
   9032						mddev->recovery_cp =
   9033							mddev->curr_resync;
   9034				}
   9035			} else
   9036				mddev->recovery_cp = MaxSector;
   9037		} else {
   9038			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
   9039				mddev->curr_resync = MaxSector;
   9040			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
   9041			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
   9042				rcu_read_lock();
   9043				rdev_for_each_rcu(rdev, mddev)
   9044					if (rdev->raid_disk >= 0 &&
   9045					    mddev->delta_disks >= 0 &&
   9046					    !test_bit(Journal, &rdev->flags) &&
   9047					    !test_bit(Faulty, &rdev->flags) &&
   9048					    !test_bit(In_sync, &rdev->flags) &&
   9049					    rdev->recovery_offset < mddev->curr_resync)
   9050						rdev->recovery_offset = mddev->curr_resync;
   9051				rcu_read_unlock();
   9052			}
   9053		}
   9054	}
   9055 skip:
    9056	/* Set CHANGE_PENDING here since another update may be needed,
    9057	 * so that other nodes are informed. It should be harmless for
    9058	 * normal raid */
   9059	set_mask_bits(&mddev->sb_flags, 0,
   9060		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
   9061
   9062	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
   9063			!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
   9064			mddev->delta_disks > 0 &&
   9065			mddev->pers->finish_reshape &&
   9066			mddev->pers->size &&
   9067			mddev->queue) {
   9068		mddev_lock_nointr(mddev);
   9069		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
   9070		mddev_unlock(mddev);
   9071		if (!mddev_is_clustered(mddev))
   9072			set_capacity_and_notify(mddev->gendisk,
   9073						mddev->array_sectors);
   9074	}
   9075
   9076	spin_lock(&mddev->lock);
   9077	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
   9078		/* We completed so min/max setting can be forgotten if used. */
   9079		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
   9080			mddev->resync_min = 0;
   9081		mddev->resync_max = MaxSector;
   9082	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
   9083		mddev->resync_min = mddev->curr_resync_completed;
   9084	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
   9085	mddev->curr_resync = 0;
   9086	spin_unlock(&mddev->lock);
   9087
   9088	wake_up(&resync_wait);
   9089	md_wakeup_thread(mddev->thread);
   9090	return;
   9091}
   9092EXPORT_SYMBOL_GPL(md_do_sync);
   9093
   9094static int remove_and_add_spares(struct mddev *mddev,
   9095				 struct md_rdev *this)
   9096{
   9097	struct md_rdev *rdev;
   9098	int spares = 0;
   9099	int removed = 0;
   9100	bool remove_some = false;
   9101
   9102	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
   9103		/* Mustn't remove devices when resync thread is running */
   9104		return 0;
   9105
   9106	rdev_for_each(rdev, mddev) {
   9107		if ((this == NULL || rdev == this) &&
   9108		    rdev->raid_disk >= 0 &&
   9109		    !test_bit(Blocked, &rdev->flags) &&
   9110		    test_bit(Faulty, &rdev->flags) &&
   9111		    atomic_read(&rdev->nr_pending)==0) {
   9112			/* Faulty non-Blocked devices with nr_pending == 0
   9113			 * never get nr_pending incremented,
   9114			 * never get Faulty cleared, and never get Blocked set.
   9115			 * So we can synchronize_rcu now rather than once per device
   9116			 */
   9117			remove_some = true;
   9118			set_bit(RemoveSynchronized, &rdev->flags);
   9119		}
   9120	}
   9121
   9122	if (remove_some)
   9123		synchronize_rcu();
   9124	rdev_for_each(rdev, mddev) {
   9125		if ((this == NULL || rdev == this) &&
   9126		    rdev->raid_disk >= 0 &&
   9127		    !test_bit(Blocked, &rdev->flags) &&
   9128		    ((test_bit(RemoveSynchronized, &rdev->flags) ||
   9129		     (!test_bit(In_sync, &rdev->flags) &&
   9130		      !test_bit(Journal, &rdev->flags))) &&
   9131		    atomic_read(&rdev->nr_pending)==0)) {
   9132			if (mddev->pers->hot_remove_disk(
   9133				    mddev, rdev) == 0) {
   9134				sysfs_unlink_rdev(mddev, rdev);
   9135				rdev->saved_raid_disk = rdev->raid_disk;
   9136				rdev->raid_disk = -1;
   9137				removed++;
   9138			}
   9139		}
   9140		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
   9141			clear_bit(RemoveSynchronized, &rdev->flags);
   9142	}
   9143
   9144	if (removed && mddev->kobj.sd)
   9145		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
   9146
   9147	if (this && removed)
   9148		goto no_add;
   9149
   9150	rdev_for_each(rdev, mddev) {
   9151		if (this && this != rdev)
   9152			continue;
   9153		if (test_bit(Candidate, &rdev->flags))
   9154			continue;
   9155		if (rdev->raid_disk >= 0 &&
   9156		    !test_bit(In_sync, &rdev->flags) &&
   9157		    !test_bit(Journal, &rdev->flags) &&
   9158		    !test_bit(Faulty, &rdev->flags))
   9159			spares++;
   9160		if (rdev->raid_disk >= 0)
   9161			continue;
   9162		if (test_bit(Faulty, &rdev->flags))
   9163			continue;
   9164		if (!test_bit(Journal, &rdev->flags)) {
   9165			if (mddev->ro &&
   9166			    ! (rdev->saved_raid_disk >= 0 &&
   9167			       !test_bit(Bitmap_sync, &rdev->flags)))
   9168				continue;
   9169
   9170			rdev->recovery_offset = 0;
   9171		}
   9172		if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
   9173			/* failure here is OK */
   9174			sysfs_link_rdev(mddev, rdev);
   9175			if (!test_bit(Journal, &rdev->flags))
   9176				spares++;
   9177			md_new_event();
   9178			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   9179		}
   9180	}
   9181no_add:
   9182	if (removed)
   9183		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   9184	return spares;
   9185}
   9186
   9187static void md_start_sync(struct work_struct *ws)
   9188{
   9189	struct mddev *mddev = container_of(ws, struct mddev, del_work);
   9190
   9191	mddev->sync_thread = md_register_thread(md_do_sync,
   9192						mddev,
   9193						"resync");
   9194	if (!mddev->sync_thread) {
   9195		pr_warn("%s: could not start resync thread...\n",
   9196			mdname(mddev));
   9197		/* leave the spares where they are, it shouldn't hurt */
   9198		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
   9199		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
   9200		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
   9201		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
   9202		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
   9203		wake_up(&resync_wait);
   9204		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
   9205				       &mddev->recovery))
   9206			if (mddev->sysfs_action)
   9207				sysfs_notify_dirent_safe(mddev->sysfs_action);
   9208	} else
   9209		md_wakeup_thread(mddev->sync_thread);
   9210	sysfs_notify_dirent_safe(mddev->sysfs_action);
   9211	md_new_event();
   9212}
   9213
   9214/*
   9215 * This routine is regularly called by all per-raid-array threads to
   9216 * deal with generic issues like resync and super-block update.
   9217 * Raid personalities that don't have a thread (linear/raid0) do not
   9218 * need this as they never do any recovery or update the superblock.
   9219 *
   9220 * It does not do any resync itself, but rather "forks" off other threads
   9221 * to do that as needed.
   9222 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
   9223 * "->recovery" and create a thread at ->sync_thread.
   9224 * When the thread finishes it sets MD_RECOVERY_DONE
    9225 * and wakes up this thread, which will reap the thread and finish up.
   9226 * This thread also removes any faulty devices (with nr_pending == 0).
   9227 *
   9228 * The overall approach is:
   9229 *  1/ if the superblock needs updating, update it.
   9230 *  2/ If a recovery thread is running, don't do anything else.
   9231 *  3/ If recovery has finished, clean up, possibly marking spares active.
   9232 *  4/ If there are any faulty devices, remove them.
    9233 *  5/ If the array is degraded, try to add spare devices
   9234 *  6/ If array has spares or is not in-sync, start a resync thread.
   9235 */
   9236void md_check_recovery(struct mddev *mddev)
   9237{
   9238	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
   9239		/* Write superblock - thread that called mddev_suspend()
   9240		 * holds reconfig_mutex for us.
   9241		 */
   9242		set_bit(MD_UPDATING_SB, &mddev->flags);
   9243		smp_mb__after_atomic();
   9244		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
   9245			md_update_sb(mddev, 0);
   9246		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
   9247		wake_up(&mddev->sb_wait);
   9248	}
   9249
   9250	if (mddev->suspended)
   9251		return;
   9252
   9253	if (mddev->bitmap)
   9254		md_bitmap_daemon_work(mddev);
   9255
   9256	if (signal_pending(current)) {
   9257		if (mddev->pers->sync_request && !mddev->external) {
   9258			pr_debug("md: %s in immediate safe mode\n",
   9259				 mdname(mddev));
   9260			mddev->safemode = 2;
   9261		}
   9262		flush_signals(current);
   9263	}
   9264
   9265	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
   9266		return;
   9267	if ( ! (
   9268		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
   9269		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
   9270		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
   9271		(mddev->external == 0 && mddev->safemode == 1) ||
   9272		(mddev->safemode == 2
   9273		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
   9274		))
   9275		return;
   9276
   9277	if (mddev_trylock(mddev)) {
   9278		int spares = 0;
   9279		bool try_set_sync = mddev->safemode != 0;
   9280
   9281		if (!mddev->external && mddev->safemode == 1)
   9282			mddev->safemode = 0;
   9283
   9284		if (mddev->ro) {
   9285			struct md_rdev *rdev;
   9286			if (!mddev->external && mddev->in_sync)
   9287				/* 'Blocked' flag not needed as failed devices
    9288			 * will be recorded if the array is switched to read/write.
   9289				 * Leaving it set will prevent the device
   9290				 * from being removed.
   9291				 */
   9292				rdev_for_each(rdev, mddev)
   9293					clear_bit(Blocked, &rdev->flags);
   9294			/* On a read-only array we can:
   9295			 * - remove failed devices
   9296			 * - add already-in_sync devices if the array itself
   9297			 *   is in-sync.
   9298			 * As we only add devices that are already in-sync,
   9299			 * we can activate the spares immediately.
   9300			 */
   9301			remove_and_add_spares(mddev, NULL);
   9302			/* There is no thread, but we need to call
   9303			 * ->spare_active and clear saved_raid_disk
   9304			 */
   9305			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
   9306			md_reap_sync_thread(mddev);
   9307			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   9308			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   9309			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
   9310			goto unlock;
   9311		}
   9312
   9313		if (mddev_is_clustered(mddev)) {
   9314			struct md_rdev *rdev, *tmp;
   9315			/* kick the device if another node issued a
   9316			 * remove disk.
   9317			 */
   9318			rdev_for_each_safe(rdev, tmp, mddev) {
   9319				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
   9320						rdev->raid_disk < 0)
   9321					md_kick_rdev_from_array(rdev);
   9322			}
   9323		}
   9324
   9325		if (try_set_sync && !mddev->external && !mddev->in_sync) {
   9326			spin_lock(&mddev->lock);
   9327			set_in_sync(mddev);
   9328			spin_unlock(&mddev->lock);
   9329		}
   9330
   9331		if (mddev->sb_flags)
   9332			md_update_sb(mddev, 0);
   9333
   9334		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
   9335		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
   9336			/* resync/recovery still happening */
   9337			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   9338			goto unlock;
   9339		}
   9340		if (mddev->sync_thread) {
   9341			md_reap_sync_thread(mddev);
   9342			goto unlock;
   9343		}
   9344		/* Set RUNNING before clearing NEEDED to avoid
   9345		 * any transients in the value of "sync_action".
   9346		 */
   9347		mddev->curr_resync_completed = 0;
   9348		spin_lock(&mddev->lock);
   9349		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
   9350		spin_unlock(&mddev->lock);
   9351		/* Clear some bits that don't mean anything, but
   9352		 * might be left set
   9353		 */
   9354		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
   9355		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
   9356
   9357		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
   9358		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
   9359			goto not_running;
   9360		/* no recovery is running.
   9361		 * remove any failed drives, then
   9362		 * add spares if possible.
   9363		 * Spares are also removed and re-added, to allow
   9364		 * the personality to fail the re-add.
   9365		 */
   9366
   9367		if (mddev->reshape_position != MaxSector) {
   9368			if (mddev->pers->check_reshape == NULL ||
   9369			    mddev->pers->check_reshape(mddev) != 0)
   9370				/* Cannot proceed */
   9371				goto not_running;
   9372			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
   9373			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   9374		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
   9375			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
   9376			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
   9377			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
   9378			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   9379		} else if (mddev->recovery_cp < MaxSector) {
   9380			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
   9381			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
   9382		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
   9383			/* nothing to be done ... */
   9384			goto not_running;
   9385
   9386		if (mddev->pers->sync_request) {
   9387			if (spares) {
   9388				/* We are adding a device or devices to an array
   9389				 * which has the bitmap stored on all devices.
   9390				 * So make sure all bitmap pages get written
   9391				 */
   9392				md_bitmap_write_all(mddev->bitmap);
   9393			}
   9394			INIT_WORK(&mddev->del_work, md_start_sync);
   9395			queue_work(md_misc_wq, &mddev->del_work);
   9396			goto unlock;
   9397		}
   9398	not_running:
   9399		if (!mddev->sync_thread) {
   9400			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
   9401			wake_up(&resync_wait);
   9402			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
   9403					       &mddev->recovery))
   9404				if (mddev->sysfs_action)
   9405					sysfs_notify_dirent_safe(mddev->sysfs_action);
   9406		}
   9407	unlock:
   9408		wake_up(&mddev->sb_wait);
   9409		mddev_unlock(mddev);
   9410	}
   9411}
   9412EXPORT_SYMBOL(md_check_recovery);
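
/*
 * Illustrative usage sketch (not from this file): personalities that run
 * their own daemon thread call md_check_recovery() from that thread's main
 * loop so superblock updates and resync/recovery starts are driven
 * regularly.  The thread function below is hypothetical.
 */
#if 0
static void my_raid_daemon(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;

	/* Handle generic md housekeeping (sb writes, starting sync_thread). */
	md_check_recovery(mddev);

	/* ... then service the personality's own pending work ... */
}
#endif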
   9413
   9414void md_reap_sync_thread(struct mddev *mddev)
   9415{
   9416	struct md_rdev *rdev;
   9417	sector_t old_dev_sectors = mddev->dev_sectors;
   9418	bool is_reshaped = false;
   9419
   9420	/* resync has finished, collect result */
   9421	md_unregister_thread(&mddev->sync_thread);
   9422	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
   9423	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
   9424	    mddev->degraded != mddev->raid_disks) {
   9425		/* success...*/
   9426		/* activate any spares */
   9427		if (mddev->pers->spare_active(mddev)) {
   9428			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
   9429			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
   9430		}
   9431	}
   9432	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
   9433	    mddev->pers->finish_reshape) {
   9434		mddev->pers->finish_reshape(mddev);
   9435		if (mddev_is_clustered(mddev))
   9436			is_reshaped = true;
   9437	}
   9438
    9439	/* If the array is no longer degraded, then any saved_raid_disk
   9440	 * information must be scrapped.
   9441	 */
   9442	if (!mddev->degraded)
   9443		rdev_for_each(rdev, mddev)
   9444			rdev->saved_raid_disk = -1;
   9445
   9446	md_update_sb(mddev, 1);
   9447	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
   9448	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
   9449	 * clustered raid */
   9450	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
   9451		md_cluster_ops->resync_finish(mddev);
   9452	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
   9453	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
   9454	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
   9455	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
   9456	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
   9457	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
   9458	/*
   9459	 * We call md_cluster_ops->update_size here because sync_size could
   9460	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
   9461	 * so it is time to update size across cluster.
   9462	 */
   9463	if (mddev_is_clustered(mddev) && is_reshaped
   9464				      && !test_bit(MD_CLOSING, &mddev->flags))
   9465		md_cluster_ops->update_size(mddev, old_dev_sectors);
   9466	wake_up(&resync_wait);
   9467	/* flag recovery needed just to double check */
   9468	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   9469	sysfs_notify_dirent_safe(mddev->sysfs_action);
   9470	md_new_event();
   9471	if (mddev->event_work.func)
   9472		queue_work(md_misc_wq, &mddev->event_work);
   9473}
   9474EXPORT_SYMBOL(md_reap_sync_thread);
   9475
   9476void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
   9477{
   9478	sysfs_notify_dirent_safe(rdev->sysfs_state);
   9479	wait_event_timeout(rdev->blocked_wait,
   9480			   !test_bit(Blocked, &rdev->flags) &&
   9481			   !test_bit(BlockedBadBlocks, &rdev->flags),
   9482			   msecs_to_jiffies(5000));
   9483	rdev_dec_pending(rdev, mddev);
   9484}
   9485EXPORT_SYMBOL(md_wait_for_blocked_rdev);
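
/*
 * Illustrative usage sketch (not from this file): the caller holds an
 * nr_pending reference on the rdev, which md_wait_for_blocked_rdev() drops
 * via rdev_dec_pending() before returning.  The surrounding function is
 * hypothetical.
 */
#if 0
static void my_wait_if_blocked(struct mddev *mddev, struct md_rdev *rdev)
{
	if (test_bit(Blocked, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		/* Sleeps (up to ~5s per wait) until the device is unblocked. */
		md_wait_for_blocked_rdev(rdev, mddev);
	}
}
#endif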
   9486
   9487void md_finish_reshape(struct mddev *mddev)
   9488{
    9489	/* called by the personality module when a reshape completes. */
   9490	struct md_rdev *rdev;
   9491
   9492	rdev_for_each(rdev, mddev) {
   9493		if (rdev->data_offset > rdev->new_data_offset)
   9494			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
   9495		else
   9496			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
   9497		rdev->data_offset = rdev->new_data_offset;
   9498	}
   9499}
   9500EXPORT_SYMBOL(md_finish_reshape);
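
/*
 * Illustrative usage sketch (not from this file): a reshaping personality
 * calls md_finish_reshape() from its own finish_reshape hook so that each
 * member's new_data_offset is committed and rdev->sectors adjusted.  The
 * hook below is hypothetical.
 */
#if 0
static void my_finish_reshape(struct mddev *mddev)
{
	md_finish_reshape(mddev);
}
#endif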
   9501
   9502/* Bad block management */
   9503
   9504/* Returns 1 on success, 0 on failure */
   9505int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
   9506		       int is_new)
   9507{
   9508	struct mddev *mddev = rdev->mddev;
   9509	int rv;
   9510	if (is_new)
   9511		s += rdev->new_data_offset;
   9512	else
   9513		s += rdev->data_offset;
   9514	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
   9515	if (rv == 0) {
   9516		/* Make sure they get written out promptly */
   9517		if (test_bit(ExternalBbl, &rdev->flags))
   9518			sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
   9519		sysfs_notify_dirent_safe(rdev->sysfs_state);
   9520		set_mask_bits(&mddev->sb_flags, 0,
   9521			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
   9522		md_wakeup_thread(rdev->mddev->thread);
   9523		return 1;
   9524	} else
   9525		return 0;
   9526}
   9527EXPORT_SYMBOL_GPL(rdev_set_badblocks);
   9528
   9529int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
   9530			 int is_new)
   9531{
   9532	int rv;
   9533	if (is_new)
   9534		s += rdev->new_data_offset;
   9535	else
   9536		s += rdev->data_offset;
   9537	rv = badblocks_clear(&rdev->badblocks, s, sectors);
   9538	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
   9539		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
   9540	return rv;
   9541}
   9542EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
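
/*
 * Illustrative usage sketch (not from this file): on a failed write a
 * personality typically tries to record the range as bad and only fails the
 * whole device if that is not possible; rdev_set_badblocks() returns 1 on
 * success and 0 on failure, as noted above.  The surrounding function is
 * hypothetical.
 */
#if 0
static void my_handle_write_error(struct mddev *mddev, struct md_rdev *rdev,
				  sector_t sector, int sectors)
{
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		/* Could not record the bad range: fail the whole device. */
		md_error(mddev, rdev);
}
#endif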
   9543
   9544static int md_notify_reboot(struct notifier_block *this,
   9545			    unsigned long code, void *x)
   9546{
   9547	struct list_head *tmp;
   9548	struct mddev *mddev;
   9549	int need_delay = 0;
   9550
   9551	for_each_mddev(mddev, tmp) {
   9552		if (mddev_trylock(mddev)) {
   9553			if (mddev->pers)
   9554				__md_stop_writes(mddev);
   9555			if (mddev->persistent)
   9556				mddev->safemode = 2;
   9557			mddev_unlock(mddev);
   9558		}
   9559		need_delay = 1;
   9560	}
   9561	/*
    9562	 * Certain more exotic SCSI devices are known to be volatile
    9563	 * with respect to overly early system reboots. While the
   9564	 * right place to handle this issue is the given
   9565	 * driver, we do want to have a safe RAID driver ...
   9566	 */
   9567	if (need_delay)
   9568		msleep(1000);
   9569
   9570	return NOTIFY_DONE;
   9571}
   9572
   9573static struct notifier_block md_notifier = {
   9574	.notifier_call	= md_notify_reboot,
   9575	.next		= NULL,
   9576	.priority	= INT_MAX, /* before any real devices */
   9577};
   9578
   9579static void md_geninit(void)
   9580{
   9581	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
   9582
   9583	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
   9584}
   9585
   9586static int __init md_init(void)
   9587{
   9588	int ret = -ENOMEM;
   9589
   9590	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
   9591	if (!md_wq)
   9592		goto err_wq;
   9593
   9594	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
   9595	if (!md_misc_wq)
   9596		goto err_misc_wq;
   9597
   9598	md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
   9599	if (!md_rdev_misc_wq)
   9600		goto err_rdev_misc_wq;
   9601
   9602	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
   9603	if (ret < 0)
   9604		goto err_md;
   9605
   9606	ret = __register_blkdev(0, "mdp", md_probe);
   9607	if (ret < 0)
   9608		goto err_mdp;
   9609	mdp_major = ret;
   9610
   9611	register_reboot_notifier(&md_notifier);
   9612	raid_table_header = register_sysctl_table(raid_root_table);
   9613
   9614	md_geninit();
   9615	return 0;
   9616
   9617err_mdp:
   9618	unregister_blkdev(MD_MAJOR, "md");
   9619err_md:
   9620	destroy_workqueue(md_rdev_misc_wq);
   9621err_rdev_misc_wq:
   9622	destroy_workqueue(md_misc_wq);
   9623err_misc_wq:
   9624	destroy_workqueue(md_wq);
   9625err_wq:
   9626	return ret;
   9627}
   9628
   9629static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
   9630{
   9631	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
   9632	struct md_rdev *rdev2, *tmp;
   9633	int role, ret;
   9634
   9635	/*
   9636	 * If size is changed in another node then we need to
   9637	 * do resize as well.
   9638	 */
   9639	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
   9640		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
   9641		if (ret)
   9642			pr_info("md-cluster: resize failed\n");
   9643		else
   9644			md_bitmap_update_sb(mddev->bitmap);
   9645	}
   9646
   9647	/* Check for change of roles in the active devices */
   9648	rdev_for_each_safe(rdev2, tmp, mddev) {
   9649		if (test_bit(Faulty, &rdev2->flags))
   9650			continue;
   9651
   9652		/* Check if the roles changed */
   9653		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
   9654
   9655		if (test_bit(Candidate, &rdev2->flags)) {
   9656			if (role == MD_DISK_ROLE_FAULTY) {
   9657				pr_info("md: Removing Candidate device %pg because add failed\n",
   9658					rdev2->bdev);
   9659				md_kick_rdev_from_array(rdev2);
   9660				continue;
   9661			}
   9662			else
   9663				clear_bit(Candidate, &rdev2->flags);
   9664		}
   9665
   9666		if (role != rdev2->raid_disk) {
   9667			/*
    9668			 * The device got activated, unless a reshape is happening.
   9669			 */
   9670			if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
   9671			    !(le32_to_cpu(sb->feature_map) &
   9672			      MD_FEATURE_RESHAPE_ACTIVE)) {
   9673				rdev2->saved_raid_disk = role;
   9674				ret = remove_and_add_spares(mddev, rdev2);
   9675				pr_info("Activated spare: %pg\n",
   9676					rdev2->bdev);
    9677				/* wake up mddev->thread here, so the array can
    9678				 * perform a resync with the newly activated disk */
   9679				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
   9680				md_wakeup_thread(mddev->thread);
   9681			}
   9682			/* device faulty
   9683			 * We just want to do the minimum to mark the disk
   9684			 * as faulty. The recovery is performed by the
   9685			 * one who initiated the error.
   9686			 */
   9687			if (role == MD_DISK_ROLE_FAULTY ||
   9688			    role == MD_DISK_ROLE_JOURNAL) {
   9689				md_error(mddev, rdev2);
   9690				clear_bit(Blocked, &rdev2->flags);
   9691			}
   9692		}
   9693	}
   9694
   9695	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
   9696		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
   9697		if (ret)
   9698			pr_warn("md: updating array disks failed. %d\n", ret);
   9699	}
   9700
   9701	/*
    9702	 * Since mddev->delta_disks has already been updated in
    9703	 * update_raid_disks(), it is time to check for a reshape.
   9704	 */
   9705	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
   9706	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
   9707		/*
   9708		 * reshape is happening in the remote node, we need to
   9709		 * update reshape_position and call start_reshape.
   9710		 */
   9711		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
   9712		if (mddev->pers->update_reshape_pos)
   9713			mddev->pers->update_reshape_pos(mddev);
   9714		if (mddev->pers->start_reshape)
   9715			mddev->pers->start_reshape(mddev);
   9716	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
   9717		   mddev->reshape_position != MaxSector &&
   9718		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
   9719		/* reshape is just done in another node. */
   9720		mddev->reshape_position = MaxSector;
   9721		if (mddev->pers->update_reshape_pos)
   9722			mddev->pers->update_reshape_pos(mddev);
   9723	}
   9724
   9725	/* Finally set the event to be up to date */
   9726	mddev->events = le64_to_cpu(sb->events);
   9727}
   9728
   9729static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
   9730{
   9731	int err;
   9732	struct page *swapout = rdev->sb_page;
   9733	struct mdp_superblock_1 *sb;
   9734
   9735	/* Store the sb page of the rdev in the swapout temporary
    9736	 * variable in case we fail later and need to restore it
   9737	 */
   9738	rdev->sb_page = NULL;
   9739	err = alloc_disk_sb(rdev);
   9740	if (err == 0) {
   9741		ClearPageUptodate(rdev->sb_page);
   9742		rdev->sb_loaded = 0;
   9743		err = super_types[mddev->major_version].
   9744			load_super(rdev, NULL, mddev->minor_version);
   9745	}
   9746	if (err < 0) {
   9747		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
   9748				__func__, __LINE__, rdev->desc_nr, err);
   9749		if (rdev->sb_page)
   9750			put_page(rdev->sb_page);
   9751		rdev->sb_page = swapout;
   9752		rdev->sb_loaded = 1;
   9753		return err;
   9754	}
   9755
   9756	sb = page_address(rdev->sb_page);
   9757	/* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
   9758	 * is not set
   9759	 */
   9760
   9761	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
   9762		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
   9763
   9764	/* The other node finished recovery, call spare_active to set
   9765	 * device In_sync and mddev->degraded
   9766	 */
   9767	if (rdev->recovery_offset == MaxSector &&
   9768	    !test_bit(In_sync, &rdev->flags) &&
   9769	    mddev->pers->spare_active(mddev))
   9770		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
   9771
   9772	put_page(swapout);
   9773	return 0;
   9774}
   9775
   9776void md_reload_sb(struct mddev *mddev, int nr)
   9777{
   9778	struct md_rdev *rdev = NULL, *iter;
   9779	int err;
   9780
   9781	/* Find the rdev */
   9782	rdev_for_each_rcu(iter, mddev) {
   9783		if (iter->desc_nr == nr) {
   9784			rdev = iter;
   9785			break;
   9786		}
   9787	}
   9788
   9789	if (!rdev) {
   9790		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
   9791		return;
   9792	}
   9793
   9794	err = read_rdev(mddev, rdev);
   9795	if (err < 0)
   9796		return;
   9797
   9798	check_sb_changes(mddev, rdev);
   9799
   9800	/* Read all rdev's to update recovery_offset */
   9801	rdev_for_each_rcu(rdev, mddev) {
   9802		if (!test_bit(Faulty, &rdev->flags))
   9803			read_rdev(mddev, rdev);
   9804	}
   9805}
   9806EXPORT_SYMBOL(md_reload_sb);
   9807
   9808#ifndef MODULE
   9809
   9810/*
   9811 * Searches all registered partitions for autorun RAID arrays
   9812 * at boot time.
   9813 */
   9814
   9815static DEFINE_MUTEX(detected_devices_mutex);
   9816static LIST_HEAD(all_detected_devices);
   9817struct detected_devices_node {
   9818	struct list_head list;
   9819	dev_t dev;
   9820};
   9821
   9822void md_autodetect_dev(dev_t dev)
   9823{
   9824	struct detected_devices_node *node_detected_dev;
   9825
   9826	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
   9827	if (node_detected_dev) {
   9828		node_detected_dev->dev = dev;
   9829		mutex_lock(&detected_devices_mutex);
   9830		list_add_tail(&node_detected_dev->list, &all_detected_devices);
   9831		mutex_unlock(&detected_devices_mutex);
   9832	}
   9833}
   9834
   9835void md_autostart_arrays(int part)
   9836{
   9837	struct md_rdev *rdev;
   9838	struct detected_devices_node *node_detected_dev;
   9839	dev_t dev;
   9840	int i_scanned, i_passed;
   9841
   9842	i_scanned = 0;
   9843	i_passed = 0;
   9844
   9845	pr_info("md: Autodetecting RAID arrays.\n");
   9846
   9847	mutex_lock(&detected_devices_mutex);
   9848	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
   9849		i_scanned++;
   9850		node_detected_dev = list_entry(all_detected_devices.next,
   9851					struct detected_devices_node, list);
   9852		list_del(&node_detected_dev->list);
   9853		dev = node_detected_dev->dev;
   9854		kfree(node_detected_dev);
   9855		mutex_unlock(&detected_devices_mutex);
    9856		rdev = md_import_device(dev, 0, 90);
   9857		mutex_lock(&detected_devices_mutex);
   9858		if (IS_ERR(rdev))
   9859			continue;
   9860
   9861		if (test_bit(Faulty, &rdev->flags))
   9862			continue;
   9863
   9864		set_bit(AutoDetected, &rdev->flags);
   9865		list_add(&rdev->same_set, &pending_raid_disks);
   9866		i_passed++;
   9867	}
   9868	mutex_unlock(&detected_devices_mutex);
   9869
   9870	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
   9871
   9872	autorun_devices(part);
   9873}
   9874
   9875#endif /* !MODULE */
   9876
   9877static __exit void md_exit(void)
   9878{
   9879	struct mddev *mddev;
   9880	struct list_head *tmp;
   9881	int delay = 1;
   9882
    9883	unregister_blkdev(MD_MAJOR, "md");
   9884	unregister_blkdev(mdp_major, "mdp");
   9885	unregister_reboot_notifier(&md_notifier);
   9886	unregister_sysctl_table(raid_table_header);
   9887
   9888	/* We cannot unload the modules while some process is
   9889	 * waiting for us in select() or poll() - wake them up
   9890	 */
   9891	md_unloading = 1;
   9892	while (waitqueue_active(&md_event_waiters)) {
   9893		/* not safe to leave yet */
   9894		wake_up(&md_event_waiters);
   9895		msleep(delay);
   9896		delay += delay;
   9897	}
   9898	remove_proc_entry("mdstat", NULL);
   9899
   9900	for_each_mddev(mddev, tmp) {
   9901		export_array(mddev);
   9902		mddev->ctime = 0;
   9903		mddev->hold_active = 0;
   9904		/*
   9905		 * for_each_mddev() will call mddev_put() at the end of each
   9906		 * iteration.  As the mddev is now fully clear, this will
   9907		 * schedule the mddev for destruction by a workqueue, and the
   9908		 * destroy_workqueue() below will wait for that to complete.
   9909		 */
   9910	}
   9911	destroy_workqueue(md_rdev_misc_wq);
   9912	destroy_workqueue(md_misc_wq);
   9913	destroy_workqueue(md_wq);
   9914}
   9915
   9916subsys_initcall(md_init);
   9917module_exit(md_exit)
   9918
   9919static int get_ro(char *buffer, const struct kernel_param *kp)
   9920{
   9921	return sprintf(buffer, "%d\n", start_readonly);
   9922}
   9923static int set_ro(const char *val, const struct kernel_param *kp)
   9924{
   9925	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
   9926}
   9927
   9928module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
   9929module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
   9930module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
   9931module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
   9932
   9933MODULE_LICENSE("GPL");
   9934MODULE_DESCRIPTION("MD RAID framework");
   9935MODULE_ALIAS("md");
   9936MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);