cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

build.c (43272B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright (c) International Business Machines Corp., 2006
      4 * Copyright (c) Nokia Corporation, 2007
      5 *
      6 * Author: Artem Bityutskiy (Битюцкий Артём),
      7 *         Frank Haverkamp
      8 */
      9
     10/*
     11 * This file includes UBI initialization and building of UBI devices.
     12 *
     13 * When UBI is initialized, it attaches all the MTD devices specified as the
     14 * module load parameters or the kernel boot parameters. If MTD devices were
     15 * not specified, UBI does not attach any MTD device, but it is possible to do
     16 * later using the "UBI control device".
     17 */
     18
     19#include <linux/err.h>
     20#include <linux/module.h>
     21#include <linux/moduleparam.h>
     22#include <linux/stringify.h>
     23#include <linux/namei.h>
     24#include <linux/stat.h>
     25#include <linux/miscdevice.h>
     26#include <linux/mtd/partitions.h>
     27#include <linux/log2.h>
     28#include <linux/kthread.h>
     29#include <linux/kernel.h>
     30#include <linux/slab.h>
     31#include <linux/major.h>
     32#include "ubi.h"
     33
     34/* Maximum length of the 'mtd=' parameter */
     35#define MTD_PARAM_LEN_MAX 64
     36
     37/* Maximum number of comma-separated items in the 'mtd=' parameter */
     38#define MTD_PARAM_MAX_COUNT 4
     39
     40/* Maximum value for the number of bad PEBs per 1024 PEBs */
     41#define MAX_MTD_UBI_BEB_LIMIT 768
     42
     43#ifdef CONFIG_MTD_UBI_MODULE
     44#define ubi_is_module() 1
     45#else
     46#define ubi_is_module() 0
     47#endif
     48
     49/**
     50 * struct mtd_dev_param - MTD device parameter description data structure.
     51 * @name: MTD character device node path, MTD device name, or MTD device number
     52 *        string
     53 * @ubi_num: UBI number
     54 * @vid_hdr_offs: VID header offset
     55 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
     56 */
     57struct mtd_dev_param {
     58	char name[MTD_PARAM_LEN_MAX];
     59	int ubi_num;
     60	int vid_hdr_offs;
     61	int max_beb_per1024;
     62};
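       /*
        * Illustrative example (not part of the original source): the module
        * parameter "mtd=/dev/mtd0,2048,20,1" is parsed by ubi_mtd_param_parse()
        * below into name = "/dev/mtd0", vid_hdr_offs = 2048, max_beb_per1024 = 20
        * and ubi_num = 1. Trailing fields may be omitted, in which case they
        * default to 0, 0 and UBI_DEV_NUM_AUTO respectively.
        */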
     63
     64/* Number of elements set in the @mtd_dev_param array */
     65static int mtd_devs;
     66
     67/* MTD devices specification parameters */
     68static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
     69#ifdef CONFIG_MTD_UBI_FASTMAP
     70/* UBI module parameter to enable fastmap automatically on non-fastmap images */
     71static bool fm_autoconvert;
     72static bool fm_debug;
     73#endif
     74
     75/* Slab cache for wear-leveling entries */
     76struct kmem_cache *ubi_wl_entry_slab;
     77
     78/* UBI control character device */
     79static struct miscdevice ubi_ctrl_cdev = {
     80	.minor = MISC_DYNAMIC_MINOR,
     81	.name = "ubi_ctrl",
     82	.fops = &ubi_ctrl_cdev_operations,
     83};
     84
     85/* All UBI devices in system */
     86static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
     87
     88/* Serializes UBI devices creations and removals */
     89DEFINE_MUTEX(ubi_devices_mutex);
     90
     91/* Protects @ubi_devices and @ubi->ref_count */
     92static DEFINE_SPINLOCK(ubi_devices_lock);
     93
     94/* "Show" method for files in '/<sysfs>/class/ubi/' */
     95/* UBI version attribute ('/<sysfs>/class/ubi/version') */
     96static ssize_t version_show(struct class *class, struct class_attribute *attr,
     97			    char *buf)
     98{
     99	return sprintf(buf, "%d\n", UBI_VERSION);
    100}
    101static CLASS_ATTR_RO(version);
    102
    103static struct attribute *ubi_class_attrs[] = {
    104	&class_attr_version.attr,
    105	NULL,
    106};
    107ATTRIBUTE_GROUPS(ubi_class);
    108
    109/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
    110struct class ubi_class = {
    111	.name		= UBI_NAME_STR,
    112	.owner		= THIS_MODULE,
    113	.class_groups	= ubi_class_groups,
    114};
    115
    116static ssize_t dev_attribute_show(struct device *dev,
    117				  struct device_attribute *attr, char *buf);
    118
    119/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
    120static struct device_attribute dev_eraseblock_size =
    121	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
    122static struct device_attribute dev_avail_eraseblocks =
    123	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
    124static struct device_attribute dev_total_eraseblocks =
    125	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
    126static struct device_attribute dev_volumes_count =
    127	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
    128static struct device_attribute dev_max_ec =
    129	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
    130static struct device_attribute dev_reserved_for_bad =
    131	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
    132static struct device_attribute dev_bad_peb_count =
    133	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
    134static struct device_attribute dev_max_vol_count =
    135	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
    136static struct device_attribute dev_min_io_size =
    137	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
    138static struct device_attribute dev_bgt_enabled =
    139	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
    140static struct device_attribute dev_mtd_num =
    141	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
    142static struct device_attribute dev_ro_mode =
    143	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
    144
    145/**
    146 * ubi_volume_notify - send a volume change notification.
    147 * @ubi: UBI device description object
    148 * @vol: volume description object of the changed volume
    149 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
    150 *
    151 * This is a helper function which notifies all subscribers about a volume
    152 * change event (creation, removal, re-sizing, re-naming, updating). Returns
    153 * zero in case of success and a negative error code in case of failure.
    154 */
    155int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
    156{
    157	int ret;
    158	struct ubi_notification nt;
    159
    160	ubi_do_get_device_info(ubi, &nt.di);
    161	ubi_do_get_volume_info(ubi, vol, &nt.vi);
    162
    163	switch (ntype) {
    164	case UBI_VOLUME_ADDED:
    165	case UBI_VOLUME_REMOVED:
    166	case UBI_VOLUME_RESIZED:
    167	case UBI_VOLUME_RENAMED:
    168		ret = ubi_update_fastmap(ubi);
    169		if (ret)
    170			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
    171	}
    172
    173	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
    174}
    175
    176/**
    177 * ubi_notify_all - send a notification to all volumes.
    178 * @ubi: UBI device description object
    179 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
    180 * @nb: the notifier to call
    181 *
    182 * This function walks all volumes of UBI device @ubi and sends the @ntype
    183 * notification for each volume. If @nb is %NULL, then all registered notifiers
    184 * are called, otherwise only the @nb notifier is called. Returns the number of
    185 * sent notifications.
    186 */
    187int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
    188{
    189	struct ubi_notification nt;
    190	int i, count = 0;
    191
    192	ubi_do_get_device_info(ubi, &nt.di);
    193
    194	mutex_lock(&ubi->device_mutex);
    195	for (i = 0; i < ubi->vtbl_slots; i++) {
    196		/*
    197		 * Since @ubi->device_mutex is locked, and we are not going to
    198		 * change @ubi->volumes, we do not have to lock
    199		 * @ubi->volumes_lock.
    200		 */
    201		if (!ubi->volumes[i])
    202			continue;
    203
    204		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
    205		if (nb)
    206			nb->notifier_call(nb, ntype, &nt);
    207		else
    208			blocking_notifier_call_chain(&ubi_notifiers, ntype,
    209						     &nt);
    210		count += 1;
    211	}
    212	mutex_unlock(&ubi->device_mutex);
    213
    214	return count;
    215}
    216
    217/**
    218 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
    219 * @nb: the notifier to call
    220 *
    221 * This function walks all UBI devices and volumes and sends the
    222 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
    223 * registered notifiers are called, otherwise only the @nb notifier is called.
    224 * Returns the number of sent notifications.
    225 */
    226int ubi_enumerate_volumes(struct notifier_block *nb)
    227{
    228	int i, count = 0;
    229
    230	/*
    231	 * Since the @ubi_devices_mutex is locked, and we are not going to
    232	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
    233	 */
    234	for (i = 0; i < UBI_MAX_DEVICES; i++) {
    235		struct ubi_device *ubi = ubi_devices[i];
    236
    237		if (!ubi)
    238			continue;
    239		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
    240	}
    241
    242	return count;
    243}
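       /*
        * Illustrative usage sketch (not part of the original source, assuming the
        * ubi_register_volume_notifier() helper and the struct ubi_notification
        * layout declared in include/linux/mtd/ubi.h): a client can subscribe
        * roughly like this and have already existing volumes replayed as
        * %UBI_VOLUME_ADDED events via ubi_enumerate_volumes():
        *
        *	static int my_notify(struct notifier_block *nb, unsigned long ntype,
        *			     void *ns_ptr)
        *	{
        *		struct ubi_notification *nt = ns_ptr;
        *
        *		if (ntype == UBI_VOLUME_ADDED)
        *			pr_info("ubi%d volume %d appeared\n",
        *				nt->vi.ubi_num, nt->vi.vol_id);
        *		return NOTIFY_OK;
        *	}
        *
        *	static struct notifier_block my_nb = { .notifier_call = my_notify };
        *
        *	err = ubi_register_volume_notifier(&my_nb, 0);
        */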
    244
    245/**
    246 * ubi_get_device - get UBI device.
    247 * @ubi_num: UBI device number
    248 *
    249 * This function returns UBI device description object for UBI device number
    250 * @ubi_num, or %NULL if the device does not exist. This function increases the
    251 * device reference count to prevent removal of the device. In other words, the
    252 * device cannot be removed if its reference count is not zero.
    253 */
    254struct ubi_device *ubi_get_device(int ubi_num)
    255{
    256	struct ubi_device *ubi;
    257
    258	spin_lock(&ubi_devices_lock);
    259	ubi = ubi_devices[ubi_num];
    260	if (ubi) {
    261		ubi_assert(ubi->ref_count >= 0);
    262		ubi->ref_count += 1;
    263		get_device(&ubi->dev);
    264	}
    265	spin_unlock(&ubi_devices_lock);
    266
    267	return ubi;
    268}
    269
    270/**
    271 * ubi_put_device - drop an UBI device reference.
    272 * @ubi: UBI device description object
    273 */
    274void ubi_put_device(struct ubi_device *ubi)
    275{
    276	spin_lock(&ubi_devices_lock);
    277	ubi->ref_count -= 1;
    278	put_device(&ubi->dev);
    279	spin_unlock(&ubi_devices_lock);
    280}
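       /*
        * Illustrative usage (not part of the original source): callers pin a
        * device for the duration of an operation by pairing the two helpers:
        *
        *	struct ubi_device *ubi = ubi_get_device(0);
        *
        *	if (ubi) {
        *		... operate on the pinned device ...
        *		ubi_put_device(ubi);
        *	}
        */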
    281
    282/**
    283 * ubi_get_by_major - get UBI device by character device major number.
    284 * @major: major number
    285 *
    286 * This function is similar to 'ubi_get_device()', but it searches the device
    287 * by its major number.
    288 */
    289struct ubi_device *ubi_get_by_major(int major)
    290{
    291	int i;
    292	struct ubi_device *ubi;
    293
    294	spin_lock(&ubi_devices_lock);
    295	for (i = 0; i < UBI_MAX_DEVICES; i++) {
    296		ubi = ubi_devices[i];
    297		if (ubi && MAJOR(ubi->cdev.dev) == major) {
    298			ubi_assert(ubi->ref_count >= 0);
    299			ubi->ref_count += 1;
    300			get_device(&ubi->dev);
    301			spin_unlock(&ubi_devices_lock);
    302			return ubi;
    303		}
    304	}
    305	spin_unlock(&ubi_devices_lock);
    306
    307	return NULL;
    308}
    309
    310/**
    311 * ubi_major2num - get UBI device number by character device major number.
    312 * @major: major number
    313 *
    314 * This function looks up the UBI device number by its character device major
    315 * number. If the UBI device is not found, this function returns -ENODEV,
    316 * otherwise the UBI device number is returned.
    317 */
    318int ubi_major2num(int major)
    319{
    320	int i, ubi_num = -ENODEV;
    321
    322	spin_lock(&ubi_devices_lock);
    323	for (i = 0; i < UBI_MAX_DEVICES; i++) {
    324		struct ubi_device *ubi = ubi_devices[i];
    325
    326		if (ubi && MAJOR(ubi->cdev.dev) == major) {
    327			ubi_num = ubi->ubi_num;
    328			break;
    329		}
    330	}
    331	spin_unlock(&ubi_devices_lock);
    332
    333	return ubi_num;
    334}
    335
    336/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
    337static ssize_t dev_attribute_show(struct device *dev,
    338				  struct device_attribute *attr, char *buf)
    339{
    340	ssize_t ret;
    341	struct ubi_device *ubi;
    342
    343	/*
    344	 * The below code looks weird, but it actually makes sense. We get the
    345	 * UBI device reference from the contained 'struct ubi_device'. But it
    346	 * is unclear if the device was removed or not yet. Indeed, if the
    347	 * device was removed before we increased its reference count,
    348	 * 'ubi_get_device()' will return -ENODEV and we fail.
    349	 *
    350	 * Remember, 'struct ubi_device' is freed in the release function, so
    351	 * we still can use 'ubi->ubi_num'.
    352	 */
    353	ubi = container_of(dev, struct ubi_device, dev);
    354
    355	if (attr == &dev_eraseblock_size)
    356		ret = sprintf(buf, "%d\n", ubi->leb_size);
    357	else if (attr == &dev_avail_eraseblocks)
    358		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
    359	else if (attr == &dev_total_eraseblocks)
    360		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
    361	else if (attr == &dev_volumes_count)
    362		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
    363	else if (attr == &dev_max_ec)
    364		ret = sprintf(buf, "%d\n", ubi->max_ec);
    365	else if (attr == &dev_reserved_for_bad)
    366		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
    367	else if (attr == &dev_bad_peb_count)
    368		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
    369	else if (attr == &dev_max_vol_count)
    370		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
    371	else if (attr == &dev_min_io_size)
    372		ret = sprintf(buf, "%d\n", ubi->min_io_size);
    373	else if (attr == &dev_bgt_enabled)
    374		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
    375	else if (attr == &dev_mtd_num)
    376		ret = sprintf(buf, "%d\n", ubi->mtd->index);
    377	else if (attr == &dev_ro_mode)
    378		ret = sprintf(buf, "%d\n", ubi->ro_mode);
    379	else
    380		ret = -EINVAL;
    381
    382	return ret;
    383}
    384
    385static struct attribute *ubi_dev_attrs[] = {
    386	&dev_eraseblock_size.attr,
    387	&dev_avail_eraseblocks.attr,
    388	&dev_total_eraseblocks.attr,
    389	&dev_volumes_count.attr,
    390	&dev_max_ec.attr,
    391	&dev_reserved_for_bad.attr,
    392	&dev_bad_peb_count.attr,
    393	&dev_max_vol_count.attr,
    394	&dev_min_io_size.attr,
    395	&dev_bgt_enabled.attr,
    396	&dev_mtd_num.attr,
    397	&dev_ro_mode.attr,
    398	NULL
    399};
    400ATTRIBUTE_GROUPS(ubi_dev);
    401
    402static void dev_release(struct device *dev)
    403{
    404	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
    405
    406	kfree(ubi);
    407}
    408
    409/**
    410 * kill_volumes - destroy all user volumes.
    411 * @ubi: UBI device description object
    412 */
    413static void kill_volumes(struct ubi_device *ubi)
    414{
    415	int i;
    416
    417	for (i = 0; i < ubi->vtbl_slots; i++)
    418		if (ubi->volumes[i])
    419			ubi_free_volume(ubi, ubi->volumes[i]);
    420}
    421
    422/**
    423 * uif_init - initialize user interfaces for an UBI device.
    424 * @ubi: UBI device description object
    425 *
    426 * This function initializes various user interfaces for an UBI device. If the
    427 * initialization fails at an early stage, this function frees all the
    428 * resources it allocated and returns an error.
    429 *
    430 * This function returns zero in case of success and a negative error code in
    431 * case of failure.
    432 */
    433static int uif_init(struct ubi_device *ubi)
    434{
    435	int i, err;
    436	dev_t dev;
    437
    438	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
    439
    440	/*
    441	 * Major numbers for the UBI character devices are allocated
    442	 * dynamically. Major numbers of volume character devices are
    443 * equal to those of the corresponding UBI character device. Minor
    444	 * numbers of UBI character devices are 0, while minor numbers of
    445	 * volume character devices start from 1. Thus, we allocate one major
    446	 * number and ubi->vtbl_slots + 1 minor numbers.
    447	 */
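       	/*
       	 * Illustrative example (not part of the original source): for ubi0 with
       	 * 128 volume table slots this reserves one dynamically allocated major
       	 * and minors 0..128; /dev/ubi0 itself gets minor 0, and a volume with
       	 * ID N is exposed as /dev/ubi0_N with minor N + 1.
       	 */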
    448	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
    449	if (err) {
    450		ubi_err(ubi, "cannot register UBI character devices");
    451		return err;
    452	}
    453
    454	ubi->dev.devt = dev;
    455
    456	ubi_assert(MINOR(dev) == 0);
    457	cdev_init(&ubi->cdev, &ubi_cdev_operations);
    458	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
    459	ubi->cdev.owner = THIS_MODULE;
    460
    461	dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
    462	err = cdev_device_add(&ubi->cdev, &ubi->dev);
    463	if (err)
    464		goto out_unreg;
    465
    466	for (i = 0; i < ubi->vtbl_slots; i++)
    467		if (ubi->volumes[i]) {
    468			err = ubi_add_volume(ubi, ubi->volumes[i]);
    469			if (err) {
    470				ubi_err(ubi, "cannot add volume %d", i);
    471				goto out_volumes;
    472			}
    473		}
    474
    475	return 0;
    476
    477out_volumes:
    478	kill_volumes(ubi);
    479	cdev_device_del(&ubi->cdev, &ubi->dev);
    480out_unreg:
    481	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
    482	ubi_err(ubi, "cannot initialize UBI %s, error %d",
    483		ubi->ubi_name, err);
    484	return err;
    485}
    486
    487/**
    488 * uif_close - close user interfaces for an UBI device.
    489 * @ubi: UBI device description object
    490 *
    491 * Note, since this function un-registers UBI volume device objects (@vol->dev),
    492 * the memory allocated for the volumes is freed as well (in the release
    493 * function).
    494 */
    495static void uif_close(struct ubi_device *ubi)
    496{
    497	kill_volumes(ubi);
    498	cdev_device_del(&ubi->cdev, &ubi->dev);
    499	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
    500}
    501
    502/**
    503 * ubi_free_volumes_from - free volumes from specific index.
    504 * @ubi: UBI device description object
    505 * @from: the start index used for volume free.
    506 */
    507static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
    508{
    509	int i;
    510
    511	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
    512		if (!ubi->volumes[i])
    513			continue;
    514		ubi_eba_replace_table(ubi->volumes[i], NULL);
    515		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
    516		kfree(ubi->volumes[i]);
    517		ubi->volumes[i] = NULL;
    518	}
    519}
    520
    521/**
    522 * ubi_free_all_volumes - free all volumes.
    523 * @ubi: UBI device description object
    524 */
    525void ubi_free_all_volumes(struct ubi_device *ubi)
    526{
    527	ubi_free_volumes_from(ubi, 0);
    528}
    529
    530/**
    531 * ubi_free_internal_volumes - free internal volumes.
    532 * @ubi: UBI device description object
    533 */
    534void ubi_free_internal_volumes(struct ubi_device *ubi)
    535{
    536	ubi_free_volumes_from(ubi, ubi->vtbl_slots);
    537}
    538
    539static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
    540{
    541	int limit, device_pebs;
    542	uint64_t device_size;
    543
    544	if (!max_beb_per1024) {
    545		/*
    546		 * Since max_beb_per1024 has not been set by the user in either
    547		 * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
    548		 * limit if it is supported by the device.
    549		 */
    550		limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
    551		if (limit < 0)
    552			return 0;
    553		return limit;
    554	}
    555
    556	/*
    557	 * Here we are using size of the entire flash chip and
    558	 * not just the MTD partition size because the maximum
    559	 * number of bad eraseblocks is a percentage of the
    560	 * whole device and bad eraseblocks are not fairly
    561	 * distributed over the flash chip. So the worst case
    562	 * is that all the bad eraseblocks of the chip are in
    563	 * the MTD partition we are attaching (ubi->mtd).
    564	 */
    565	device_size = mtd_get_device_size(ubi->mtd);
    566	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
    567	limit = mult_frac(device_pebs, max_beb_per1024, 1024);
    568
    569	/* Round it up */
    570	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
    571		limit += 1;
    572
    573	return limit;
    574}
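       /*
        * Illustrative arithmetic (not part of the original source): for a chip
        * with 4096 eraseblocks and max_beb_per1024 = 20 the limit works out to
        * 4096 * 20 / 1024 = 80 PEBs; with max_beb_per1024 = 25 it is 100 PEBs,
        * matching the example given in the 'mtd=' parameter description below.
        */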
    575
    576/**
    577 * io_init - initialize I/O sub-system for a given UBI device.
    578 * @ubi: UBI device description object
    579 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
    580 *
    581 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
    582 * assumed:
    583 *   o EC header is always at offset zero - this cannot be changed;
    584 *   o VID header starts just after the EC header at the closest address
    585 *     aligned to @io->hdrs_min_io_size;
    586 *   o data starts just after the VID header at the closest address aligned to
    587 *     @io->min_io_size
    588 *
    589 * This function returns zero in case of success and a negative error code in
    590 * case of failure.
    591 */
    592static int io_init(struct ubi_device *ubi, int max_beb_per1024)
    593{
    594	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
    595	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
    596
    597	if (ubi->mtd->numeraseregions != 0) {
    598		/*
    599		 * Some flashes have several erase regions. Different regions
    600		 * may have different eraseblock size and other
    601		 * characteristics. It looks like mostly multi-region flashes
    602		 * have one "main" region and one or more small regions to
    603		 * store boot loader code or boot parameters or whatever. I
    604		 * guess we should just pick the largest region. But this is
    605		 * not implemented.
    606		 */
    607		ubi_err(ubi, "multiple regions, not implemented");
    608		return -EINVAL;
    609	}
    610
    611	if (ubi->vid_hdr_offset < 0)
    612		return -EINVAL;
    613
    614	/*
    615	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
    616	 * physical eraseblocks maximum.
    617	 */
    618
    619	ubi->peb_size   = ubi->mtd->erasesize;
    620	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
    621	ubi->flash_size = ubi->mtd->size;
    622
    623	if (mtd_can_have_bb(ubi->mtd)) {
    624		ubi->bad_allowed = 1;
    625		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
    626	}
    627
    628	if (ubi->mtd->type == MTD_NORFLASH)
    629		ubi->nor_flash = 1;
    630
    631	ubi->min_io_size = ubi->mtd->writesize;
    632	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
    633
    634	/*
    635	 * Make sure minimal I/O unit is power of 2. Note, there is no
    636	 * fundamental reason for this assumption. It is just an optimization
    637	 * which allows us to avoid costly division operations.
    638	 */
    639	if (!is_power_of_2(ubi->min_io_size)) {
    640		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
    641			ubi->min_io_size);
    642		return -EINVAL;
    643	}
    644
    645	ubi_assert(ubi->hdrs_min_io_size > 0);
    646	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
    647	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
    648
    649	ubi->max_write_size = ubi->mtd->writebufsize;
    650	/*
    651	 * Maximum write size has to be greater than or equal to the min. I/O
    652	 * size, and be a multiple of the min. I/O size.
    653	 */
    654	if (ubi->max_write_size < ubi->min_io_size ||
    655	    ubi->max_write_size % ubi->min_io_size ||
    656	    !is_power_of_2(ubi->max_write_size)) {
    657		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
    658			ubi->max_write_size, ubi->min_io_size);
    659		return -EINVAL;
    660	}
    661
    662	/* Calculate default aligned sizes of EC and VID headers */
    663	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
    664	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
    665
    666	dbg_gen("min_io_size      %d", ubi->min_io_size);
    667	dbg_gen("max_write_size   %d", ubi->max_write_size);
    668	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
    669	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
    670	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
    671
    672	if (ubi->vid_hdr_offset == 0)
    673		/* Default offset */
    674		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
    675				      ubi->ec_hdr_alsize;
    676	else {
    677		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
    678						~(ubi->hdrs_min_io_size - 1);
    679		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
    680						ubi->vid_hdr_aloffset;
    681	}
    682
    683	/* Similar for the data offset */
    684	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
    685	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
    686
    687	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
    688	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
    689	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
    690	dbg_gen("leb_start        %d", ubi->leb_start);
    691
    692	/* The shift must be aligned to 32-bit boundary */
    693	if (ubi->vid_hdr_shift % 4) {
    694		ubi_err(ubi, "unaligned VID header shift %d",
    695			ubi->vid_hdr_shift);
    696		return -EINVAL;
    697	}
    698
    699	/* Check sanity */
    700	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
    701	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
    702	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
    703	    ubi->leb_start & (ubi->min_io_size - 1)) {
    704		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
    705			ubi->vid_hdr_offset, ubi->leb_start);
    706		return -EINVAL;
    707	}
    708
    709	/*
    710	 * Set the maximum number of erroneous physical eraseblocks to 10%.
    711	 * Erroneous PEBs are those which have read errors.
    712	 */
    713	ubi->max_erroneous = ubi->peb_count / 10;
    714	if (ubi->max_erroneous < 16)
    715		ubi->max_erroneous = 16;
    716	dbg_gen("max_erroneous    %d", ubi->max_erroneous);
    717
    718	/*
    719	 * It may happen that EC and VID headers are situated in one minimal
    720	 * I/O unit. In this case we can only accept this UBI image in
    721	 * read-only mode.
    722	 */
    723	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
    724		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
    725		ubi->ro_mode = 1;
    726	}
    727
    728	ubi->leb_size = ubi->peb_size - ubi->leb_start;
    729
    730	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
    731		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
    732			ubi->mtd->index);
    733		ubi->ro_mode = 1;
    734	}
    735
    736	/*
    737	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
    738	 * unfortunately, MTD does not provide this information. We should loop
    739	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
    740	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
    741	 * uninitialized so far.
    742	 */
    743
    744	return 0;
    745}
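       /*
        * Illustrative layout (not part of the original source), assuming a NAND
        * chip with 128 KiB eraseblocks, 2048-byte pages and no sub-pages:
        * min_io_size and hdrs_min_io_size are both 2048, the EC header sits at
        * offset 0, the VID header defaults to offset 2048, leb_start becomes
        * ALIGN(2048 + 64, 2048) = 4096 and leb_size = 131072 - 4096 = 126976.
        */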
    746
    747/**
    748 * autoresize - re-size the volume which has the "auto-resize" flag set.
    749 * @ubi: UBI device description object
    750 * @vol_id: ID of the volume to re-size
    751 *
    752 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
    753 * the volume table to the largest possible size. See comments in ubi-header.h
    754 * for more description of the flag. Returns zero in case of success and a
    755 * negative error code in case of failure.
    756 */
    757static int autoresize(struct ubi_device *ubi, int vol_id)
    758{
    759	struct ubi_volume_desc desc;
    760	struct ubi_volume *vol = ubi->volumes[vol_id];
    761	int err, old_reserved_pebs = vol->reserved_pebs;
    762
    763	if (ubi->ro_mode) {
    764		ubi_warn(ubi, "skip auto-resize because of R/O mode");
    765		return 0;
    766	}
    767
    768	/*
    769	 * Clear the auto-resize flag in the in-memory copy of the
    770	 * volume table, and 'ubi_resize_volume()' will propagate this change
    771	 * to the flash.
    772	 */
    773	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
    774
    775	if (ubi->avail_pebs == 0) {
    776		struct ubi_vtbl_record vtbl_rec;
    777
    778		/*
    779		 * No available PEBs to re-size the volume, clear the flag on
    780		 * flash and exit.
    781		 */
    782		vtbl_rec = ubi->vtbl[vol_id];
    783		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
    784		if (err)
    785			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
    786				vol_id);
    787	} else {
    788		desc.vol = vol;
    789		err = ubi_resize_volume(&desc,
    790					old_reserved_pebs + ubi->avail_pebs);
    791		if (err)
    792			ubi_err(ubi, "cannot auto-resize volume %d",
    793				vol_id);
    794	}
    795
    796	if (err)
    797		return err;
    798
    799	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
    800		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
    801	return 0;
    802}
    803
    804/**
    805 * ubi_attach_mtd_dev - attach an MTD device.
    806 * @mtd: MTD device description object
    807 * @ubi_num: number to assign to the new UBI device
    808 * @vid_hdr_offset: VID header offset
    809 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
    810 *
    811 * This function attaches MTD device @mtd to UBI and assigns @ubi_num number
    812 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
    813 * which case this function finds a vacant device number and assigns it
    814 * automatically. Returns the new UBI device number in case of success and a
    815 * negative error code in case of failure.
    816 *
    817 * Note, the invocations of this function have to be serialized by the
    818 * @ubi_devices_mutex.
    819 */
    820int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
    821		       int vid_hdr_offset, int max_beb_per1024)
    822{
    823	struct ubi_device *ubi;
    824	int i, err;
    825
    826	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
    827		return -EINVAL;
    828
    829	if (!max_beb_per1024)
    830		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
    831
    832	/*
    833	 * Check if we already have the same MTD device attached.
    834	 *
    835	 * Note, this function assumes that UBI devices creations and deletions
    836	 * are serialized, so it does not take the &ubi_devices_lock.
    837	 */
    838	for (i = 0; i < UBI_MAX_DEVICES; i++) {
    839		ubi = ubi_devices[i];
    840		if (ubi && mtd->index == ubi->mtd->index) {
    841			pr_err("ubi: mtd%d is already attached to ubi%d\n",
    842				mtd->index, i);
    843			return -EEXIST;
    844		}
    845	}
    846
    847	/*
    848	 * Make sure this MTD device is not emulated on top of an UBI volume
    849	 * already. Well, generally this recursion works fine, but there are
    850	 * different problems like the UBI module takes a reference to itself
    851	 * by attaching (and thus, opening) the emulated MTD device. This
    852	 * results in inability to unload the module. And in general it makes
    853	 * no sense to attach emulated MTD devices, so we prohibit this.
    854	 */
    855	if (mtd->type == MTD_UBIVOLUME) {
    856		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n",
    857			mtd->index);
    858		return -EINVAL;
    859	}
    860
    861	/*
    862	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
    863	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
    864	 * will die soon and you will lose all your data.
    865	 * Relax this rule if the partition we're attaching to operates in SLC
    866	 * mode.
    867	 */
    868	if (mtd->type == MTD_MLCNANDFLASH &&
    869	    !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
    870		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
    871			mtd->index);
    872		return -EINVAL;
    873	}
    874
    875	if (ubi_num == UBI_DEV_NUM_AUTO) {
    876		/* Search for an empty slot in the @ubi_devices array */
    877		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
    878			if (!ubi_devices[ubi_num])
    879				break;
    880		if (ubi_num == UBI_MAX_DEVICES) {
    881			pr_err("ubi: only %d UBI devices may be created\n",
    882				UBI_MAX_DEVICES);
    883			return -ENFILE;
    884		}
    885	} else {
    886		if (ubi_num >= UBI_MAX_DEVICES)
    887			return -EINVAL;
    888
    889		/* Make sure ubi_num is not busy */
    890		if (ubi_devices[ubi_num]) {
    891			pr_err("ubi: ubi%i already exists\n", ubi_num);
    892			return -EEXIST;
    893		}
    894	}
    895
    896	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
    897	if (!ubi)
    898		return -ENOMEM;
    899
    900	device_initialize(&ubi->dev);
    901	ubi->dev.release = dev_release;
    902	ubi->dev.class = &ubi_class;
    903	ubi->dev.groups = ubi_dev_groups;
    904
    905	ubi->mtd = mtd;
    906	ubi->ubi_num = ubi_num;
    907	ubi->vid_hdr_offset = vid_hdr_offset;
    908	ubi->autoresize_vol_id = -1;
    909
    910#ifdef CONFIG_MTD_UBI_FASTMAP
    911	ubi->fm_pool.used = ubi->fm_pool.size = 0;
    912	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
    913
    914	/*
    915	 * fm_pool.max_size is 5% of the total number of PEBs, but it is also
    916	 * clamped between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
    917	 */
    918	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
    919		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
    920	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
    921		UBI_FM_MIN_POOL_SIZE);
    922
    923	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
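       	/*
       	 * Illustrative arithmetic (not part of the original source): for a
       	 * device with 4096 PEBs the 5% figure is (4096 / 100) * 5 = 200, so,
       	 * assuming that value survives the clamping above, fm_pool.max_size is
       	 * 200 and fm_wl_pool.max_size is 100.
       	 */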
    924	ubi->fm_disabled = !fm_autoconvert;
    925	if (fm_debug)
    926		ubi_enable_dbg_chk_fastmap(ubi);
    927
    928	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
    929	    <= UBI_FM_MAX_START) {
    930		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
    931			UBI_FM_MAX_START);
    932		ubi->fm_disabled = 1;
    933	}
    934
    935	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
    936	ubi_msg(ubi, "default fastmap WL pool size: %d",
    937		ubi->fm_wl_pool.max_size);
    938#else
    939	ubi->fm_disabled = 1;
    940#endif
    941	mutex_init(&ubi->buf_mutex);
    942	mutex_init(&ubi->ckvol_mutex);
    943	mutex_init(&ubi->device_mutex);
    944	spin_lock_init(&ubi->volumes_lock);
    945	init_rwsem(&ubi->fm_protect);
    946	init_rwsem(&ubi->fm_eba_sem);
    947
    948	ubi_msg(ubi, "attaching mtd%d", mtd->index);
    949
    950	err = io_init(ubi, max_beb_per1024);
    951	if (err)
    952		goto out_free;
    953
    954	err = -ENOMEM;
    955	ubi->peb_buf = vmalloc(ubi->peb_size);
    956	if (!ubi->peb_buf)
    957		goto out_free;
    958
    959#ifdef CONFIG_MTD_UBI_FASTMAP
    960	ubi->fm_size = ubi_calc_fm_size(ubi);
    961	ubi->fm_buf = vzalloc(ubi->fm_size);
    962	if (!ubi->fm_buf)
    963		goto out_free;
    964#endif
    965	err = ubi_attach(ubi, 0);
    966	if (err) {
    967		ubi_err(ubi, "failed to attach mtd%d, error %d",
    968			mtd->index, err);
    969		goto out_free;
    970	}
    971
    972	if (ubi->autoresize_vol_id != -1) {
    973		err = autoresize(ubi, ubi->autoresize_vol_id);
    974		if (err)
    975			goto out_detach;
    976	}
    977
    978	err = uif_init(ubi);
    979	if (err)
    980		goto out_detach;
    981
    982	err = ubi_debugfs_init_dev(ubi);
    983	if (err)
    984		goto out_uif;
    985
    986	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
    987	if (IS_ERR(ubi->bgt_thread)) {
    988		err = PTR_ERR(ubi->bgt_thread);
    989		ubi_err(ubi, "cannot spawn \"%s\", error %d",
    990			ubi->bgt_name, err);
    991		goto out_debugfs;
    992	}
    993
    994	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
    995		mtd->index, mtd->name, ubi->flash_size >> 20);
    996	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
    997		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
    998	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
    999		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
   1000	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
   1001		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
   1002	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
   1003		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
   1004	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
   1005		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
   1006		ubi->vtbl_slots);
   1007	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
   1008		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
   1009		ubi->image_seq);
   1010	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
   1011		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
   1012
   1013	/*
   1014	 * The below lock makes sure we do not race with 'ubi_thread()' which
   1015	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
   1016	 */
   1017	spin_lock(&ubi->wl_lock);
   1018	ubi->thread_enabled = 1;
   1019	wake_up_process(ubi->bgt_thread);
   1020	spin_unlock(&ubi->wl_lock);
   1021
   1022	ubi_devices[ubi_num] = ubi;
   1023	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
   1024	return ubi_num;
   1025
   1026out_debugfs:
   1027	ubi_debugfs_exit_dev(ubi);
   1028out_uif:
   1029	uif_close(ubi);
   1030out_detach:
   1031	ubi_wl_close(ubi);
   1032	ubi_free_all_volumes(ubi);
   1033	vfree(ubi->vtbl);
   1034out_free:
   1035	vfree(ubi->peb_buf);
   1036	vfree(ubi->fm_buf);
   1037	put_device(&ubi->dev);
   1038	return err;
   1039}
   1040
   1041/**
   1042 * ubi_detach_mtd_dev - detach an MTD device.
   1043 * @ubi_num: UBI device number to detach from
   1044 * @anyway: detach MTD even if device reference count is not zero
   1045 *
   1046 * This function destroys an UBI device number @ubi_num and detaches the
   1047 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
   1048 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
   1049 * exist.
   1050 *
   1051 * Note, the invocations of this function have to be serialized by the
   1052 * @ubi_devices_mutex.
   1053 */
   1054int ubi_detach_mtd_dev(int ubi_num, int anyway)
   1055{
   1056	struct ubi_device *ubi;
   1057
   1058	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
   1059		return -EINVAL;
   1060
   1061	ubi = ubi_get_device(ubi_num);
   1062	if (!ubi)
   1063		return -EINVAL;
   1064
   1065	spin_lock(&ubi_devices_lock);
   1066	put_device(&ubi->dev);
   1067	ubi->ref_count -= 1;
   1068	if (ubi->ref_count) {
   1069		if (!anyway) {
   1070			spin_unlock(&ubi_devices_lock);
   1071			return -EBUSY;
   1072		}
   1073		/* This may only happen if there is a bug */
   1074		ubi_err(ubi, "%s reference count %d, destroy anyway",
   1075			ubi->ubi_name, ubi->ref_count);
   1076	}
   1077	ubi_devices[ubi_num] = NULL;
   1078	spin_unlock(&ubi_devices_lock);
   1079
   1080	ubi_assert(ubi_num == ubi->ubi_num);
   1081	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
   1082	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
   1083#ifdef CONFIG_MTD_UBI_FASTMAP
   1084	/* If we don't write a new fastmap at detach time we lose all
   1085	 * EC updates that have been made since the last written fastmap.
   1086	 * In case of fastmap debugging we omit the update to simulate an
   1087	 * unclean shutdown. */
   1088	if (!ubi_dbg_chk_fastmap(ubi))
   1089		ubi_update_fastmap(ubi);
   1090#endif
   1091	/*
   1092	 * Before freeing anything, we have to stop the background thread to
   1093	 * prevent it from doing anything on this device while we are freeing.
   1094	 */
   1095	if (ubi->bgt_thread)
   1096		kthread_stop(ubi->bgt_thread);
   1097
   1098#ifdef CONFIG_MTD_UBI_FASTMAP
   1099	cancel_work_sync(&ubi->fm_work);
   1100#endif
   1101	ubi_debugfs_exit_dev(ubi);
   1102	uif_close(ubi);
   1103
   1104	ubi_wl_close(ubi);
   1105	ubi_free_internal_volumes(ubi);
   1106	vfree(ubi->vtbl);
   1107	vfree(ubi->peb_buf);
   1108	vfree(ubi->fm_buf);
   1109	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
   1110	put_mtd_device(ubi->mtd);
   1111	put_device(&ubi->dev);
   1112	return 0;
   1113}
   1114
   1115/**
   1116 * open_mtd_by_chdev - open an MTD device by its character device node path.
   1117 * @mtd_dev: MTD character device node path
   1118 *
   1119 * This helper function opens an MTD device by its character node device path.
   1120 * Returns MTD device description object in case of success and a negative
   1121 * error code in case of failure.
   1122 */
   1123static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
   1124{
   1125	int err, minor;
   1126	struct path path;
   1127	struct kstat stat;
   1128
   1129	/* Probably this is an MTD character device node path */
   1130	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
   1131	if (err)
   1132		return ERR_PTR(err);
   1133
   1134	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
   1135	path_put(&path);
   1136	if (err)
   1137		return ERR_PTR(err);
   1138
   1139	/* MTD device number is defined by the major / minor numbers */
   1140	if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
   1141		return ERR_PTR(-EINVAL);
   1142
   1143	minor = MINOR(stat.rdev);
   1144
   1145	if (minor & 1)
   1146		/*
   1147		 * We do not think that support for the read-only "/dev/mtdrX" devices
   1148		 * is needed, so we do not support them to avoid doing extra work.
   1149		 */
   1150		return ERR_PTR(-EINVAL);
   1151
   1152	return get_mtd_device(NULL, minor / 2);
   1153}
   1154
   1155/**
   1156 * open_mtd_device - open MTD device by name, character device path, or number.
   1157 * @mtd_dev: name, character device node path, or MTD device number
   1158 *
   1159 * This function tries to open an MTD device described by the @mtd_dev string,
   1160 * which is first treated as an ASCII MTD device number; if that fails, it is
   1161 * treated as an MTD device name, and if that also fails, it is treated as an
   1162 * MTD character device node path. Returns the MTD device description object in
   1163 * case of success and a negative error code in case of failure.
   1164 */
   1165static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
   1166{
   1167	struct mtd_info *mtd;
   1168	int mtd_num;
   1169	char *endp;
   1170
   1171	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
   1172	if (*endp != '\0' || mtd_dev == endp) {
   1173		/*
   1174		 * This does not look like an ASCII integer, probably this is
   1175		 * MTD device name.
   1176		 */
   1177		mtd = get_mtd_device_nm(mtd_dev);
   1178		if (PTR_ERR(mtd) == -ENODEV)
   1179			/* Probably this is an MTD character device node path */
   1180			mtd = open_mtd_by_chdev(mtd_dev);
   1181	} else
   1182		mtd = get_mtd_device(NULL, mtd_num);
   1183
   1184	return mtd;
   1185}
   1186
   1187static int __init ubi_init(void)
   1188{
   1189	int err, i, k;
   1190
   1191	/* Ensure that EC and VID headers have correct size */
   1192	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
   1193	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
   1194
   1195	if (mtd_devs > UBI_MAX_DEVICES) {
   1196		pr_err("UBI error: too many MTD devices, maximum is %d\n",
   1197		       UBI_MAX_DEVICES);
   1198		return -EINVAL;
   1199	}
   1200
   1201	/* Create base sysfs directory and sysfs files */
   1202	err = class_register(&ubi_class);
   1203	if (err < 0)
   1204		return err;
   1205
   1206	err = misc_register(&ubi_ctrl_cdev);
   1207	if (err) {
   1208		pr_err("UBI error: cannot register device\n");
   1209		goto out;
   1210	}
   1211
   1212	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
   1213					      sizeof(struct ubi_wl_entry),
   1214					      0, 0, NULL);
   1215	if (!ubi_wl_entry_slab) {
   1216		err = -ENOMEM;
   1217		goto out_dev_unreg;
   1218	}
   1219
   1220	err = ubi_debugfs_init();
   1221	if (err)
   1222		goto out_slab;
   1223
   1224
   1225	/* Attach MTD devices */
   1226	for (i = 0; i < mtd_devs; i++) {
   1227		struct mtd_dev_param *p = &mtd_dev_param[i];
   1228		struct mtd_info *mtd;
   1229
   1230		cond_resched();
   1231
   1232		mtd = open_mtd_device(p->name);
   1233		if (IS_ERR(mtd)) {
   1234			err = PTR_ERR(mtd);
   1235			pr_err("UBI error: cannot open mtd %s, error %d\n",
   1236			       p->name, err);
   1237			/* See the comment below regarding ubi_is_module(). */
   1238			if (ubi_is_module())
   1239				goto out_detach;
   1240			continue;
   1241		}
   1242
   1243		mutex_lock(&ubi_devices_mutex);
   1244		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
   1245					 p->vid_hdr_offs, p->max_beb_per1024);
   1246		mutex_unlock(&ubi_devices_mutex);
   1247		if (err < 0) {
   1248			pr_err("UBI error: cannot attach mtd%d\n",
   1249			       mtd->index);
   1250			put_mtd_device(mtd);
   1251
   1252			/*
   1253			 * Originally UBI stopped initializing on any error.
   1254			 * However, later on it was found out that this
   1255			 * behavior is not very good when UBI is compiled into
   1256			 * the kernel and the MTD devices to attach are passed
   1257			 * through the command line. Indeed, UBI failure
   1258			 * stopped whole boot sequence.
   1259			 *
   1260			 * To fix this, we changed the behavior for the
   1261			 * non-module case, but preserved the old behavior for
   1262			 * the module case, just for compatibility. This is a
   1263			 * little inconsistent, though.
   1264			 */
   1265			if (ubi_is_module())
   1266				goto out_detach;
   1267		}
   1268	}
   1269
   1270	err = ubiblock_init();
   1271	if (err) {
   1272		pr_err("UBI error: block: cannot initialize, error %d\n", err);
   1273
   1274		/* See the comment above regarding ubi_is_module(). */
   1275		if (ubi_is_module())
   1276			goto out_detach;
   1277	}
   1278
   1279	return 0;
   1280
   1281out_detach:
   1282	for (k = 0; k < i; k++)
   1283		if (ubi_devices[k]) {
   1284			mutex_lock(&ubi_devices_mutex);
   1285			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
   1286			mutex_unlock(&ubi_devices_mutex);
   1287		}
   1288	ubi_debugfs_exit();
   1289out_slab:
   1290	kmem_cache_destroy(ubi_wl_entry_slab);
   1291out_dev_unreg:
   1292	misc_deregister(&ubi_ctrl_cdev);
   1293out:
   1294	class_unregister(&ubi_class);
   1295	pr_err("UBI error: cannot initialize UBI, error %d\n", err);
   1296	return err;
   1297}
   1298late_initcall(ubi_init);
   1299
   1300static void __exit ubi_exit(void)
   1301{
   1302	int i;
   1303
   1304	ubiblock_exit();
   1305
   1306	for (i = 0; i < UBI_MAX_DEVICES; i++)
   1307		if (ubi_devices[i]) {
   1308			mutex_lock(&ubi_devices_mutex);
   1309			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
   1310			mutex_unlock(&ubi_devices_mutex);
   1311		}
   1312	ubi_debugfs_exit();
   1313	kmem_cache_destroy(ubi_wl_entry_slab);
   1314	misc_deregister(&ubi_ctrl_cdev);
   1315	class_unregister(&ubi_class);
   1316}
   1317module_exit(ubi_exit);
   1318
   1319/**
   1320 * bytes_str_to_int - convert a number of bytes string into an integer.
   1321 * @str: the string to convert
   1322 *
   1323 * This function returns the resulting positive integer in case of success and a
   1324 * negative error code in case of failure.
   1325 */
   1326static int bytes_str_to_int(const char *str)
   1327{
   1328	char *endp;
   1329	unsigned long result;
   1330
   1331	result = simple_strtoul(str, &endp, 0);
   1332	if (str == endp || result >= INT_MAX) {
   1333		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
   1334		return -EINVAL;
   1335	}
   1336
   1337	switch (*endp) {
   1338	case 'G':
   1339		result *= 1024;
   1340		fallthrough;
   1341	case 'M':
   1342		result *= 1024;
   1343		fallthrough;
   1344	case 'K':
   1345		result *= 1024;
   1346		break;
   1347	case '\0':
   1348		break;
   1349	default:
   1350		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
   1351		return -EINVAL;
   1352	}
   1353
   1354	return result;
   1355}
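       /*
        * Illustrative examples (not part of the original source): "2048" and "2K"
        * both yield 2048, "1M" yields 1048576 and "1G" yields 1073741824; any
        * other suffix, or a value that already reaches INT_MAX before the suffix
        * is applied, is rejected with -EINVAL.
        */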
   1356
   1357/**
   1358 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
   1359 * @val: the parameter value to parse
   1360 * @kp: not used
   1361 *
   1362 * This function returns zero in case of success and a negative error code in
   1363 * case of error.
   1364 */
   1365static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
   1366{
   1367	int i, len;
   1368	struct mtd_dev_param *p;
   1369	char buf[MTD_PARAM_LEN_MAX];
   1370	char *pbuf = &buf[0];
   1371	char *tokens[MTD_PARAM_MAX_COUNT], *token;
   1372
   1373	if (!val)
   1374		return -EINVAL;
   1375
   1376	if (mtd_devs == UBI_MAX_DEVICES) {
   1377		pr_err("UBI error: too many parameters, max. is %d\n",
   1378		       UBI_MAX_DEVICES);
   1379		return -EINVAL;
   1380	}
   1381
   1382	len = strnlen(val, MTD_PARAM_LEN_MAX);
   1383	if (len == MTD_PARAM_LEN_MAX) {
   1384		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
   1385		       val, MTD_PARAM_LEN_MAX);
   1386		return -EINVAL;
   1387	}
   1388
   1389	if (len == 0) {
   1390		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
   1391		return 0;
   1392	}
   1393
   1394	strcpy(buf, val);
   1395
   1396	/* Get rid of the final newline */
   1397	if (buf[len - 1] == '\n')
   1398		buf[len - 1] = '\0';
   1399
   1400	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
   1401		tokens[i] = strsep(&pbuf, ",");
   1402
   1403	if (pbuf) {
   1404		pr_err("UBI error: too many arguments at \"%s\"\n", val);
   1405		return -EINVAL;
   1406	}
   1407
   1408	p = &mtd_dev_param[mtd_devs];
   1409	strcpy(&p->name[0], tokens[0]);
   1410
   1411	token = tokens[1];
   1412	if (token) {
   1413		p->vid_hdr_offs = bytes_str_to_int(token);
   1414
   1415		if (p->vid_hdr_offs < 0)
   1416			return p->vid_hdr_offs;
   1417	}
   1418
   1419	token = tokens[2];
   1420	if (token) {
   1421		int err = kstrtoint(token, 10, &p->max_beb_per1024);
   1422
   1423		if (err) {
   1424			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
   1425			       token);
   1426			return -EINVAL;
   1427		}
   1428	}
   1429
   1430	token = tokens[3];
   1431	if (token) {
   1432		int err = kstrtoint(token, 10, &p->ubi_num);
   1433
   1434		if (err) {
   1435			pr_err("UBI error: bad value for ubi_num parameter: %s",
   1436			       token);
   1437			return -EINVAL;
   1438		}
   1439	} else
   1440		p->ubi_num = UBI_DEV_NUM_AUTO;
   1441
   1442	mtd_devs += 1;
   1443	return 0;
   1444}
   1445
   1446module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 0400);
   1447MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
   1448		      "Multiple \"mtd\" parameters may be specified.\n"
   1449		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
   1450		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
   1451		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
   1452		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
   1453		      "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default)\n"
   1454		      "\n"
   1455		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
   1456		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
   1457		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
   1458		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
   1459		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
   1460#ifdef CONFIG_MTD_UBI_FASTMAP
   1461module_param(fm_autoconvert, bool, 0644);
   1462MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
   1463module_param(fm_debug, bool, 0);
   1464MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
   1465#endif
   1466MODULE_VERSION(__stringify(UBI_VERSION));
   1467MODULE_DESCRIPTION("UBI - Unsorted Block Images");
   1468MODULE_AUTHOR("Artem Bityutskiy");
   1469MODULE_LICENSE("GPL");