cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

super.c (68647B)


      1/*
      2 * super.c
      3 *
      4 * PURPOSE
      5 *  Super block routines for the OSTA-UDF(tm) filesystem.
      6 *
      7 * DESCRIPTION
      8 *  OSTA-UDF(tm) = Optical Storage Technology Association
      9 *  Universal Disk Format.
     10 *
     11 *  This code is based on version 2.00 of the UDF specification,
     12 *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
     13 *    http://www.osta.org/
     14 *    https://www.ecma.ch/
     15 *    https://www.iso.org/
     16 *
     17 * COPYRIGHT
     18 *  This file is distributed under the terms of the GNU General Public
     19 *  License (GPL). Copies of the GPL can be obtained from:
     20 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
     21 *  Each contributing author retains all rights to their own work.
     22 *
     23 *  (C) 1998 Dave Boynton
     24 *  (C) 1998-2004 Ben Fennema
     25 *  (C) 2000 Stelias Computing Inc
     26 *
     27 * HISTORY
     28 *
     29 *  09/24/98 dgb  changed to allow compiling outside of kernel, and
     30 *                added some debugging.
     31 *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
     32 *  10/16/98      attempting some multi-session support
     33 *  10/17/98      added freespace count for "df"
     34 *  11/11/98 gr   added novrs option
     35 *  11/26/98 dgb  added fileset,anchor mount options
     36 *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
     37 *                vol descs. rewrote option handling based on isofs
     38 *  12/20/98      find the free space bitmap (if it exists)
     39 */
     40
     41#include "udfdecl.h"
     42
     43#include <linux/blkdev.h>
     44#include <linux/slab.h>
     45#include <linux/kernel.h>
     46#include <linux/module.h>
     47#include <linux/parser.h>
     48#include <linux/stat.h>
     49#include <linux/cdrom.h>
     50#include <linux/nls.h>
     51#include <linux/vfs.h>
     52#include <linux/vmalloc.h>
     53#include <linux/errno.h>
     54#include <linux/mount.h>
     55#include <linux/seq_file.h>
     56#include <linux/bitmap.h>
     57#include <linux/crc-itu-t.h>
     58#include <linux/log2.h>
     59#include <asm/byteorder.h>
     60#include <linux/iversion.h>
     61
     62#include "udf_sb.h"
     63#include "udf_i.h"
     64
     65#include <linux/init.h>
     66#include <linux/uaccess.h>
     67
     68enum {
     69	VDS_POS_PRIMARY_VOL_DESC,
     70	VDS_POS_UNALLOC_SPACE_DESC,
     71	VDS_POS_LOGICAL_VOL_DESC,
     72	VDS_POS_IMP_USE_VOL_DESC,
     73	VDS_POS_LENGTH
     74};
     75
     76#define VSD_FIRST_SECTOR_OFFSET		32768
     77#define VSD_MAX_SECTOR_OFFSET		0x800000
     78
     79/*
     80 * Maximum number of Terminating Descriptor / Logical Volume Integrity
      81 * Descriptor redirections. The chosen numbers are arbitrary - hopefully
      82 * large enough not to limit any real use of rewritten inodes on write-once
      83 * media, yet small enough to avoid looping for too long on corrupted media.
     84 */
     85#define UDF_MAX_TD_NESTING 64
     86#define UDF_MAX_LVID_NESTING 1000
     87
     88enum { UDF_MAX_LINKS = 0xffff };
     89
     90/* These are the "meat" - everything else is stuffing */
     91static int udf_fill_super(struct super_block *, void *, int);
     92static void udf_put_super(struct super_block *);
     93static int udf_sync_fs(struct super_block *, int);
     94static int udf_remount_fs(struct super_block *, int *, char *);
     95static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
     96static void udf_open_lvid(struct super_block *);
     97static void udf_close_lvid(struct super_block *);
     98static unsigned int udf_count_free(struct super_block *);
     99static int udf_statfs(struct dentry *, struct kstatfs *);
    100static int udf_show_options(struct seq_file *, struct dentry *);
    101
    102struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
    103{
    104	struct logicalVolIntegrityDesc *lvid;
    105	unsigned int partnum;
    106	unsigned int offset;
    107
    108	if (!UDF_SB(sb)->s_lvid_bh)
    109		return NULL;
    110	lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
    111	partnum = le32_to_cpu(lvid->numOfPartitions);
    112	/* The offset is to skip freeSpaceTable and sizeTable arrays */
    113	offset = partnum * 2 * sizeof(uint32_t);
    114	return (struct logicalVolIntegrityDescImpUse *)
    115					(((uint8_t *)(lvid + 1)) + offset);
    116}
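/*
 * Illustrative layout sketch (not part of the original source) for the offset
 * computed above. With numOfPartitions == 2 the implementation-use area starts
 * 2 * 2 * sizeof(uint32_t) == 16 bytes past the end of the fixed descriptor:
 *
 *	struct logicalVolIntegrityDesc		(fixed part, ends at lvid + 1)
 *	uint32_t freeSpaceTable[2];		(8 bytes)
 *	uint32_t sizeTable[2];			(8 bytes)
 *	struct logicalVolIntegrityDescImpUse	<- pointer returned above
 */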
    117
    118/* UDF filesystem type */
    119static struct dentry *udf_mount(struct file_system_type *fs_type,
    120		      int flags, const char *dev_name, void *data)
    121{
    122	return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
    123}
    124
    125static struct file_system_type udf_fstype = {
    126	.owner		= THIS_MODULE,
    127	.name		= "udf",
    128	.mount		= udf_mount,
    129	.kill_sb	= kill_block_super,
    130	.fs_flags	= FS_REQUIRES_DEV,
    131};
    132MODULE_ALIAS_FS("udf");
    133
    134static struct kmem_cache *udf_inode_cachep;
    135
    136static struct inode *udf_alloc_inode(struct super_block *sb)
    137{
    138	struct udf_inode_info *ei;
    139	ei = alloc_inode_sb(sb, udf_inode_cachep, GFP_KERNEL);
    140	if (!ei)
    141		return NULL;
    142
    143	ei->i_unique = 0;
    144	ei->i_lenExtents = 0;
    145	ei->i_lenStreams = 0;
    146	ei->i_next_alloc_block = 0;
    147	ei->i_next_alloc_goal = 0;
    148	ei->i_strat4096 = 0;
    149	ei->i_streamdir = 0;
    150	init_rwsem(&ei->i_data_sem);
    151	ei->cached_extent.lstart = -1;
    152	spin_lock_init(&ei->i_extent_cache_lock);
    153	inode_set_iversion(&ei->vfs_inode, 1);
    154
    155	return &ei->vfs_inode;
    156}
    157
    158static void udf_free_in_core_inode(struct inode *inode)
    159{
    160	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
    161}
    162
    163static void init_once(void *foo)
    164{
    165	struct udf_inode_info *ei = (struct udf_inode_info *)foo;
    166
    167	ei->i_data = NULL;
    168	inode_init_once(&ei->vfs_inode);
    169}
    170
    171static int __init init_inodecache(void)
    172{
    173	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
    174					     sizeof(struct udf_inode_info),
    175					     0, (SLAB_RECLAIM_ACCOUNT |
    176						 SLAB_MEM_SPREAD |
    177						 SLAB_ACCOUNT),
    178					     init_once);
    179	if (!udf_inode_cachep)
    180		return -ENOMEM;
    181	return 0;
    182}
    183
    184static void destroy_inodecache(void)
    185{
    186	/*
    187	 * Make sure all delayed rcu free inodes are flushed before we
    188	 * destroy cache.
    189	 */
    190	rcu_barrier();
    191	kmem_cache_destroy(udf_inode_cachep);
    192}
    193
    194/* Superblock operations */
    195static const struct super_operations udf_sb_ops = {
    196	.alloc_inode	= udf_alloc_inode,
    197	.free_inode	= udf_free_in_core_inode,
    198	.write_inode	= udf_write_inode,
    199	.evict_inode	= udf_evict_inode,
    200	.put_super	= udf_put_super,
    201	.sync_fs	= udf_sync_fs,
    202	.statfs		= udf_statfs,
    203	.remount_fs	= udf_remount_fs,
    204	.show_options	= udf_show_options,
    205};
    206
    207struct udf_options {
    208	unsigned char novrs;
    209	unsigned int blocksize;
    210	unsigned int session;
    211	unsigned int lastblock;
    212	unsigned int anchor;
    213	unsigned int flags;
    214	umode_t umask;
    215	kgid_t gid;
    216	kuid_t uid;
    217	umode_t fmode;
    218	umode_t dmode;
    219	struct nls_table *nls_map;
    220};
    221
    222static int __init init_udf_fs(void)
    223{
    224	int err;
    225
    226	err = init_inodecache();
    227	if (err)
    228		goto out1;
    229	err = register_filesystem(&udf_fstype);
    230	if (err)
    231		goto out;
    232
    233	return 0;
    234
    235out:
    236	destroy_inodecache();
    237
    238out1:
    239	return err;
    240}
    241
    242static void __exit exit_udf_fs(void)
    243{
    244	unregister_filesystem(&udf_fstype);
    245	destroy_inodecache();
    246}
    247
    248static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
    249{
    250	struct udf_sb_info *sbi = UDF_SB(sb);
    251
    252	sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
    253	if (!sbi->s_partmaps) {
    254		sbi->s_partitions = 0;
    255		return -ENOMEM;
    256	}
    257
    258	sbi->s_partitions = count;
    259	return 0;
    260}
    261
    262static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
    263{
    264	int i;
    265	int nr_groups = bitmap->s_nr_groups;
    266
    267	for (i = 0; i < nr_groups; i++)
    268		brelse(bitmap->s_block_bitmap[i]);
    269
    270	kvfree(bitmap);
    271}
    272
    273static void udf_free_partition(struct udf_part_map *map)
    274{
    275	int i;
    276	struct udf_meta_data *mdata;
    277
    278	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
    279		iput(map->s_uspace.s_table);
    280	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
    281		udf_sb_free_bitmap(map->s_uspace.s_bitmap);
    282	if (map->s_partition_type == UDF_SPARABLE_MAP15)
    283		for (i = 0; i < 4; i++)
    284			brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
    285	else if (map->s_partition_type == UDF_METADATA_MAP25) {
    286		mdata = &map->s_type_specific.s_metadata;
    287		iput(mdata->s_metadata_fe);
    288		mdata->s_metadata_fe = NULL;
    289
    290		iput(mdata->s_mirror_fe);
    291		mdata->s_mirror_fe = NULL;
    292
    293		iput(mdata->s_bitmap_fe);
    294		mdata->s_bitmap_fe = NULL;
    295	}
    296}
    297
    298static void udf_sb_free_partitions(struct super_block *sb)
    299{
    300	struct udf_sb_info *sbi = UDF_SB(sb);
    301	int i;
    302
    303	if (!sbi->s_partmaps)
    304		return;
    305	for (i = 0; i < sbi->s_partitions; i++)
    306		udf_free_partition(&sbi->s_partmaps[i]);
    307	kfree(sbi->s_partmaps);
    308	sbi->s_partmaps = NULL;
    309}
    310
    311static int udf_show_options(struct seq_file *seq, struct dentry *root)
    312{
    313	struct super_block *sb = root->d_sb;
    314	struct udf_sb_info *sbi = UDF_SB(sb);
    315
    316	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
    317		seq_puts(seq, ",nostrict");
    318	if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
    319		seq_printf(seq, ",bs=%lu", sb->s_blocksize);
    320	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
    321		seq_puts(seq, ",unhide");
    322	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
    323		seq_puts(seq, ",undelete");
    324	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
    325		seq_puts(seq, ",noadinicb");
    326	if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
    327		seq_puts(seq, ",shortad");
    328	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
    329		seq_puts(seq, ",uid=forget");
    330	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
    331		seq_puts(seq, ",gid=forget");
    332	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
    333		seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
    334	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
    335		seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
    336	if (sbi->s_umask != 0)
    337		seq_printf(seq, ",umask=%ho", sbi->s_umask);
    338	if (sbi->s_fmode != UDF_INVALID_MODE)
    339		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
    340	if (sbi->s_dmode != UDF_INVALID_MODE)
    341		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
    342	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
    343		seq_printf(seq, ",session=%d", sbi->s_session);
    344	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
    345		seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
    346	if (sbi->s_anchor != 0)
    347		seq_printf(seq, ",anchor=%u", sbi->s_anchor);
    348	if (sbi->s_nls_map)
    349		seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
    350	else
    351		seq_puts(seq, ",iocharset=utf8");
    352
    353	return 0;
    354}
    355
    356/*
    357 * udf_parse_options
    358 *
    359 * PURPOSE
    360 *	Parse mount options.
    361 *
    362 * DESCRIPTION
    363 *	The following mount options are supported:
    364 *
    365 *	gid=		Set the default group.
    366 *	umask=		Set the default umask.
    367 *	mode=		Set the default file permissions.
    368 *	dmode=		Set the default directory permissions.
    369 *	uid=		Set the default user.
    370 *	bs=		Set the block size.
    371 *	unhide		Show otherwise hidden files.
    372 *	undelete	Show deleted files in lists.
    373 *	adinicb		Embed data in the inode (default)
    374 *	noadinicb	Don't embed data in the inode
     375 *	shortad		Use short allocation descriptors
     376 *	longad		Use long allocation descriptors (default)
    377 *	nostrict	Unset strict conformance
    378 *	iocharset=	Set the NLS character set
    379 *
    380 *	The remaining are for debugging and disaster recovery:
    381 *
    382 *	novrs		Skip volume sequence recognition
    383 *
     384 *	The following expect an offset from 0.
    385 *
    386 *	session=	Set the CDROM session (default= last session)
    387 *	anchor=		Override standard anchor location. (default= 256)
    388 *	volume=		Override the VolumeDesc location. (unused)
    389 *	partition=	Override the PartitionDesc location. (unused)
     390 *	lastblock=	Set the last block of the filesystem.
    391 *
     392 *	The following expect an offset from the partition root.
    393 *
    394 *	fileset=	Override the fileset block location. (unused)
    395 *	rootdir=	Override the root directory location. (unused)
    396 *		WARNING: overriding the rootdir to a non-directory may
    397 *		yield highly unpredictable results.
    398 *
    399 * PRE-CONDITIONS
    400 *	options		Pointer to mount options string.
    401 *	uopts		Pointer to mount options variable.
    402 *
    403 * POST-CONDITIONS
    404 *	<return>	1	Mount options parsed okay.
    405 *	<return>	0	Error parsing mount options.
    406 *
    407 * HISTORY
    408 *	July 1, 1997 - Andrew E. Mileski
    409 *	Written, tested, and released.
    410 */
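/*
 * Illustrative example (not part of the original source): a mount option
 * string such as
 *
 *	"uid=1000,gid=1000,umask=022,lastblock=1048575,iocharset=utf8"
 *
 * is split on ',' by udf_parse_options() below; each token is matched against
 * the tokens[] table and numeric arguments are extracted with match_uint(),
 * match_int() or match_octal() from <linux/parser.h>.
 */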
    411
    412enum {
    413	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
    414	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
    415	Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
    416	Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
    417	Opt_rootdir, Opt_utf8, Opt_iocharset,
    418	Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
    419	Opt_fmode, Opt_dmode
    420};
    421
    422static const match_table_t tokens = {
    423	{Opt_novrs,	"novrs"},
    424	{Opt_nostrict,	"nostrict"},
    425	{Opt_bs,	"bs=%u"},
    426	{Opt_unhide,	"unhide"},
    427	{Opt_undelete,	"undelete"},
    428	{Opt_noadinicb,	"noadinicb"},
    429	{Opt_adinicb,	"adinicb"},
    430	{Opt_shortad,	"shortad"},
    431	{Opt_longad,	"longad"},
    432	{Opt_uforget,	"uid=forget"},
    433	{Opt_uignore,	"uid=ignore"},
    434	{Opt_gforget,	"gid=forget"},
    435	{Opt_gignore,	"gid=ignore"},
    436	{Opt_gid,	"gid=%u"},
    437	{Opt_uid,	"uid=%u"},
    438	{Opt_umask,	"umask=%o"},
    439	{Opt_session,	"session=%u"},
    440	{Opt_lastblock,	"lastblock=%u"},
    441	{Opt_anchor,	"anchor=%u"},
    442	{Opt_volume,	"volume=%u"},
    443	{Opt_partition,	"partition=%u"},
    444	{Opt_fileset,	"fileset=%u"},
    445	{Opt_rootdir,	"rootdir=%u"},
    446	{Opt_utf8,	"utf8"},
    447	{Opt_iocharset,	"iocharset=%s"},
    448	{Opt_fmode,     "mode=%o"},
    449	{Opt_dmode,     "dmode=%o"},
    450	{Opt_err,	NULL}
    451};
    452
    453static int udf_parse_options(char *options, struct udf_options *uopt,
    454			     bool remount)
    455{
    456	char *p;
    457	int option;
    458	unsigned int uv;
    459
    460	uopt->novrs = 0;
    461	uopt->session = 0xFFFFFFFF;
    462	uopt->lastblock = 0;
    463	uopt->anchor = 0;
    464
    465	if (!options)
    466		return 1;
    467
    468	while ((p = strsep(&options, ",")) != NULL) {
    469		substring_t args[MAX_OPT_ARGS];
    470		int token;
    471		unsigned n;
    472		if (!*p)
    473			continue;
    474
    475		token = match_token(p, tokens, args);
    476		switch (token) {
    477		case Opt_novrs:
    478			uopt->novrs = 1;
    479			break;
    480		case Opt_bs:
    481			if (match_int(&args[0], &option))
    482				return 0;
    483			n = option;
    484			if (n != 512 && n != 1024 && n != 2048 && n != 4096)
    485				return 0;
    486			uopt->blocksize = n;
    487			uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
    488			break;
    489		case Opt_unhide:
    490			uopt->flags |= (1 << UDF_FLAG_UNHIDE);
    491			break;
    492		case Opt_undelete:
    493			uopt->flags |= (1 << UDF_FLAG_UNDELETE);
    494			break;
    495		case Opt_noadinicb:
    496			uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
    497			break;
    498		case Opt_adinicb:
    499			uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
    500			break;
    501		case Opt_shortad:
    502			uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
    503			break;
    504		case Opt_longad:
    505			uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
    506			break;
    507		case Opt_gid:
    508			if (match_uint(args, &uv))
    509				return 0;
    510			uopt->gid = make_kgid(current_user_ns(), uv);
    511			if (!gid_valid(uopt->gid))
    512				return 0;
    513			uopt->flags |= (1 << UDF_FLAG_GID_SET);
    514			break;
    515		case Opt_uid:
    516			if (match_uint(args, &uv))
    517				return 0;
    518			uopt->uid = make_kuid(current_user_ns(), uv);
    519			if (!uid_valid(uopt->uid))
    520				return 0;
    521			uopt->flags |= (1 << UDF_FLAG_UID_SET);
    522			break;
    523		case Opt_umask:
    524			if (match_octal(args, &option))
    525				return 0;
    526			uopt->umask = option;
    527			break;
    528		case Opt_nostrict:
    529			uopt->flags &= ~(1 << UDF_FLAG_STRICT);
    530			break;
    531		case Opt_session:
    532			if (match_int(args, &option))
    533				return 0;
    534			uopt->session = option;
    535			if (!remount)
    536				uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
    537			break;
    538		case Opt_lastblock:
    539			if (match_int(args, &option))
    540				return 0;
    541			uopt->lastblock = option;
    542			if (!remount)
    543				uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
    544			break;
    545		case Opt_anchor:
    546			if (match_int(args, &option))
    547				return 0;
    548			uopt->anchor = option;
    549			break;
    550		case Opt_volume:
    551		case Opt_partition:
    552		case Opt_fileset:
    553		case Opt_rootdir:
    554			/* Ignored (never implemented properly) */
    555			break;
    556		case Opt_utf8:
    557			if (!remount) {
    558				unload_nls(uopt->nls_map);
    559				uopt->nls_map = NULL;
    560			}
    561			break;
    562		case Opt_iocharset:
    563			if (!remount) {
    564				unload_nls(uopt->nls_map);
    565				uopt->nls_map = NULL;
    566			}
    567			/* When nls_map is not loaded then UTF-8 is used */
    568			if (!remount && strcmp(args[0].from, "utf8") != 0) {
    569				uopt->nls_map = load_nls(args[0].from);
    570				if (!uopt->nls_map) {
    571					pr_err("iocharset %s not found\n",
    572						args[0].from);
    573					return 0;
    574				}
    575			}
    576			break;
    577		case Opt_uforget:
    578			uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
    579			break;
    580		case Opt_uignore:
    581		case Opt_gignore:
     582			/* These options are superseded by uid=<number> */
    583			break;
    584		case Opt_gforget:
    585			uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
    586			break;
    587		case Opt_fmode:
    588			if (match_octal(args, &option))
    589				return 0;
    590			uopt->fmode = option & 0777;
    591			break;
    592		case Opt_dmode:
    593			if (match_octal(args, &option))
    594				return 0;
    595			uopt->dmode = option & 0777;
    596			break;
    597		default:
    598			pr_err("bad mount option \"%s\" or missing value\n", p);
    599			return 0;
    600		}
    601	}
    602	return 1;
    603}
    604
    605static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
    606{
    607	struct udf_options uopt;
    608	struct udf_sb_info *sbi = UDF_SB(sb);
    609	int error = 0;
    610
    611	if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
    612		return -EACCES;
    613
    614	sync_filesystem(sb);
    615
    616	uopt.flags = sbi->s_flags;
    617	uopt.uid   = sbi->s_uid;
    618	uopt.gid   = sbi->s_gid;
    619	uopt.umask = sbi->s_umask;
    620	uopt.fmode = sbi->s_fmode;
    621	uopt.dmode = sbi->s_dmode;
    622	uopt.nls_map = NULL;
    623
    624	if (!udf_parse_options(options, &uopt, true))
    625		return -EINVAL;
    626
    627	write_lock(&sbi->s_cred_lock);
    628	sbi->s_flags = uopt.flags;
    629	sbi->s_uid   = uopt.uid;
    630	sbi->s_gid   = uopt.gid;
    631	sbi->s_umask = uopt.umask;
    632	sbi->s_fmode = uopt.fmode;
    633	sbi->s_dmode = uopt.dmode;
    634	write_unlock(&sbi->s_cred_lock);
    635
    636	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
    637		goto out_unlock;
    638
    639	if (*flags & SB_RDONLY)
    640		udf_close_lvid(sb);
    641	else
    642		udf_open_lvid(sb);
    643
    644out_unlock:
    645	return error;
    646}
    647
    648/*
     649 * Check VSD descriptor. Returns -1 if we are at the end of the volume
     650 * recognition area, 0 if the descriptor is valid but uninteresting, and 1 if
     651 * we found one of the NSR descriptors we are looking for.
    652 */
    653static int identify_vsd(const struct volStructDesc *vsd)
    654{
    655	int ret = 0;
    656
    657	if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
    658		switch (vsd->structType) {
    659		case 0:
    660			udf_debug("ISO9660 Boot Record found\n");
    661			break;
    662		case 1:
    663			udf_debug("ISO9660 Primary Volume Descriptor found\n");
    664			break;
    665		case 2:
    666			udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
    667			break;
    668		case 3:
    669			udf_debug("ISO9660 Volume Partition Descriptor found\n");
    670			break;
    671		case 255:
    672			udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
    673			break;
    674		default:
    675			udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
    676			break;
    677		}
    678	} else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
    679		; /* ret = 0 */
    680	else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
    681		ret = 1;
    682	else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
    683		ret = 1;
    684	else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
    685		; /* ret = 0 */
    686	else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
    687		; /* ret = 0 */
    688	else {
     689		/* TEA01 or invalid id: end of volume recognition area */
    690		ret = -1;
    691	}
    692
    693	return ret;
    694}
    695
    696/*
    697 * Check Volume Structure Descriptors (ECMA 167 2/9.1)
    698 * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
    699 * @return   1 if NSR02 or NSR03 found,
    700 *	    -1 if first sector read error, 0 otherwise
    701 */
    702static int udf_check_vsd(struct super_block *sb)
    703{
    704	struct volStructDesc *vsd = NULL;
    705	loff_t sector = VSD_FIRST_SECTOR_OFFSET;
    706	int sectorsize;
    707	struct buffer_head *bh = NULL;
    708	int nsr = 0;
    709	struct udf_sb_info *sbi;
    710	loff_t session_offset;
    711
    712	sbi = UDF_SB(sb);
    713	if (sb->s_blocksize < sizeof(struct volStructDesc))
    714		sectorsize = sizeof(struct volStructDesc);
    715	else
    716		sectorsize = sb->s_blocksize;
    717
    718	session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
    719	sector += session_offset;
    720
    721	udf_debug("Starting at sector %u (%lu byte sectors)\n",
    722		  (unsigned int)(sector >> sb->s_blocksize_bits),
    723		  sb->s_blocksize);
    724	/* Process the sequence (if applicable). The hard limit on the sector
    725	 * offset is arbitrary, hopefully large enough so that all valid UDF
    726	 * filesystems will be recognised. There is no mention of an upper
    727	 * bound to the size of the volume recognition area in the standard.
     728	 * The limit prevents the code from reading all the sectors of a
     729	 * specially crafted image (like a Blu-ray disc full of CD001 sectors),
     730	 * potentially causing minutes or even hours of uninterruptible I/O
     731	 * activity. This actually happened with uninitialised SSD partitions
     732	 * (all 0xFF) before the limit check and the full set of valid IDs
     733	 * were added. */
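	/*
	 * Worked bound (illustrative, not part of the original source): with
	 * 2048-byte sectors the loop below inspects at most
	 * (VSD_MAX_SECTOR_OFFSET - VSD_FIRST_SECTOR_OFFSET) / 2048 =
	 * (0x800000 - 32768) / 2048 = 4080 sectors before giving up.
	 */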
    734	for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
    735		/* Read a block */
    736		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
    737		if (!bh)
    738			break;
    739
    740		vsd = (struct volStructDesc *)(bh->b_data +
    741					      (sector & (sb->s_blocksize - 1)));
    742		nsr = identify_vsd(vsd);
    743		/* Found NSR or end? */
    744		if (nsr) {
    745			brelse(bh);
    746			break;
    747		}
    748		/*
    749		 * Special handling for improperly formatted VRS (e.g., Win10)
    750		 * where components are separated by 2048 bytes even though
    751		 * sectors are 4K
    752		 */
    753		if (sb->s_blocksize == 4096) {
    754			nsr = identify_vsd(vsd + 1);
    755			/* Ignore unknown IDs... */
    756			if (nsr < 0)
    757				nsr = 0;
    758		}
    759		brelse(bh);
    760	}
    761
    762	if (nsr > 0)
    763		return 1;
    764	else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
    765		return -1;
    766	else
    767		return 0;
    768}
    769
    770static int udf_verify_domain_identifier(struct super_block *sb,
    771					struct regid *ident, char *dname)
    772{
    773	struct domainIdentSuffix *suffix;
    774
    775	if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
    776		udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
    777		goto force_ro;
    778	}
    779	if (ident->flags & ENTITYID_FLAGS_DIRTY) {
    780		udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
    781			 dname);
    782		goto force_ro;
    783	}
    784	suffix = (struct domainIdentSuffix *)ident->identSuffix;
    785	if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
    786	    (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
    787		if (!sb_rdonly(sb)) {
    788			udf_warn(sb, "Descriptor for %s marked write protected."
    789				 " Forcing read only mount.\n", dname);
    790		}
    791		goto force_ro;
    792	}
    793	return 0;
    794
    795force_ro:
    796	if (!sb_rdonly(sb))
    797		return -EACCES;
    798	UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
    799	return 0;
    800}
    801
    802static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
    803			    struct kernel_lb_addr *root)
    804{
    805	int ret;
    806
    807	ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
    808	if (ret < 0)
    809		return ret;
    810
    811	*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
    812	UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
    813
    814	udf_debug("Rootdir at block=%u, partition=%u\n",
    815		  root->logicalBlockNum, root->partitionReferenceNum);
    816	return 0;
    817}
    818
    819static int udf_find_fileset(struct super_block *sb,
    820			    struct kernel_lb_addr *fileset,
    821			    struct kernel_lb_addr *root)
    822{
    823	struct buffer_head *bh = NULL;
    824	uint16_t ident;
    825	int ret;
    826
    827	if (fileset->logicalBlockNum == 0xFFFFFFFF &&
    828	    fileset->partitionReferenceNum == 0xFFFF)
    829		return -EINVAL;
    830
    831	bh = udf_read_ptagged(sb, fileset, 0, &ident);
    832	if (!bh)
    833		return -EIO;
    834	if (ident != TAG_IDENT_FSD) {
    835		brelse(bh);
    836		return -EINVAL;
    837	}
    838
    839	udf_debug("Fileset at block=%u, partition=%u\n",
    840		  fileset->logicalBlockNum, fileset->partitionReferenceNum);
    841
    842	UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
    843	ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
    844	brelse(bh);
    845	return ret;
    846}
    847
    848/*
    849 * Load primary Volume Descriptor Sequence
    850 *
     851 * Return <0 on error, 0 on success. -EAGAIN has a special meaning: the next
     852 * sequence should be tried.
    853 */
    854static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
    855{
    856	struct primaryVolDesc *pvoldesc;
    857	uint8_t *outstr;
    858	struct buffer_head *bh;
    859	uint16_t ident;
    860	int ret;
    861	struct timestamp *ts;
    862
    863	outstr = kmalloc(128, GFP_NOFS);
    864	if (!outstr)
    865		return -ENOMEM;
    866
    867	bh = udf_read_tagged(sb, block, block, &ident);
    868	if (!bh) {
    869		ret = -EAGAIN;
    870		goto out2;
    871	}
    872
    873	if (ident != TAG_IDENT_PVD) {
    874		ret = -EIO;
    875		goto out_bh;
    876	}
    877
    878	pvoldesc = (struct primaryVolDesc *)bh->b_data;
    879
    880	udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
    881			      pvoldesc->recordingDateAndTime);
    882	ts = &pvoldesc->recordingDateAndTime;
    883	udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
    884		  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
    885		  ts->minute, le16_to_cpu(ts->typeAndTimezone));
    886
    887	ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
    888	if (ret < 0) {
    889		strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
    890		pr_warn("incorrect volume identification, setting to "
    891			"'InvalidName'\n");
    892	} else {
    893		strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
    894	}
    895	udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
    896
    897	ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
    898	if (ret < 0) {
    899		ret = 0;
    900		goto out_bh;
    901	}
    902	outstr[ret] = 0;
    903	udf_debug("volSetIdent[] = '%s'\n", outstr);
    904
    905	ret = 0;
    906out_bh:
    907	brelse(bh);
    908out2:
    909	kfree(outstr);
    910	return ret;
    911}
    912
    913struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
    914					u32 meta_file_loc, u32 partition_ref)
    915{
    916	struct kernel_lb_addr addr;
    917	struct inode *metadata_fe;
    918
    919	addr.logicalBlockNum = meta_file_loc;
    920	addr.partitionReferenceNum = partition_ref;
    921
    922	metadata_fe = udf_iget_special(sb, &addr);
    923
    924	if (IS_ERR(metadata_fe)) {
    925		udf_warn(sb, "metadata inode efe not found\n");
    926		return metadata_fe;
    927	}
    928	if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
    929		udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
    930		iput(metadata_fe);
    931		return ERR_PTR(-EIO);
    932	}
    933
    934	return metadata_fe;
    935}
    936
    937static int udf_load_metadata_files(struct super_block *sb, int partition,
    938				   int type1_index)
    939{
    940	struct udf_sb_info *sbi = UDF_SB(sb);
    941	struct udf_part_map *map;
    942	struct udf_meta_data *mdata;
    943	struct kernel_lb_addr addr;
    944	struct inode *fe;
    945
    946	map = &sbi->s_partmaps[partition];
    947	mdata = &map->s_type_specific.s_metadata;
    948	mdata->s_phys_partition_ref = type1_index;
    949
    950	/* metadata address */
    951	udf_debug("Metadata file location: block = %u part = %u\n",
    952		  mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
    953
    954	fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
    955					 mdata->s_phys_partition_ref);
    956	if (IS_ERR(fe)) {
    957		/* mirror file entry */
    958		udf_debug("Mirror metadata file location: block = %u part = %u\n",
    959			  mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
    960
    961		fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
    962						 mdata->s_phys_partition_ref);
    963
    964		if (IS_ERR(fe)) {
     965			udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
    966			return PTR_ERR(fe);
    967		}
    968		mdata->s_mirror_fe = fe;
    969	} else
    970		mdata->s_metadata_fe = fe;
    971
    972
    973	/*
    974	 * bitmap file entry
    975	 * Note:
    976	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
     977	 */
    978	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
    979		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
    980		addr.partitionReferenceNum = mdata->s_phys_partition_ref;
    981
    982		udf_debug("Bitmap file location: block = %u part = %u\n",
    983			  addr.logicalBlockNum, addr.partitionReferenceNum);
    984
    985		fe = udf_iget_special(sb, &addr);
    986		if (IS_ERR(fe)) {
    987			if (sb_rdonly(sb))
    988				udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
    989			else {
    990				udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
    991				return PTR_ERR(fe);
    992			}
    993		} else
    994			mdata->s_bitmap_fe = fe;
    995	}
    996
    997	udf_debug("udf_load_metadata_files Ok\n");
    998	return 0;
    999}
   1000
   1001int udf_compute_nr_groups(struct super_block *sb, u32 partition)
   1002{
   1003	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
   1004	return DIV_ROUND_UP(map->s_partition_len +
   1005			    (sizeof(struct spaceBitmapDesc) << 3),
   1006			    sb->s_blocksize * 8);
   1007}
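/*
 * Worked example (illustrative, assuming sizeof(struct spaceBitmapDesc) == 24):
 * with a 2048-byte block size each bitmap block covers 2048 * 8 = 16384 blocks
 * and the descriptor header takes 24 << 3 = 192 bits, so a partition of
 * 1,000,000 blocks needs DIV_ROUND_UP(1000000 + 192, 16384) = 62 groups.
 */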
   1008
   1009static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
   1010{
   1011	struct udf_bitmap *bitmap;
   1012	int nr_groups = udf_compute_nr_groups(sb, index);
   1013
   1014	bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups),
   1015			  GFP_KERNEL);
   1016	if (!bitmap)
   1017		return NULL;
   1018
   1019	bitmap->s_nr_groups = nr_groups;
   1020	return bitmap;
   1021}
   1022
   1023static int check_partition_desc(struct super_block *sb,
   1024				struct partitionDesc *p,
   1025				struct udf_part_map *map)
   1026{
   1027	bool umap, utable, fmap, ftable;
   1028	struct partitionHeaderDesc *phd;
   1029
   1030	switch (le32_to_cpu(p->accessType)) {
   1031	case PD_ACCESS_TYPE_READ_ONLY:
   1032	case PD_ACCESS_TYPE_WRITE_ONCE:
   1033	case PD_ACCESS_TYPE_NONE:
   1034		goto force_ro;
   1035	}
   1036
   1037	/* No Partition Header Descriptor? */
   1038	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
   1039	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
   1040		goto force_ro;
   1041
   1042	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
   1043	utable = phd->unallocSpaceTable.extLength;
   1044	umap = phd->unallocSpaceBitmap.extLength;
   1045	ftable = phd->freedSpaceTable.extLength;
   1046	fmap = phd->freedSpaceBitmap.extLength;
   1047
   1048	/* No allocation info? */
   1049	if (!utable && !umap && !ftable && !fmap)
   1050		goto force_ro;
   1051
   1052	/* We don't support blocks that require erasing before overwrite */
   1053	if (ftable || fmap)
   1054		goto force_ro;
   1055	/* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
   1056	if (utable && umap)
   1057		goto force_ro;
   1058
   1059	if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
   1060	    map->s_partition_type == UDF_VIRTUAL_MAP20 ||
   1061	    map->s_partition_type == UDF_METADATA_MAP25)
   1062		goto force_ro;
   1063
   1064	return 0;
   1065force_ro:
   1066	if (!sb_rdonly(sb))
   1067		return -EACCES;
   1068	UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
   1069	return 0;
   1070}
   1071
   1072static int udf_fill_partdesc_info(struct super_block *sb,
   1073		struct partitionDesc *p, int p_index)
   1074{
   1075	struct udf_part_map *map;
   1076	struct udf_sb_info *sbi = UDF_SB(sb);
   1077	struct partitionHeaderDesc *phd;
   1078	int err;
   1079
   1080	map = &sbi->s_partmaps[p_index];
   1081
   1082	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
   1083	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
   1084
   1085	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
   1086		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
   1087	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
   1088		map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
   1089	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
   1090		map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
   1091	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
   1092		map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
   1093
   1094	udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
   1095		  p_index, map->s_partition_type,
   1096		  map->s_partition_root, map->s_partition_len);
   1097
   1098	err = check_partition_desc(sb, p, map);
   1099	if (err)
   1100		return err;
   1101
   1102	/*
    1103	 * Skip loading allocation info if we cannot ever write to the fs.
   1104	 * This is a correctness thing as we may have decided to force ro mount
   1105	 * to avoid allocation info we don't support.
   1106	 */
   1107	if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
   1108		return 0;
   1109
   1110	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
   1111	if (phd->unallocSpaceTable.extLength) {
   1112		struct kernel_lb_addr loc = {
   1113			.logicalBlockNum = le32_to_cpu(
   1114				phd->unallocSpaceTable.extPosition),
   1115			.partitionReferenceNum = p_index,
   1116		};
   1117		struct inode *inode;
   1118
   1119		inode = udf_iget_special(sb, &loc);
   1120		if (IS_ERR(inode)) {
   1121			udf_debug("cannot load unallocSpaceTable (part %d)\n",
   1122				  p_index);
   1123			return PTR_ERR(inode);
   1124		}
   1125		map->s_uspace.s_table = inode;
   1126		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
   1127		udf_debug("unallocSpaceTable (part %d) @ %lu\n",
   1128			  p_index, map->s_uspace.s_table->i_ino);
   1129	}
   1130
   1131	if (phd->unallocSpaceBitmap.extLength) {
   1132		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
   1133		if (!bitmap)
   1134			return -ENOMEM;
   1135		map->s_uspace.s_bitmap = bitmap;
   1136		bitmap->s_extPosition = le32_to_cpu(
   1137				phd->unallocSpaceBitmap.extPosition);
   1138		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
   1139		udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
   1140			  p_index, bitmap->s_extPosition);
   1141	}
   1142
   1143	return 0;
   1144}
   1145
   1146static void udf_find_vat_block(struct super_block *sb, int p_index,
   1147			       int type1_index, sector_t start_block)
   1148{
   1149	struct udf_sb_info *sbi = UDF_SB(sb);
   1150	struct udf_part_map *map = &sbi->s_partmaps[p_index];
   1151	sector_t vat_block;
   1152	struct kernel_lb_addr ino;
   1153	struct inode *inode;
   1154
   1155	/*
    1156	 * The VAT file entry is in the last recorded block. Some broken disks have
    1157	 * it a few blocks earlier, so try a bit harder...
   1158	 */
   1159	ino.partitionReferenceNum = type1_index;
   1160	for (vat_block = start_block;
   1161	     vat_block >= map->s_partition_root &&
   1162	     vat_block >= start_block - 3; vat_block--) {
   1163		ino.logicalBlockNum = vat_block - map->s_partition_root;
   1164		inode = udf_iget_special(sb, &ino);
   1165		if (!IS_ERR(inode)) {
   1166			sbi->s_vat_inode = inode;
   1167			break;
   1168		}
   1169	}
   1170}
   1171
   1172static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
   1173{
   1174	struct udf_sb_info *sbi = UDF_SB(sb);
   1175	struct udf_part_map *map = &sbi->s_partmaps[p_index];
   1176	struct buffer_head *bh = NULL;
   1177	struct udf_inode_info *vati;
   1178	uint32_t pos;
   1179	struct virtualAllocationTable20 *vat20;
   1180	sector_t blocks = sb_bdev_nr_blocks(sb);
   1181
   1182	udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
   1183	if (!sbi->s_vat_inode &&
   1184	    sbi->s_last_block != blocks - 1) {
   1185		pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
   1186			  (unsigned long)sbi->s_last_block,
   1187			  (unsigned long)blocks - 1);
   1188		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
   1189	}
   1190	if (!sbi->s_vat_inode)
   1191		return -EIO;
   1192
   1193	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
   1194		map->s_type_specific.s_virtual.s_start_offset = 0;
   1195		map->s_type_specific.s_virtual.s_num_entries =
   1196			(sbi->s_vat_inode->i_size - 36) >> 2;
   1197	} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
   1198		vati = UDF_I(sbi->s_vat_inode);
   1199		if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
   1200			pos = udf_block_map(sbi->s_vat_inode, 0);
   1201			bh = sb_bread(sb, pos);
   1202			if (!bh)
   1203				return -EIO;
   1204			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
   1205		} else {
   1206			vat20 = (struct virtualAllocationTable20 *)
   1207							vati->i_data;
   1208		}
   1209
   1210		map->s_type_specific.s_virtual.s_start_offset =
   1211			le16_to_cpu(vat20->lengthHeader);
   1212		map->s_type_specific.s_virtual.s_num_entries =
   1213			(sbi->s_vat_inode->i_size -
   1214				map->s_type_specific.s_virtual.
   1215					s_start_offset) >> 2;
   1216		brelse(bh);
   1217	}
   1218	return 0;
   1219}
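/*
 * Note (illustrative, derived from the computation above): for a UDF 1.50
 * style VAT (UDF_VIRTUAL_MAP15) the table is assumed to end with a 36-byte
 * trailer, so the number of 32-bit mapping entries is (i_size - 36) >> 2;
 * e.g. a VAT inode of 4132 bytes describes (4132 - 36) / 4 = 1024 entries.
 */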
   1220
   1221/*
   1222 * Load partition descriptor block
   1223 *
   1224 * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
   1225 * sequence.
   1226 */
   1227static int udf_load_partdesc(struct super_block *sb, sector_t block)
   1228{
   1229	struct buffer_head *bh;
   1230	struct partitionDesc *p;
   1231	struct udf_part_map *map;
   1232	struct udf_sb_info *sbi = UDF_SB(sb);
   1233	int i, type1_idx;
   1234	uint16_t partitionNumber;
   1235	uint16_t ident;
   1236	int ret;
   1237
   1238	bh = udf_read_tagged(sb, block, block, &ident);
   1239	if (!bh)
   1240		return -EAGAIN;
   1241	if (ident != TAG_IDENT_PD) {
   1242		ret = 0;
   1243		goto out_bh;
   1244	}
   1245
   1246	p = (struct partitionDesc *)bh->b_data;
   1247	partitionNumber = le16_to_cpu(p->partitionNumber);
   1248
   1249	/* First scan for TYPE1 and SPARABLE partitions */
   1250	for (i = 0; i < sbi->s_partitions; i++) {
   1251		map = &sbi->s_partmaps[i];
   1252		udf_debug("Searching map: (%u == %u)\n",
   1253			  map->s_partition_num, partitionNumber);
   1254		if (map->s_partition_num == partitionNumber &&
   1255		    (map->s_partition_type == UDF_TYPE1_MAP15 ||
   1256		     map->s_partition_type == UDF_SPARABLE_MAP15))
   1257			break;
   1258	}
   1259
   1260	if (i >= sbi->s_partitions) {
   1261		udf_debug("Partition (%u) not found in partition map\n",
   1262			  partitionNumber);
   1263		ret = 0;
   1264		goto out_bh;
   1265	}
   1266
   1267	ret = udf_fill_partdesc_info(sb, p, i);
   1268	if (ret < 0)
   1269		goto out_bh;
   1270
   1271	/*
   1272	 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
   1273	 * PHYSICAL partitions are already set up
   1274	 */
   1275	type1_idx = i;
    1276	map = NULL; /* suppress 'maybe used uninitialized' warning */
   1277	for (i = 0; i < sbi->s_partitions; i++) {
   1278		map = &sbi->s_partmaps[i];
   1279
   1280		if (map->s_partition_num == partitionNumber &&
   1281		    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
   1282		     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
   1283		     map->s_partition_type == UDF_METADATA_MAP25))
   1284			break;
   1285	}
   1286
   1287	if (i >= sbi->s_partitions) {
   1288		ret = 0;
   1289		goto out_bh;
   1290	}
   1291
   1292	ret = udf_fill_partdesc_info(sb, p, i);
   1293	if (ret < 0)
   1294		goto out_bh;
   1295
   1296	if (map->s_partition_type == UDF_METADATA_MAP25) {
   1297		ret = udf_load_metadata_files(sb, i, type1_idx);
   1298		if (ret < 0) {
   1299			udf_err(sb, "error loading MetaData partition map %d\n",
   1300				i);
   1301			goto out_bh;
   1302		}
   1303	} else {
   1304		/*
   1305		 * If we have a partition with virtual map, we don't handle
   1306		 * writing to it (we overwrite blocks instead of relocating
   1307		 * them).
   1308		 */
   1309		if (!sb_rdonly(sb)) {
   1310			ret = -EACCES;
   1311			goto out_bh;
   1312		}
   1313		UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
   1314		ret = udf_load_vat(sb, i, type1_idx);
   1315		if (ret < 0)
   1316			goto out_bh;
   1317	}
   1318	ret = 0;
   1319out_bh:
   1320	/* In case loading failed, we handle cleanup in udf_fill_super */
   1321	brelse(bh);
   1322	return ret;
   1323}
   1324
   1325static int udf_load_sparable_map(struct super_block *sb,
   1326				 struct udf_part_map *map,
   1327				 struct sparablePartitionMap *spm)
   1328{
   1329	uint32_t loc;
   1330	uint16_t ident;
   1331	struct sparingTable *st;
   1332	struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
   1333	int i;
   1334	struct buffer_head *bh;
   1335
   1336	map->s_partition_type = UDF_SPARABLE_MAP15;
   1337	sdata->s_packet_len = le16_to_cpu(spm->packetLength);
   1338	if (!is_power_of_2(sdata->s_packet_len)) {
   1339		udf_err(sb, "error loading logical volume descriptor: "
   1340			"Invalid packet length %u\n",
   1341			(unsigned)sdata->s_packet_len);
   1342		return -EIO;
   1343	}
   1344	if (spm->numSparingTables > 4) {
   1345		udf_err(sb, "error loading logical volume descriptor: "
   1346			"Too many sparing tables (%d)\n",
   1347			(int)spm->numSparingTables);
   1348		return -EIO;
   1349	}
   1350	if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
   1351		udf_err(sb, "error loading logical volume descriptor: "
   1352			"Too big sparing table size (%u)\n",
   1353			le32_to_cpu(spm->sizeSparingTable));
   1354		return -EIO;
   1355	}
   1356
   1357	for (i = 0; i < spm->numSparingTables; i++) {
   1358		loc = le32_to_cpu(spm->locSparingTable[i]);
   1359		bh = udf_read_tagged(sb, loc, loc, &ident);
   1360		if (!bh)
   1361			continue;
   1362
   1363		st = (struct sparingTable *)bh->b_data;
   1364		if (ident != 0 ||
   1365		    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
   1366			    strlen(UDF_ID_SPARING)) ||
   1367		    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
   1368							sb->s_blocksize) {
   1369			brelse(bh);
   1370			continue;
   1371		}
   1372
   1373		sdata->s_spar_map[i] = bh;
   1374	}
   1375	map->s_partition_func = udf_get_pblock_spar15;
   1376	return 0;
   1377}
   1378
   1379static int udf_load_logicalvol(struct super_block *sb, sector_t block,
   1380			       struct kernel_lb_addr *fileset)
   1381{
   1382	struct logicalVolDesc *lvd;
   1383	int i, offset;
   1384	uint8_t type;
   1385	struct udf_sb_info *sbi = UDF_SB(sb);
   1386	struct genericPartitionMap *gpm;
   1387	uint16_t ident;
   1388	struct buffer_head *bh;
   1389	unsigned int table_len;
   1390	int ret;
   1391
   1392	bh = udf_read_tagged(sb, block, block, &ident);
   1393	if (!bh)
   1394		return -EAGAIN;
   1395	BUG_ON(ident != TAG_IDENT_LVD);
   1396	lvd = (struct logicalVolDesc *)bh->b_data;
   1397	table_len = le32_to_cpu(lvd->mapTableLength);
   1398	if (table_len > sb->s_blocksize - sizeof(*lvd)) {
   1399		udf_err(sb, "error loading logical volume descriptor: "
   1400			"Partition table too long (%u > %lu)\n", table_len,
   1401			sb->s_blocksize - sizeof(*lvd));
   1402		ret = -EIO;
   1403		goto out_bh;
   1404	}
   1405
   1406	ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
   1407					   "logical volume");
   1408	if (ret)
   1409		goto out_bh;
   1410	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
   1411	if (ret)
   1412		goto out_bh;
   1413
   1414	for (i = 0, offset = 0;
   1415	     i < sbi->s_partitions && offset < table_len;
   1416	     i++, offset += gpm->partitionMapLength) {
   1417		struct udf_part_map *map = &sbi->s_partmaps[i];
   1418		gpm = (struct genericPartitionMap *)
   1419				&(lvd->partitionMaps[offset]);
   1420		type = gpm->partitionMapType;
   1421		if (type == 1) {
   1422			struct genericPartitionMap1 *gpm1 =
   1423				(struct genericPartitionMap1 *)gpm;
   1424			map->s_partition_type = UDF_TYPE1_MAP15;
   1425			map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
   1426			map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
   1427			map->s_partition_func = NULL;
   1428		} else if (type == 2) {
   1429			struct udfPartitionMap2 *upm2 =
   1430						(struct udfPartitionMap2 *)gpm;
   1431			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
   1432						strlen(UDF_ID_VIRTUAL))) {
   1433				u16 suf =
   1434					le16_to_cpu(((__le16 *)upm2->partIdent.
   1435							identSuffix)[0]);
   1436				if (suf < 0x0200) {
   1437					map->s_partition_type =
   1438							UDF_VIRTUAL_MAP15;
   1439					map->s_partition_func =
   1440							udf_get_pblock_virt15;
   1441				} else {
   1442					map->s_partition_type =
   1443							UDF_VIRTUAL_MAP20;
   1444					map->s_partition_func =
   1445							udf_get_pblock_virt20;
   1446				}
   1447			} else if (!strncmp(upm2->partIdent.ident,
   1448						UDF_ID_SPARABLE,
   1449						strlen(UDF_ID_SPARABLE))) {
   1450				ret = udf_load_sparable_map(sb, map,
   1451					(struct sparablePartitionMap *)gpm);
   1452				if (ret < 0)
   1453					goto out_bh;
   1454			} else if (!strncmp(upm2->partIdent.ident,
   1455						UDF_ID_METADATA,
   1456						strlen(UDF_ID_METADATA))) {
   1457				struct udf_meta_data *mdata =
   1458					&map->s_type_specific.s_metadata;
   1459				struct metadataPartitionMap *mdm =
   1460						(struct metadataPartitionMap *)
   1461						&(lvd->partitionMaps[offset]);
   1462				udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
   1463					  i, type, UDF_ID_METADATA);
   1464
   1465				map->s_partition_type = UDF_METADATA_MAP25;
   1466				map->s_partition_func = udf_get_pblock_meta25;
   1467
   1468				mdata->s_meta_file_loc   =
   1469					le32_to_cpu(mdm->metadataFileLoc);
   1470				mdata->s_mirror_file_loc =
   1471					le32_to_cpu(mdm->metadataMirrorFileLoc);
   1472				mdata->s_bitmap_file_loc =
   1473					le32_to_cpu(mdm->metadataBitmapFileLoc);
   1474				mdata->s_alloc_unit_size =
   1475					le32_to_cpu(mdm->allocUnitSize);
   1476				mdata->s_align_unit_size =
   1477					le16_to_cpu(mdm->alignUnitSize);
   1478				if (mdm->flags & 0x01)
   1479					mdata->s_flags |= MF_DUPLICATE_MD;
   1480
   1481				udf_debug("Metadata Ident suffix=0x%x\n",
   1482					  le16_to_cpu(*(__le16 *)
   1483						      mdm->partIdent.identSuffix));
   1484				udf_debug("Metadata part num=%u\n",
   1485					  le16_to_cpu(mdm->partitionNum));
   1486				udf_debug("Metadata part alloc unit size=%u\n",
   1487					  le32_to_cpu(mdm->allocUnitSize));
   1488				udf_debug("Metadata file loc=%u\n",
   1489					  le32_to_cpu(mdm->metadataFileLoc));
   1490				udf_debug("Mirror file loc=%u\n",
   1491					  le32_to_cpu(mdm->metadataMirrorFileLoc));
   1492				udf_debug("Bitmap file loc=%u\n",
   1493					  le32_to_cpu(mdm->metadataBitmapFileLoc));
   1494				udf_debug("Flags: %d %u\n",
   1495					  mdata->s_flags, mdm->flags);
   1496			} else {
   1497				udf_debug("Unknown ident: %s\n",
   1498					  upm2->partIdent.ident);
   1499				continue;
   1500			}
   1501			map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
   1502			map->s_partition_num = le16_to_cpu(upm2->partitionNum);
   1503		}
   1504		udf_debug("Partition (%d:%u) type %u on volume %u\n",
   1505			  i, map->s_partition_num, type, map->s_volumeseqnum);
   1506	}
   1507
   1508	if (fileset) {
   1509		struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
   1510
   1511		*fileset = lelb_to_cpu(la->extLocation);
   1512		udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
   1513			  fileset->logicalBlockNum,
   1514			  fileset->partitionReferenceNum);
   1515	}
   1516	if (lvd->integritySeqExt.extLength)
   1517		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
   1518	ret = 0;
   1519
   1520	if (!sbi->s_lvid_bh) {
   1521		/* We can't generate unique IDs without a valid LVID */
   1522		if (sb_rdonly(sb)) {
   1523			UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
   1524		} else {
   1525			udf_warn(sb, "Damaged or missing LVID, forcing "
   1526				     "readonly mount\n");
   1527			ret = -EACCES;
   1528		}
   1529	}
   1530out_bh:
   1531	brelse(bh);
   1532	return ret;
   1533}
   1534
   1535/*
   1536 * Find the prevailing Logical Volume Integrity Descriptor.
   1537 */
   1538static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
   1539{
   1540	struct buffer_head *bh, *final_bh;
   1541	uint16_t ident;
   1542	struct udf_sb_info *sbi = UDF_SB(sb);
   1543	struct logicalVolIntegrityDesc *lvid;
   1544	int indirections = 0;
   1545	u32 parts, impuselen;
   1546
   1547	while (++indirections <= UDF_MAX_LVID_NESTING) {
   1548		final_bh = NULL;
   1549		while (loc.extLength > 0 &&
   1550			(bh = udf_read_tagged(sb, loc.extLocation,
   1551					loc.extLocation, &ident))) {
   1552			if (ident != TAG_IDENT_LVID) {
   1553				brelse(bh);
   1554				break;
   1555			}
   1556
   1557			brelse(final_bh);
   1558			final_bh = bh;
   1559
   1560			loc.extLength -= sb->s_blocksize;
   1561			loc.extLocation++;
   1562		}
   1563
   1564		if (!final_bh)
   1565			return;
   1566
   1567		brelse(sbi->s_lvid_bh);
   1568		sbi->s_lvid_bh = final_bh;
   1569
   1570		lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
   1571		if (lvid->nextIntegrityExt.extLength == 0)
   1572			goto check;
   1573
   1574		loc = leea_to_cpu(lvid->nextIntegrityExt);
   1575	}
   1576
   1577	udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
   1578		UDF_MAX_LVID_NESTING);
   1579out_err:
   1580	brelse(sbi->s_lvid_bh);
   1581	sbi->s_lvid_bh = NULL;
   1582	return;
   1583check:
   1584	parts = le32_to_cpu(lvid->numOfPartitions);
   1585	impuselen = le32_to_cpu(lvid->lengthOfImpUse);
   1586	if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
   1587	    sizeof(struct logicalVolIntegrityDesc) + impuselen +
   1588	    2 * parts * sizeof(u32) > sb->s_blocksize) {
   1589		udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
   1590			 "ignoring.\n", parts, impuselen);
   1591		goto out_err;
   1592	}
   1593}
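/*
 * Illustrative instance of the size check above (not part of the original
 * source): with a 2048-byte block, the fixed descriptor (~80 bytes), the two
 * per-partition u32 tables (2 * parts * 4 bytes) and impuselen bytes of
 * implementation use must all fit in one block; parts = 4, impuselen = 512
 * passes (80 + 32 + 512 <= 2048), while parts = 300 cannot (2400 bytes of
 * tables alone exceed what is left of the block).
 */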
   1594
   1595/*
   1596 * Step for reallocation of table of partition descriptor sequence numbers.
   1597 * Must be power of 2.
   1598 */
   1599#define PART_DESC_ALLOC_STEP 32
   1600
   1601struct part_desc_seq_scan_data {
   1602	struct udf_vds_record rec;
   1603	u32 partnum;
   1604};
   1605
   1606struct desc_seq_scan_data {
   1607	struct udf_vds_record vds[VDS_POS_LENGTH];
   1608	unsigned int size_part_descs;
   1609	unsigned int num_part_descs;
   1610	struct part_desc_seq_scan_data *part_descs_loc;
   1611};
   1612
   1613static struct udf_vds_record *handle_partition_descriptor(
   1614				struct buffer_head *bh,
   1615				struct desc_seq_scan_data *data)
   1616{
   1617	struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
   1618	int partnum;
   1619	int i;
   1620
   1621	partnum = le16_to_cpu(desc->partitionNumber);
   1622	for (i = 0; i < data->num_part_descs; i++)
   1623		if (partnum == data->part_descs_loc[i].partnum)
   1624			return &(data->part_descs_loc[i].rec);
   1625	if (data->num_part_descs >= data->size_part_descs) {
   1626		struct part_desc_seq_scan_data *new_loc;
   1627		unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
   1628
   1629		new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
   1630		if (!new_loc)
   1631			return ERR_PTR(-ENOMEM);
   1632		memcpy(new_loc, data->part_descs_loc,
   1633		       data->size_part_descs * sizeof(*new_loc));
   1634		kfree(data->part_descs_loc);
   1635		data->part_descs_loc = new_loc;
   1636		data->size_part_descs = new_size;
   1637	}
   1638	return &(data->part_descs_loc[data->num_part_descs++].rec);
   1639}
   1640
   1641
   1642static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
   1643		struct buffer_head *bh, struct desc_seq_scan_data *data)
   1644{
   1645	switch (ident) {
   1646	case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
   1647		return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
   1648	case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
   1649		return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
   1650	case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
   1651		return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
   1652	case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
   1653		return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
   1654	case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
   1655		return handle_partition_descriptor(bh, data);
   1656	}
   1657	return NULL;
   1658}
   1659
   1660/*
   1661 * Process a main/reserve volume descriptor sequence.
    1662 *   @block		First block of the first extent of the sequence.
    1663 *   @lastblock		Last block of the first extent of the sequence.
    1664 *   @fileset		Where we store the extent containing the root fileset.
    1665 *
    1666 * Returns <0 on error, 0 on success. -EAGAIN is special - try the next
    1667 * descriptor sequence.
   1668 */
   1669static noinline int udf_process_sequence(
   1670		struct super_block *sb,
   1671		sector_t block, sector_t lastblock,
   1672		struct kernel_lb_addr *fileset)
   1673{
   1674	struct buffer_head *bh = NULL;
   1675	struct udf_vds_record *curr;
   1676	struct generic_desc *gd;
   1677	struct volDescPtr *vdp;
   1678	bool done = false;
   1679	uint32_t vdsn;
   1680	uint16_t ident;
   1681	int ret;
   1682	unsigned int indirections = 0;
   1683	struct desc_seq_scan_data data;
   1684	unsigned int i;
   1685
   1686	memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
   1687	data.size_part_descs = PART_DESC_ALLOC_STEP;
   1688	data.num_part_descs = 0;
   1689	data.part_descs_loc = kcalloc(data.size_part_descs,
   1690				      sizeof(*data.part_descs_loc),
   1691				      GFP_KERNEL);
   1692	if (!data.part_descs_loc)
   1693		return -ENOMEM;
   1694
   1695	/*
   1696	 * Read the main descriptor sequence and find which descriptors
   1697	 * are in it.
   1698	 */
   1699	for (; (!done && block <= lastblock); block++) {
   1700		bh = udf_read_tagged(sb, block, block, &ident);
   1701		if (!bh)
   1702			break;
   1703
   1704		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
   1705		gd = (struct generic_desc *)bh->b_data;
   1706		vdsn = le32_to_cpu(gd->volDescSeqNum);
   1707		switch (ident) {
   1708		case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
   1709			if (++indirections > UDF_MAX_TD_NESTING) {
   1710				udf_err(sb, "too many Volume Descriptor "
   1711					"Pointers (max %u supported)\n",
   1712					UDF_MAX_TD_NESTING);
   1713				brelse(bh);
   1714				ret = -EIO;
   1715				goto out;
   1716			}
   1717
   1718			vdp = (struct volDescPtr *)bh->b_data;
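        			/*
        			 * Continue scanning at the extent the Volume
        			 * Descriptor Pointer refers to (extLength is a byte
        			 * count).
        			 */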
   1719			block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
   1720			lastblock = le32_to_cpu(
   1721				vdp->nextVolDescSeqExt.extLength) >>
   1722				sb->s_blocksize_bits;
   1723			lastblock += block - 1;
   1724			/* For loop is going to increment 'block' again */
   1725			block--;
   1726			break;
   1727		case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
   1728		case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
   1729		case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
   1730		case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
   1731		case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
   1732			curr = get_volume_descriptor_record(ident, bh, &data);
   1733			if (IS_ERR(curr)) {
   1734				brelse(bh);
   1735				ret = PTR_ERR(curr);
   1736				goto out;
   1737			}
   1738			/* Descriptor we don't care about? */
   1739			if (!curr)
   1740				break;
   1741			if (vdsn >= curr->volDescSeqNum) {
   1742				curr->volDescSeqNum = vdsn;
   1743				curr->block = block;
   1744			}
   1745			break;
   1746		case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
   1747			done = true;
   1748			break;
   1749		}
   1750		brelse(bh);
   1751	}
   1752	/*
   1753	 * Now read interesting descriptors again and process them
   1754	 * in a suitable order
   1755	 */
   1756	if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
   1757		udf_err(sb, "Primary Volume Descriptor not found!\n");
   1758		ret = -EAGAIN;
   1759		goto out;
   1760	}
   1761	ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
   1762	if (ret < 0)
   1763		goto out;
   1764
   1765	if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
   1766		ret = udf_load_logicalvol(sb,
   1767				data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
   1768				fileset);
   1769		if (ret < 0)
   1770			goto out;
   1771	}
   1772
   1773	/* Now handle prevailing Partition Descriptors */
   1774	for (i = 0; i < data.num_part_descs; i++) {
   1775		ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
   1776		if (ret < 0)
   1777			goto out;
   1778	}
   1779	ret = 0;
   1780out:
   1781	kfree(data.part_descs_loc);
   1782	return ret;
   1783}
   1784
   1785/*
    1786 * Load the Volume Descriptor Sequence described by the anchor in bh.
   1787 *
   1788 * Returns <0 on error, 0 on success
   1789 */
   1790static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
   1791			     struct kernel_lb_addr *fileset)
   1792{
   1793	struct anchorVolDescPtr *anchor;
   1794	sector_t main_s, main_e, reserve_s, reserve_e;
   1795	int ret;
   1796
   1797	anchor = (struct anchorVolDescPtr *)bh->b_data;
   1798
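        	/*
        	 * Extent lengths in the anchor are byte counts; convert them to
        	 * block counts to get the last block of each sequence extent.
        	 */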
   1799	/* Locate the main sequence */
   1800	main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
   1801	main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
   1802	main_e = main_e >> sb->s_blocksize_bits;
   1803	main_e += main_s - 1;
   1804
   1805	/* Locate the reserve sequence */
   1806	reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
   1807	reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
   1808	reserve_e = reserve_e >> sb->s_blocksize_bits;
   1809	reserve_e += reserve_s - 1;
   1810
   1811	/* Process the main & reserve sequences */
   1812	/* responsible for finding the PartitionDesc(s) */
   1813	ret = udf_process_sequence(sb, main_s, main_e, fileset);
   1814	if (ret != -EAGAIN)
   1815		return ret;
   1816	udf_sb_free_partitions(sb);
   1817	ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
   1818	if (ret < 0) {
   1819		udf_sb_free_partitions(sb);
   1820		/* No sequence was OK, return -EIO */
   1821		if (ret == -EAGAIN)
   1822			ret = -EIO;
   1823	}
   1824	return ret;
   1825}
   1826
   1827/*
    1828 * Check whether the given block contains an anchor block and, if so,
    1829 * load the Volume Descriptor Sequence.
   1830 *
   1831 * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
   1832 * block
   1833 */
   1834static int udf_check_anchor_block(struct super_block *sb, sector_t block,
   1835				  struct kernel_lb_addr *fileset)
   1836{
   1837	struct buffer_head *bh;
   1838	uint16_t ident;
   1839	int ret;
   1840
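        	/*
        	 * With VARCONV enabled the block number is remapped when read; skip
        	 * candidates that would map beyond the end of the device.
        	 */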
   1841	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
   1842	    udf_fixed_to_variable(block) >= sb_bdev_nr_blocks(sb))
   1843		return -EAGAIN;
   1844
   1845	bh = udf_read_tagged(sb, block, block, &ident);
   1846	if (!bh)
   1847		return -EAGAIN;
   1848	if (ident != TAG_IDENT_AVDP) {
   1849		brelse(bh);
   1850		return -EAGAIN;
   1851	}
   1852	ret = udf_load_sequence(sb, bh, fileset);
   1853	brelse(bh);
   1854	return ret;
   1855}
   1856
   1857/*
   1858 * Search for an anchor volume descriptor pointer.
   1859 *
   1860 * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
   1861 * of anchors.
   1862 */
   1863static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
   1864			    struct kernel_lb_addr *fileset)
   1865{
   1866	sector_t last[6];
   1867	int i;
   1868	struct udf_sb_info *sbi = UDF_SB(sb);
   1869	int last_count = 0;
   1870	int ret;
   1871
   1872	/* First try user provided anchor */
   1873	if (sbi->s_anchor) {
   1874		ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
   1875		if (ret != -EAGAIN)
   1876			return ret;
   1877	}
   1878	/*
    1879	 * According to the spec, the anchor is in one of:
    1880	 *     block 256
    1881	 *     lastblock-256
    1882	 *     lastblock
    1883	 * However, if the disc isn't closed, it could also be at block 512.
   1884	 */
   1885	ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
   1886	if (ret != -EAGAIN)
   1887		return ret;
   1888	/*
   1889	 * The trouble is which block is the last one. Drives often misreport
   1890	 * this so we try various possibilities.
   1891	 */
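        	/*
        	 * Candidate last blocks: the reported value, +/-1, -2, and the
        	 * value minus 150/152 blocks (likely a 2 second postgap at
        	 * 75 blocks/s that some drives include in the reported size).
        	 * For each candidate both the block itself and candidate-256 are
        	 * probed below. E.g. for a reported last block of 1000 we try
        	 * 1000, 999, 1001, 998, 850 and 848.
        	 */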
   1892	last[last_count++] = *lastblock;
   1893	if (*lastblock >= 1)
   1894		last[last_count++] = *lastblock - 1;
   1895	last[last_count++] = *lastblock + 1;
   1896	if (*lastblock >= 2)
   1897		last[last_count++] = *lastblock - 2;
   1898	if (*lastblock >= 150)
   1899		last[last_count++] = *lastblock - 150;
   1900	if (*lastblock >= 152)
   1901		last[last_count++] = *lastblock - 152;
   1902
   1903	for (i = 0; i < last_count; i++) {
   1904		if (last[i] >= sb_bdev_nr_blocks(sb))
   1905			continue;
   1906		ret = udf_check_anchor_block(sb, last[i], fileset);
   1907		if (ret != -EAGAIN) {
   1908			if (!ret)
   1909				*lastblock = last[i];
   1910			return ret;
   1911		}
   1912		if (last[i] < 256)
   1913			continue;
   1914		ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
   1915		if (ret != -EAGAIN) {
   1916			if (!ret)
   1917				*lastblock = last[i];
   1918			return ret;
   1919		}
   1920	}
   1921
   1922	/* Finally try block 512 in case media is open */
   1923	return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
   1924}
   1925
   1926/*
   1927 * Find an anchor volume descriptor and load Volume Descriptor Sequence from
    1928 * the area specified by it. The function expects sbi->s_last_block to be the
    1929 * last block on the media.
    1930 *
    1931 * Returns <0 on error, 0 if an anchor was found. -EAGAIN is special, meaning
    1932 * no anchor was found.
   1933 */
   1934static int udf_find_anchor(struct super_block *sb,
   1935			   struct kernel_lb_addr *fileset)
   1936{
   1937	struct udf_sb_info *sbi = UDF_SB(sb);
   1938	sector_t lastblock = sbi->s_last_block;
   1939	int ret;
   1940
   1941	ret = udf_scan_anchors(sb, &lastblock, fileset);
   1942	if (ret != -EAGAIN)
   1943		goto out;
   1944
   1945	/* No anchor found? Try VARCONV conversion of block numbers */
   1946	UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
   1947	lastblock = udf_variable_to_fixed(sbi->s_last_block);
    1948	/* First, try without converting the number of the last block */
   1949	ret = udf_scan_anchors(sb, &lastblock, fileset);
   1950	if (ret != -EAGAIN)
   1951		goto out;
   1952
   1953	lastblock = sbi->s_last_block;
    1954	/* Second, try with the converted number of the last block */
   1955	ret = udf_scan_anchors(sb, &lastblock, fileset);
   1956	if (ret < 0) {
   1957		/* VARCONV didn't help. Clear it. */
   1958		UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
   1959	}
   1960out:
   1961	if (ret == 0)
   1962		sbi->s_last_block = lastblock;
   1963	return ret;
   1964}
   1965
   1966/*
   1967 * Check Volume Structure Descriptor, find Anchor block and load Volume
   1968 * Descriptor Sequence.
   1969 *
    1970 * Returns < 0 on error, 0 on success. -EAGAIN is special, meaning the anchor
    1971 * block was not found.
   1972 */
   1973static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
   1974			int silent, struct kernel_lb_addr *fileset)
   1975{
   1976	struct udf_sb_info *sbi = UDF_SB(sb);
   1977	int nsr = 0;
   1978	int ret;
   1979
   1980	if (!sb_set_blocksize(sb, uopt->blocksize)) {
   1981		if (!silent)
   1982			udf_warn(sb, "Bad block size\n");
   1983		return -EINVAL;
   1984	}
   1985	sbi->s_last_block = uopt->lastblock;
   1986	if (!uopt->novrs) {
   1987		/* Check that it is NSR02 compliant */
   1988		nsr = udf_check_vsd(sb);
   1989		if (!nsr) {
   1990			if (!silent)
   1991				udf_warn(sb, "No VRS found\n");
   1992			return -EINVAL;
   1993		}
   1994		if (nsr == -1)
   1995			udf_debug("Failed to read sector at offset %d. "
   1996				  "Assuming open disc. Skipping validity "
   1997				  "check\n", VSD_FIRST_SECTOR_OFFSET);
   1998		if (!sbi->s_last_block)
   1999			sbi->s_last_block = udf_get_last_block(sb);
   2000	} else {
   2001		udf_debug("Validity check skipped because of novrs option\n");
   2002	}
   2003
   2004	/* Look for anchor block and load Volume Descriptor Sequence */
   2005	sbi->s_anchor = uopt->anchor;
   2006	ret = udf_find_anchor(sb, fileset);
   2007	if (ret < 0) {
   2008		if (!silent && ret == -EAGAIN)
   2009			udf_warn(sb, "No anchor found\n");
   2010		return ret;
   2011	}
   2012	return 0;
   2013}
   2014
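        /*
         * Stamp the LVID with the current time and recompute its descriptor tag CRC
         * and checksum so the descriptor is valid when written back to the media.
         */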
   2015static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
   2016{
   2017	struct timespec64 ts;
   2018
   2019	ktime_get_real_ts64(&ts);
   2020	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
   2021	lvid->descTag.descCRC = cpu_to_le16(
   2022		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
   2023			le16_to_cpu(lvid->descTag.descCRCLength)));
   2024	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
   2025}
   2026
   2027static void udf_open_lvid(struct super_block *sb)
   2028{
   2029	struct udf_sb_info *sbi = UDF_SB(sb);
   2030	struct buffer_head *bh = sbi->s_lvid_bh;
   2031	struct logicalVolIntegrityDesc *lvid;
   2032	struct logicalVolIntegrityDescImpUse *lvidiu;
   2033
   2034	if (!bh)
   2035		return;
   2036	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
   2037	lvidiu = udf_sb_lvidiu(sb);
   2038	if (!lvidiu)
   2039		return;
   2040
   2041	mutex_lock(&sbi->s_alloc_mutex);
   2042	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
   2043	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
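        	/*
        	 * Mark the integrity descriptor OPEN while mounted read-write; if
        	 * it was not CLOSE to begin with, the volume was not cleanly
        	 * closed, so remember that and never mark it CLOSE on unmount.
        	 */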
   2044	if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
   2045		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
   2046	else
   2047		UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
   2048
   2049	udf_finalize_lvid(lvid);
   2050	mark_buffer_dirty(bh);
   2051	sbi->s_lvid_dirty = 0;
   2052	mutex_unlock(&sbi->s_alloc_mutex);
   2053	/* Make opening of filesystem visible on the media immediately */
   2054	sync_dirty_buffer(bh);
   2055}
   2056
   2057static void udf_close_lvid(struct super_block *sb)
   2058{
   2059	struct udf_sb_info *sbi = UDF_SB(sb);
   2060	struct buffer_head *bh = sbi->s_lvid_bh;
   2061	struct logicalVolIntegrityDesc *lvid;
   2062	struct logicalVolIntegrityDescImpUse *lvidiu;
   2063
   2064	if (!bh)
   2065		return;
   2066	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
   2067	lvidiu = udf_sb_lvidiu(sb);
   2068	if (!lvidiu)
   2069		return;
   2070
   2071	mutex_lock(&sbi->s_alloc_mutex);
   2072	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
   2073	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
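        	/*
        	 * Raise the recorded UDF read/write revision requirements to match
        	 * what this implementation may have written, and record CLOSE only
        	 * if the volume stayed consistent while mounted.
        	 */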
   2074	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
   2075		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
   2076	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
   2077		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
   2078	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
   2079		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
   2080	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
   2081		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
   2082
   2083	/*
   2084	 * We set buffer uptodate unconditionally here to avoid spurious
    2085	 * warnings from mark_buffer_dirty() when a previous EIO has marked
    2086	 * the buffer as !uptodate.
   2087	 */
   2088	set_buffer_uptodate(bh);
   2089	udf_finalize_lvid(lvid);
   2090	mark_buffer_dirty(bh);
   2091	sbi->s_lvid_dirty = 0;
   2092	mutex_unlock(&sbi->s_alloc_mutex);
   2093	/* Make closing of filesystem visible on the media immediately */
   2094	sync_dirty_buffer(bh);
   2095}
   2096
   2097u64 lvid_get_unique_id(struct super_block *sb)
   2098{
   2099	struct buffer_head *bh;
   2100	struct udf_sb_info *sbi = UDF_SB(sb);
   2101	struct logicalVolIntegrityDesc *lvid;
   2102	struct logicalVolHeaderDesc *lvhd;
   2103	u64 uniqueID;
   2104	u64 ret;
   2105
   2106	bh = sbi->s_lvid_bh;
   2107	if (!bh)
   2108		return 0;
   2109
   2110	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
   2111	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
   2112
   2113	mutex_lock(&sbi->s_alloc_mutex);
   2114	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
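        	/*
        	 * UDF reserves the low 32-bit uniqueID values 0-15, so when the
        	 * lower 32 bits wrap to zero skip ahead to 16 (e.g. ...ffffffff is
        	 * followed by ...00000010).
        	 */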
   2115	if (!(++uniqueID & 0xFFFFFFFF))
   2116		uniqueID += 16;
   2117	lvhd->uniqueID = cpu_to_le64(uniqueID);
   2118	udf_updated_lvid(sb);
   2119	mutex_unlock(&sbi->s_alloc_mutex);
   2120
   2121	return ret;
   2122}
   2123
   2124static int udf_fill_super(struct super_block *sb, void *options, int silent)
   2125{
   2126	int ret = -EINVAL;
   2127	struct inode *inode = NULL;
   2128	struct udf_options uopt;
   2129	struct kernel_lb_addr rootdir, fileset;
   2130	struct udf_sb_info *sbi;
   2131	bool lvid_open = false;
   2132
   2133	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
   2134	/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
   2135	uopt.uid = make_kuid(current_user_ns(), overflowuid);
   2136	uopt.gid = make_kgid(current_user_ns(), overflowgid);
   2137	uopt.umask = 0;
   2138	uopt.fmode = UDF_INVALID_MODE;
   2139	uopt.dmode = UDF_INVALID_MODE;
   2140	uopt.nls_map = NULL;
   2141
   2142	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
   2143	if (!sbi)
   2144		return -ENOMEM;
   2145
   2146	sb->s_fs_info = sbi;
   2147
   2148	mutex_init(&sbi->s_alloc_mutex);
   2149
   2150	if (!udf_parse_options((char *)options, &uopt, false))
   2151		goto parse_options_failure;
   2152
   2153	fileset.logicalBlockNum = 0xFFFFFFFF;
   2154	fileset.partitionReferenceNum = 0xFFFF;
   2155
   2156	sbi->s_flags = uopt.flags;
   2157	sbi->s_uid = uopt.uid;
   2158	sbi->s_gid = uopt.gid;
   2159	sbi->s_umask = uopt.umask;
   2160	sbi->s_fmode = uopt.fmode;
   2161	sbi->s_dmode = uopt.dmode;
   2162	sbi->s_nls_map = uopt.nls_map;
   2163	rwlock_init(&sbi->s_cred_lock);
   2164
   2165	if (uopt.session == 0xFFFFFFFF)
   2166		sbi->s_session = udf_get_last_session(sb);
   2167	else
   2168		sbi->s_session = uopt.session;
   2169
   2170	udf_debug("Multi-session=%d\n", sbi->s_session);
   2171
   2172	/* Fill in the rest of the superblock */
   2173	sb->s_op = &udf_sb_ops;
   2174	sb->s_export_op = &udf_export_ops;
   2175
   2176	sb->s_magic = UDF_SUPER_MAGIC;
   2177	sb->s_time_gran = 1000;
   2178
   2179	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
   2180		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
   2181	} else {
   2182		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
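        		/*
        		 * No blocksize given: probe from the device's logical block
        		 * size, doubling up to 4096 bytes, until a valid volume
        		 * recognition sequence and anchor are found (e.g. a 512-byte
        		 * device is probed at 512, 1024, 2048 and 4096).
        		 */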
   2183		while (uopt.blocksize <= 4096) {
   2184			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
   2185			if (ret < 0) {
   2186				if (!silent && ret != -EACCES) {
   2187					pr_notice("Scanning with blocksize %u failed\n",
   2188						  uopt.blocksize);
   2189				}
   2190				brelse(sbi->s_lvid_bh);
   2191				sbi->s_lvid_bh = NULL;
   2192				/*
   2193				 * EACCES is special - we want to propagate to
   2194				 * upper layers that we cannot handle RW mount.
   2195				 */
   2196				if (ret == -EACCES)
   2197					break;
   2198			} else
   2199				break;
   2200
   2201			uopt.blocksize <<= 1;
   2202		}
   2203	}
   2204	if (ret < 0) {
   2205		if (ret == -EAGAIN) {
   2206			udf_warn(sb, "No partition found (1)\n");
   2207			ret = -EINVAL;
   2208		}
   2209		goto error_out;
   2210	}
   2211
   2212	udf_debug("Lastblock=%u\n", sbi->s_last_block);
   2213
   2214	if (sbi->s_lvid_bh) {
   2215		struct logicalVolIntegrityDescImpUse *lvidiu =
   2216							udf_sb_lvidiu(sb);
   2217		uint16_t minUDFReadRev;
   2218		uint16_t minUDFWriteRev;
   2219
   2220		if (!lvidiu) {
   2221			ret = -EINVAL;
   2222			goto error_out;
   2223		}
   2224		minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
   2225		minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
   2226		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
   2227			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
   2228				minUDFReadRev,
   2229				UDF_MAX_READ_VERSION);
   2230			ret = -EINVAL;
   2231			goto error_out;
   2232		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
   2233			if (!sb_rdonly(sb)) {
   2234				ret = -EACCES;
   2235				goto error_out;
   2236			}
   2237			UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
   2238		}
   2239
   2240		sbi->s_udfrev = minUDFWriteRev;
   2241
   2242		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
   2243			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
   2244		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
   2245			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
   2246	}
   2247
   2248	if (!sbi->s_partitions) {
   2249		udf_warn(sb, "No partition found (2)\n");
   2250		ret = -EINVAL;
   2251		goto error_out;
   2252	}
   2253
   2254	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
   2255			UDF_PART_FLAG_READ_ONLY) {
   2256		if (!sb_rdonly(sb)) {
   2257			ret = -EACCES;
   2258			goto error_out;
   2259		}
   2260		UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
   2261	}
   2262
   2263	ret = udf_find_fileset(sb, &fileset, &rootdir);
   2264	if (ret < 0) {
   2265		udf_warn(sb, "No fileset found\n");
   2266		goto error_out;
   2267	}
   2268
   2269	if (!silent) {
   2270		struct timestamp ts;
   2271		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
   2272		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
   2273			 sbi->s_volume_ident,
   2274			 le16_to_cpu(ts.year), ts.month, ts.day,
   2275			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
   2276	}
   2277	if (!sb_rdonly(sb)) {
   2278		udf_open_lvid(sb);
   2279		lvid_open = true;
   2280	}
   2281
   2282	/* Assign the root inode */
   2283	/* assign inodes by physical block number */
   2284	/* perhaps it's not extensible enough, but for now ... */
   2285	inode = udf_iget(sb, &rootdir);
   2286	if (IS_ERR(inode)) {
   2287		udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
   2288		       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
   2289		ret = PTR_ERR(inode);
   2290		goto error_out;
   2291	}
   2292
   2293	/* Allocate a dentry for the root inode */
   2294	sb->s_root = d_make_root(inode);
   2295	if (!sb->s_root) {
   2296		udf_err(sb, "Couldn't allocate root dentry\n");
   2297		ret = -ENOMEM;
   2298		goto error_out;
   2299	}
   2300	sb->s_maxbytes = MAX_LFS_FILESIZE;
   2301	sb->s_max_links = UDF_MAX_LINKS;
   2302	return 0;
   2303
   2304error_out:
   2305	iput(sbi->s_vat_inode);
   2306parse_options_failure:
   2307	unload_nls(uopt.nls_map);
   2308	if (lvid_open)
   2309		udf_close_lvid(sb);
   2310	brelse(sbi->s_lvid_bh);
   2311	udf_sb_free_partitions(sb);
   2312	kfree(sbi);
   2313	sb->s_fs_info = NULL;
   2314
   2315	return ret;
   2316}
   2317
   2318void _udf_err(struct super_block *sb, const char *function,
   2319	      const char *fmt, ...)
   2320{
   2321	struct va_format vaf;
   2322	va_list args;
   2323
   2324	va_start(args, fmt);
   2325
   2326	vaf.fmt = fmt;
   2327	vaf.va = &args;
   2328
   2329	pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
   2330
   2331	va_end(args);
   2332}
   2333
   2334void _udf_warn(struct super_block *sb, const char *function,
   2335	       const char *fmt, ...)
   2336{
   2337	struct va_format vaf;
   2338	va_list args;
   2339
   2340	va_start(args, fmt);
   2341
   2342	vaf.fmt = fmt;
   2343	vaf.va = &args;
   2344
   2345	pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
   2346
   2347	va_end(args);
   2348}
   2349
   2350static void udf_put_super(struct super_block *sb)
   2351{
   2352	struct udf_sb_info *sbi;
   2353
   2354	sbi = UDF_SB(sb);
   2355
   2356	iput(sbi->s_vat_inode);
   2357	unload_nls(sbi->s_nls_map);
   2358	if (!sb_rdonly(sb))
   2359		udf_close_lvid(sb);
   2360	brelse(sbi->s_lvid_bh);
   2361	udf_sb_free_partitions(sb);
   2362	mutex_destroy(&sbi->s_alloc_mutex);
   2363	kfree(sb->s_fs_info);
   2364	sb->s_fs_info = NULL;
   2365}
   2366
   2367static int udf_sync_fs(struct super_block *sb, int wait)
   2368{
   2369	struct udf_sb_info *sbi = UDF_SB(sb);
   2370
   2371	mutex_lock(&sbi->s_alloc_mutex);
   2372	if (sbi->s_lvid_dirty) {
   2373		struct buffer_head *bh = sbi->s_lvid_bh;
   2374		struct logicalVolIntegrityDesc *lvid;
   2375
   2376		lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
   2377		udf_finalize_lvid(lvid);
   2378
   2379		/*
   2380		 * Blockdevice will be synced later so we don't have to submit
   2381		 * the buffer for IO
   2382		 */
   2383		mark_buffer_dirty(bh);
   2384		sbi->s_lvid_dirty = 0;
   2385	}
   2386	mutex_unlock(&sbi->s_alloc_mutex);
   2387
   2388	return 0;
   2389}
   2390
   2391static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
   2392{
   2393	struct super_block *sb = dentry->d_sb;
   2394	struct udf_sb_info *sbi = UDF_SB(sb);
   2395	struct logicalVolIntegrityDescImpUse *lvidiu;
   2396	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
   2397
   2398	lvidiu = udf_sb_lvidiu(sb);
   2399	buf->f_type = UDF_SUPER_MAGIC;
   2400	buf->f_bsize = sb->s_blocksize;
   2401	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
   2402	buf->f_bfree = udf_count_free(sb);
   2403	buf->f_bavail = buf->f_bfree;
   2404	/*
   2405	 * Let's pretend each free block is also a free 'inode' since UDF does
    2406	 * not have a separate preallocated table of inodes.
   2407	 */
   2408	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
   2409					  le32_to_cpu(lvidiu->numDirs)) : 0)
   2410			+ buf->f_bfree;
   2411	buf->f_ffree = buf->f_bfree;
   2412	buf->f_namelen = UDF_NAME_LEN;
   2413	buf->f_fsid = u64_to_fsid(id);
   2414
   2415	return 0;
   2416}
   2417
   2418static unsigned int udf_count_free_bitmap(struct super_block *sb,
   2419					  struct udf_bitmap *bitmap)
   2420{
   2421	struct buffer_head *bh = NULL;
   2422	unsigned int accum = 0;
   2423	int index;
   2424	udf_pblk_t block = 0, newblock;
   2425	struct kernel_lb_addr loc;
   2426	uint32_t bytes;
   2427	uint8_t *ptr;
   2428	uint16_t ident;
   2429	struct spaceBitmapDesc *bm;
   2430
   2431	loc.logicalBlockNum = bitmap->s_extPosition;
   2432	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
   2433	bh = udf_read_ptagged(sb, &loc, 0, &ident);
   2434
   2435	if (!bh) {
   2436		udf_err(sb, "udf_count_free failed\n");
   2437		goto out;
   2438	} else if (ident != TAG_IDENT_SBD) {
   2439		brelse(bh);
   2440		udf_err(sb, "udf_count_free failed\n");
   2441		goto out;
   2442	}
   2443
   2444	bm = (struct spaceBitmapDesc *)bh->b_data;
   2445	bytes = le32_to_cpu(bm->numOfBytes);
   2446	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
   2447	ptr = (uint8_t *)bh->b_data;
   2448
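        	/*
        	 * Each bit set in the Space Bitmap marks a free block; count the
        	 * set bits across all blocks covered by the descriptor.
        	 */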
   2449	while (bytes > 0) {
   2450		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
   2451		accum += bitmap_weight((const unsigned long *)(ptr + index),
   2452					cur_bytes * 8);
   2453		bytes -= cur_bytes;
   2454		if (bytes) {
   2455			brelse(bh);
   2456			newblock = udf_get_lb_pblock(sb, &loc, ++block);
   2457			bh = udf_tread(sb, newblock);
   2458			if (!bh) {
   2459				udf_debug("read failed\n");
   2460				goto out;
   2461			}
   2462			index = 0;
   2463			ptr = (uint8_t *)bh->b_data;
   2464		}
   2465	}
   2466	brelse(bh);
   2467out:
   2468	return accum;
   2469}
   2470
   2471static unsigned int udf_count_free_table(struct super_block *sb,
   2472					 struct inode *table)
   2473{
   2474	unsigned int accum = 0;
   2475	uint32_t elen;
   2476	struct kernel_lb_addr eloc;
   2477	struct extent_position epos;
   2478
   2479	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
   2480	epos.block = UDF_I(table)->i_location;
   2481	epos.offset = sizeof(struct unallocSpaceEntry);
   2482	epos.bh = NULL;
   2483
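        	/*
        	 * Walk the allocation extents of the Unallocated Space Entry; each
        	 * extent describes a run of free blocks, so sum their lengths in
        	 * blocks.
        	 */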
   2484	while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1)
   2485		accum += (elen >> table->i_sb->s_blocksize_bits);
   2486
   2487	brelse(epos.bh);
   2488	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
   2489
   2490	return accum;
   2491}
   2492
   2493static unsigned int udf_count_free(struct super_block *sb)
   2494{
   2495	unsigned int accum = 0;
   2496	struct udf_sb_info *sbi = UDF_SB(sb);
   2497	struct udf_part_map *map;
   2498	unsigned int part = sbi->s_partition;
   2499	int ptype = sbi->s_partmaps[part].s_partition_type;
   2500
   2501	if (ptype == UDF_METADATA_MAP25) {
   2502		part = sbi->s_partmaps[part].s_type_specific.s_metadata.
   2503							s_phys_partition_ref;
   2504	} else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
   2505		/*
   2506		 * Filesystems with VAT are append-only and we cannot write to
    2507		 * them. Let's just report 0 here.
   2508		 */
   2509		return 0;
   2510	}
   2511
   2512	if (sbi->s_lvid_bh) {
   2513		struct logicalVolIntegrityDesc *lvid =
   2514			(struct logicalVolIntegrityDesc *)
   2515			sbi->s_lvid_bh->b_data;
   2516		if (le32_to_cpu(lvid->numOfPartitions) > part) {
   2517			accum = le32_to_cpu(
   2518					lvid->freeSpaceTable[part]);
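        			/*
        			 * 0xFFFFFFFF in the free space table means the amount
        			 * of free space is not recorded; fall back to counting
        			 * it from the space bitmap or table below.
        			 */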
   2519			if (accum == 0xFFFFFFFF)
   2520				accum = 0;
   2521		}
   2522	}
   2523
   2524	if (accum)
   2525		return accum;
   2526
   2527	map = &sbi->s_partmaps[part];
   2528	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
   2529		accum += udf_count_free_bitmap(sb,
   2530					       map->s_uspace.s_bitmap);
   2531	}
   2532	if (accum)
   2533		return accum;
   2534
   2535	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
   2536		accum += udf_count_free_table(sb,
   2537					      map->s_uspace.s_table);
   2538	}
   2539	return accum;
   2540}
   2541
   2542MODULE_AUTHOR("Ben Fennema");
   2543MODULE_DESCRIPTION("Universal Disk Format Filesystem");
   2544MODULE_LICENSE("GPL");
   2545module_init(init_udf_fs)
   2546module_exit(exit_udf_fs)