cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

inode.c (26778B)


      1/*
      2 * Compressed rom filesystem for Linux.
      3 *
      4 * Copyright (C) 1999 Linus Torvalds.
      5 *
      6 * This file is released under the GPL.
      7 */
      8
      9/*
     10 * These are the VFS interfaces to the compressed rom filesystem.
     11 * The actual compression is based on zlib, see the other files.
     12 */
     13
     14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     15
     16#include <linux/module.h>
     17#include <linux/fs.h>
     18#include <linux/file.h>
     19#include <linux/pagemap.h>
     20#include <linux/pfn_t.h>
     21#include <linux/ramfs.h>
     22#include <linux/init.h>
     23#include <linux/string.h>
     24#include <linux/blkdev.h>
     25#include <linux/mtd/mtd.h>
     26#include <linux/mtd/super.h>
     27#include <linux/fs_context.h>
     28#include <linux/slab.h>
     29#include <linux/vfs.h>
     30#include <linux/mutex.h>
     31#include <uapi/linux/cramfs_fs.h>
     32#include <linux/uaccess.h>
     33
     34#include "internal.h"
     35
     36/*
     37 * cramfs super-block data in memory
     38 */
     39struct cramfs_sb_info {
     40	unsigned long magic;
     41	unsigned long size;
     42	unsigned long blocks;
     43	unsigned long files;
     44	unsigned long flags;
     45	void *linear_virt_addr;
     46	resource_size_t linear_phys_addr;
     47	size_t mtd_point_size;
     48};
     49
     50static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
     51{
     52	return sb->s_fs_info;
     53}
     54
     55static const struct super_operations cramfs_ops;
     56static const struct inode_operations cramfs_dir_inode_operations;
     57static const struct file_operations cramfs_directory_operations;
     58static const struct file_operations cramfs_physmem_fops;
     59static const struct address_space_operations cramfs_aops;
     60
     61static DEFINE_MUTEX(read_mutex);
     62
     63
     64/* These macros may change in future, to provide better st_ino semantics. */
     65#define OFFSET(x)	((x)->i_ino)
     66
     67static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
     68{
     69	if (!cino->offset)
     70		return offset + 1;
     71	if (!cino->size)
     72		return offset + 1;
     73
     74	/*
     75	 * The file mode test fixes buggy mkcramfs implementations where
      76	 * cramfs_inode->offset is set to a non-zero value for entries
      77	 * which did not contain data, like device nodes and fifos.
     78	 */
     79	switch (cino->mode & S_IFMT) {
     80	case S_IFREG:
     81	case S_IFDIR:
     82	case S_IFLNK:
     83		return cino->offset << 2;
     84	default:
     85		break;
     86	}
     87	return offset + 1;
     88}
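/*
 * A concrete illustration of the numbering scheme above (values are
 * examples only, assuming a well-formed image): a regular file whose
 * data starts at image byte 0x4ab0 stores cino->offset == 0x12ac
 * (0x4ab0 >> 2), so cramino() returns 0x4ab0 and the resulting i_ino
 * has its low two bits clear.  An empty file or a device node whose
 * entry sits at the 4-byte aligned image offset 0x2b8 gets 0x2b8 + 1
 * == 0x2b9 instead, so its low two bits are never zero.
 * get_cramfs_inode() below relies on this distinction when deciding
 * whether to fill in i_size and i_blocks.
 */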
     89
     90static struct inode *get_cramfs_inode(struct super_block *sb,
     91	const struct cramfs_inode *cramfs_inode, unsigned int offset)
     92{
     93	struct inode *inode;
     94	static struct timespec64 zerotime;
     95
     96	inode = iget_locked(sb, cramino(cramfs_inode, offset));
     97	if (!inode)
     98		return ERR_PTR(-ENOMEM);
     99	if (!(inode->i_state & I_NEW))
    100		return inode;
    101
    102	switch (cramfs_inode->mode & S_IFMT) {
    103	case S_IFREG:
    104		inode->i_fop = &generic_ro_fops;
    105		inode->i_data.a_ops = &cramfs_aops;
    106		if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
    107		    CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
    108		    CRAMFS_SB(sb)->linear_phys_addr)
    109			inode->i_fop = &cramfs_physmem_fops;
    110		break;
    111	case S_IFDIR:
    112		inode->i_op = &cramfs_dir_inode_operations;
    113		inode->i_fop = &cramfs_directory_operations;
    114		break;
    115	case S_IFLNK:
    116		inode->i_op = &page_symlink_inode_operations;
    117		inode_nohighmem(inode);
    118		inode->i_data.a_ops = &cramfs_aops;
    119		break;
    120	default:
    121		init_special_inode(inode, cramfs_inode->mode,
    122				old_decode_dev(cramfs_inode->size));
    123	}
    124
    125	inode->i_mode = cramfs_inode->mode;
    126	i_uid_write(inode, cramfs_inode->uid);
    127	i_gid_write(inode, cramfs_inode->gid);
    128
    129	/* if the lower 2 bits are zero, the inode contains data */
    130	if (!(inode->i_ino & 3)) {
    131		inode->i_size = cramfs_inode->size;
    132		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
    133	}
    134
    135	/* Struct copy intentional */
    136	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
    137	/* inode->i_nlink is left 1 - arguably wrong for directories,
    138	   but it's the best we can do without reading the directory
    139	   contents.  1 yields the right result in GNU find, even
    140	   without -noleaf option. */
    141
    142	unlock_new_inode(inode);
    143
    144	return inode;
    145}
    146
    147/*
    148 * We have our own block cache: don't fill up the buffer cache
    149 * with the rom-image, because the way the filesystem is set
    150 * up the accesses should be fairly regular and cached in the
    151 * page cache and dentry tree anyway..
    152 *
    153 * This also acts as a way to guarantee contiguous areas of up to
    154 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
    155 * worry about end-of-buffer issues even when decompressing a full
    156 * page cache.
    157 *
    158 * Note: This is all optimized away at compile time when
    159 *       CONFIG_CRAMFS_BLOCKDEV=n.
    160 */
    161#define READ_BUFFERS (2)
    162/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
    163#define NEXT_BUFFER(_ix) ((_ix) ^ 1)
    164
    165/*
    166 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
    167 * data that takes up more space than the original and with unlucky
    168 * alignment.
    169 */
    170#define BLKS_PER_BUF_SHIFT	(2)
    171#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
    172#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)
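/*
 * For example, with 4 KB pages the largest single request that reaches
 * cramfs_blkdev_read() is a (possibly expanded) compressed block of at
 * most 2 * PAGE_SIZE bytes (cramfs_read_folio() rejects anything
 * larger), and it may start at any byte offset within its first device
 * page, so it can straddle up to three pages.  BLKS_PER_BUF == 4 thus
 * guarantees such a request always fits in one 16 KB buffer.
 */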
    173
    174static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
    175static unsigned buffer_blocknr[READ_BUFFERS];
    176static struct super_block *buffer_dev[READ_BUFFERS];
    177static int next_buffer;
    178
    179/*
    180 * Populate our block cache and return a pointer to it.
    181 */
    182static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
    183				unsigned int len)
    184{
    185	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
    186	struct page *pages[BLKS_PER_BUF];
    187	unsigned i, blocknr, buffer;
    188	unsigned long devsize;
    189	char *data;
    190
    191	if (!len)
    192		return NULL;
    193	blocknr = offset >> PAGE_SHIFT;
    194	offset &= PAGE_SIZE - 1;
    195
    196	/* Check if an existing buffer already has the data.. */
    197	for (i = 0; i < READ_BUFFERS; i++) {
    198		unsigned int blk_offset;
    199
    200		if (buffer_dev[i] != sb)
    201			continue;
    202		if (blocknr < buffer_blocknr[i])
    203			continue;
    204		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
    205		blk_offset += offset;
    206		if (blk_offset > BUFFER_SIZE ||
    207		    blk_offset + len > BUFFER_SIZE)
    208			continue;
    209		return read_buffers[i] + blk_offset;
    210	}
    211
    212	devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;
    213
    214	/* Ok, read in BLKS_PER_BUF pages completely first. */
    215	for (i = 0; i < BLKS_PER_BUF; i++) {
    216		struct page *page = NULL;
    217
    218		if (blocknr + i < devsize) {
    219			page = read_mapping_page(mapping, blocknr + i, NULL);
    220			/* synchronous error? */
    221			if (IS_ERR(page))
    222				page = NULL;
    223		}
    224		pages[i] = page;
    225	}
    226
    227	for (i = 0; i < BLKS_PER_BUF; i++) {
    228		struct page *page = pages[i];
    229
    230		if (page) {
    231			wait_on_page_locked(page);
    232			if (!PageUptodate(page)) {
    233				/* asynchronous error */
    234				put_page(page);
    235				pages[i] = NULL;
    236			}
    237		}
    238	}
    239
    240	buffer = next_buffer;
    241	next_buffer = NEXT_BUFFER(buffer);
    242	buffer_blocknr[buffer] = blocknr;
    243	buffer_dev[buffer] = sb;
    244
    245	data = read_buffers[buffer];
    246	for (i = 0; i < BLKS_PER_BUF; i++) {
    247		struct page *page = pages[i];
    248
    249		if (page) {
    250			memcpy(data, kmap(page), PAGE_SIZE);
    251			kunmap(page);
    252			put_page(page);
    253		} else
    254			memset(data, 0, PAGE_SIZE);
    255		data += PAGE_SIZE;
    256	}
    257	return read_buffers[buffer] + offset;
    258}
    259
    260/*
    261 * Return a pointer to the linearly addressed cramfs image in memory.
    262 */
    263static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
    264				unsigned int len)
    265{
    266	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
    267
    268	if (!len)
    269		return NULL;
    270	if (len > sbi->size || offset > sbi->size - len)
    271		return page_address(ZERO_PAGE(0));
    272	return sbi->linear_virt_addr + offset;
    273}
    274
    275/*
    276 * Returns a pointer to a buffer containing at least LEN bytes of
    277 * filesystem starting at byte offset OFFSET into the filesystem.
    278 */
    279static void *cramfs_read(struct super_block *sb, unsigned int offset,
    280			 unsigned int len)
    281{
    282	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
    283
    284	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
    285		return cramfs_direct_read(sb, offset, len);
    286	else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
    287		return cramfs_blkdev_read(sb, offset, len);
    288	else
    289		return NULL;
    290}
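/*
 * Note that the pointer returned by cramfs_read() is only guaranteed to
 * stay valid while read_mutex is held: the block-device path hands out
 * a pointer into one of only two shared read_buffers[], which a later
 * read may recycle.  Callers in this file therefore follow a pattern
 * along these lines:
 *
 *	mutex_lock(&read_mutex);
 *	block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
 *	mutex_unlock(&read_mutex);
 */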
    291
    292/*
    293 * For a mapping to be possible, we need a range of uncompressed and
    294 * contiguous blocks. Return the offset for the first block and number of
    295 * valid blocks for which that is true, or zero otherwise.
    296 */
    297static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
    298{
    299	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
    300	int i;
    301	u32 *blockptrs, first_block_addr;
    302
    303	/*
    304	 * We can dereference memory directly here as this code may be
    305	 * reached only when there is a direct filesystem image mapping
    306	 * available in memory.
    307	 */
    308	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
    309	first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
    310	i = 0;
    311	do {
    312		u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
    313		u32 expect = (first_block_addr + block_off) |
    314			     CRAMFS_BLK_FLAG_DIRECT_PTR |
    315			     CRAMFS_BLK_FLAG_UNCOMPRESSED;
    316		if (blockptrs[i] != expect) {
    317			pr_debug("range: block %d/%d got %#x expects %#x\n",
    318				 pgoff+i, pgoff + *pages - 1,
    319				 blockptrs[i], expect);
    320			if (i == 0)
    321				return 0;
    322			break;
    323		}
    324	} while (++i < *pages);
    325
    326	*pages = i;
    327	return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
    328}
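/*
 * For illustration, assuming 4 KB pages: a file stored uncompressed and
 * contiguously at image byte offset 0x40000 carries the block pointers
 * 0x10000, 0x10400, 0x10800, ... (the byte address divided by 4,
 * advancing by PAGE_SIZE >> 2 per page), each ORed with
 * CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED.  For such
 * a file the function above returns 0x40000 and leaves *pages at its
 * requested value; any deviation truncates the range at the first
 * non-matching pointer.
 */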
    329
    330#ifdef CONFIG_MMU
    331
    332/*
    333 * Return true if the last page of a file in the filesystem image contains
    334 * some other data that doesn't belong to that file. It is assumed that the
    335 * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
     336 * (verified by cramfs_get_block_range()) and directly accessible in memory.
    337 */
    338static bool cramfs_last_page_is_shared(struct inode *inode)
    339{
    340	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
    341	u32 partial, last_page, blockaddr, *blockptrs;
    342	char *tail_data;
    343
    344	partial = offset_in_page(inode->i_size);
    345	if (!partial)
    346		return false;
    347	last_page = inode->i_size >> PAGE_SHIFT;
    348	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
    349	blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
    350	blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
    351	tail_data = sbi->linear_virt_addr + blockaddr + partial;
    352	return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
    353}
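/*
 * Example, again assuming 4 KB pages: for a 5170-byte (0x1432) file,
 * partial == 0x432 and last_page == 1.  The file's final page can only
 * be mapped directly if bytes 0x432..0xfff of that block in the image
 * are zero padding; if mkcramfs packed other data there instead,
 * memchr_inv() spots it and the caller below leaves the last page to
 * the normal cramfs_read_folio() path.
 */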
    354
    355static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
    356{
    357	struct inode *inode = file_inode(file);
    358	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
    359	unsigned int pages, max_pages, offset;
    360	unsigned long address, pgoff = vma->vm_pgoff;
    361	char *bailout_reason;
    362	int ret;
    363
    364	ret = generic_file_readonly_mmap(file, vma);
    365	if (ret)
    366		return ret;
    367
    368	/*
    369	 * Now try to pre-populate ptes for this vma with a direct
    370	 * mapping avoiding memory allocation when possible.
    371	 */
    372
    373	/* Could COW work here? */
    374	bailout_reason = "vma is writable";
    375	if (vma->vm_flags & VM_WRITE)
    376		goto bailout;
    377
    378	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    379	bailout_reason = "beyond file limit";
    380	if (pgoff >= max_pages)
    381		goto bailout;
    382	pages = min(vma_pages(vma), max_pages - pgoff);
    383
    384	offset = cramfs_get_block_range(inode, pgoff, &pages);
    385	bailout_reason = "unsuitable block layout";
    386	if (!offset)
    387		goto bailout;
    388	address = sbi->linear_phys_addr + offset;
    389	bailout_reason = "data is not page aligned";
    390	if (!PAGE_ALIGNED(address))
    391		goto bailout;
    392
    393	/* Don't map the last page if it contains some other data */
    394	if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
    395		pr_debug("mmap: %pD: last page is shared\n", file);
    396		pages--;
    397	}
    398
    399	if (!pages) {
    400		bailout_reason = "no suitable block remaining";
    401		goto bailout;
    402	}
    403
    404	if (pages == vma_pages(vma)) {
    405		/*
    406		 * The entire vma is mappable. remap_pfn_range() will
    407		 * make it distinguishable from a non-direct mapping
    408		 * in /proc/<pid>/maps by substituting the file offset
    409		 * with the actual physical address.
    410		 */
    411		ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
    412				      pages * PAGE_SIZE, vma->vm_page_prot);
    413	} else {
    414		/*
    415		 * Let's create a mixed map if we can't map it all.
    416		 * The normal paging machinery will take care of the
    417		 * unpopulated ptes via cramfs_read_folio().
    418		 */
    419		int i;
    420		vma->vm_flags |= VM_MIXEDMAP;
    421		for (i = 0; i < pages && !ret; i++) {
    422			vm_fault_t vmf;
    423			unsigned long off = i * PAGE_SIZE;
    424			pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
    425			vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
    426			if (vmf & VM_FAULT_ERROR)
    427				ret = vm_fault_to_errno(vmf, 0);
    428		}
    429	}
    430
    431	if (!ret)
    432		pr_debug("mapped %pD[%lu] at 0x%08lx (%u/%lu pages) "
    433			 "to vma 0x%08lx, page_prot 0x%llx\n", file,
    434			 pgoff, address, pages, vma_pages(vma), vma->vm_start,
    435			 (unsigned long long)pgprot_val(vma->vm_page_prot));
    436	return ret;
    437
    438bailout:
    439	pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
    440		 file, pgoff, bailout_reason);
    441	/* Didn't manage any direct map, but normal paging is still possible */
    442	return 0;
    443}
    444
    445#else /* CONFIG_MMU */
    446
    447static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
    448{
    449	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
    450}
    451
    452static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
    453			unsigned long addr, unsigned long len,
    454			unsigned long pgoff, unsigned long flags)
    455{
    456	struct inode *inode = file_inode(file);
    457	struct super_block *sb = inode->i_sb;
    458	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
    459	unsigned int pages, block_pages, max_pages, offset;
    460
    461	pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    462	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    463	if (pgoff >= max_pages || pages > max_pages - pgoff)
    464		return -EINVAL;
    465	block_pages = pages;
    466	offset = cramfs_get_block_range(inode, pgoff, &block_pages);
    467	if (!offset || block_pages != pages)
    468		return -ENOSYS;
    469	addr = sbi->linear_phys_addr + offset;
    470	pr_debug("get_unmapped for %pD ofs %#lx siz %lu at 0x%08lx\n",
    471		 file, pgoff*PAGE_SIZE, len, addr);
    472	return addr;
    473}
    474
    475static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
    476{
    477	return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
    478	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
    479}
    480
    481#endif /* CONFIG_MMU */
    482
    483static const struct file_operations cramfs_physmem_fops = {
    484	.llseek			= generic_file_llseek,
    485	.read_iter		= generic_file_read_iter,
    486	.splice_read		= generic_file_splice_read,
    487	.mmap			= cramfs_physmem_mmap,
    488#ifndef CONFIG_MMU
    489	.get_unmapped_area	= cramfs_physmem_get_unmapped_area,
    490	.mmap_capabilities	= cramfs_physmem_mmap_capabilities,
    491#endif
    492};
    493
    494static void cramfs_kill_sb(struct super_block *sb)
    495{
    496	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
    497
    498	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
    499		if (sbi && sbi->mtd_point_size)
    500			mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
    501		kill_mtd_super(sb);
    502	} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
    503		kill_block_super(sb);
    504	}
    505	kfree(sbi);
    506}
    507
    508static int cramfs_reconfigure(struct fs_context *fc)
    509{
    510	sync_filesystem(fc->root->d_sb);
    511	fc->sb_flags |= SB_RDONLY;
    512	return 0;
    513}
    514
    515static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
    516			     struct cramfs_super *super)
    517{
    518	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
    519	unsigned long root_offset;
    520	bool silent = fc->sb_flags & SB_SILENT;
    521
    522	/* We don't know the real size yet */
    523	sbi->size = PAGE_SIZE;
    524
    525	/* Read the first block and get the superblock from it */
    526	mutex_lock(&read_mutex);
    527	memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
    528	mutex_unlock(&read_mutex);
    529
    530	/* Do sanity checks on the superblock */
    531	if (super->magic != CRAMFS_MAGIC) {
    532		/* check for wrong endianness */
    533		if (super->magic == CRAMFS_MAGIC_WEND) {
    534			if (!silent)
    535				errorfc(fc, "wrong endianness");
    536			return -EINVAL;
    537		}
    538
    539		/* check at 512 byte offset */
    540		mutex_lock(&read_mutex);
    541		memcpy(super,
    542		       cramfs_read(sb, 512, sizeof(*super)),
    543		       sizeof(*super));
    544		mutex_unlock(&read_mutex);
    545		if (super->magic != CRAMFS_MAGIC) {
    546			if (super->magic == CRAMFS_MAGIC_WEND && !silent)
    547				errorfc(fc, "wrong endianness");
    548			else if (!silent)
    549				errorfc(fc, "wrong magic");
    550			return -EINVAL;
    551		}
    552	}
    553
    554	/* get feature flags first */
    555	if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
    556		errorfc(fc, "unsupported filesystem features");
    557		return -EINVAL;
    558	}
    559
    560	/* Check that the root inode is in a sane state */
    561	if (!S_ISDIR(super->root.mode)) {
    562		errorfc(fc, "root is not a directory");
    563		return -EINVAL;
    564	}
    565	/* correct strange, hard-coded permissions of mkcramfs */
    566	super->root.mode |= 0555;
    567
    568	root_offset = super->root.offset << 2;
    569	if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
    570		sbi->size = super->size;
    571		sbi->blocks = super->fsid.blocks;
    572		sbi->files = super->fsid.files;
    573	} else {
    574		sbi->size = 1<<28;
    575		sbi->blocks = 0;
    576		sbi->files = 0;
    577	}
    578	sbi->magic = super->magic;
    579	sbi->flags = super->flags;
    580	if (root_offset == 0)
    581		infofc(fc, "empty filesystem");
    582	else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
    583		 ((root_offset != sizeof(struct cramfs_super)) &&
    584		  (root_offset != 512 + sizeof(struct cramfs_super))))
    585	{
    586		errorfc(fc, "bad root offset %lu", root_offset);
    587		return -EINVAL;
    588	}
    589
    590	return 0;
    591}
    592
    593static int cramfs_finalize_super(struct super_block *sb,
    594				 struct cramfs_inode *cramfs_root)
    595{
    596	struct inode *root;
    597
    598	/* Set it all up.. */
    599	sb->s_flags |= SB_RDONLY;
    600	sb->s_time_min = 0;
    601	sb->s_time_max = 0;
    602	sb->s_op = &cramfs_ops;
    603	root = get_cramfs_inode(sb, cramfs_root, 0);
    604	if (IS_ERR(root))
    605		return PTR_ERR(root);
    606	sb->s_root = d_make_root(root);
    607	if (!sb->s_root)
    608		return -ENOMEM;
    609	return 0;
    610}
    611
    612static int cramfs_blkdev_fill_super(struct super_block *sb, struct fs_context *fc)
    613{
    614	struct cramfs_sb_info *sbi;
    615	struct cramfs_super super;
    616	int i, err;
    617
    618	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
    619	if (!sbi)
    620		return -ENOMEM;
    621	sb->s_fs_info = sbi;
    622
    623	/* Invalidate the read buffers on mount: think disk change.. */
    624	for (i = 0; i < READ_BUFFERS; i++)
    625		buffer_blocknr[i] = -1;
    626
    627	err = cramfs_read_super(sb, fc, &super);
    628	if (err)
    629		return err;
    630	return cramfs_finalize_super(sb, &super.root);
    631}
    632
    633static int cramfs_mtd_fill_super(struct super_block *sb, struct fs_context *fc)
    634{
    635	struct cramfs_sb_info *sbi;
    636	struct cramfs_super super;
    637	int err;
    638
    639	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
    640	if (!sbi)
    641		return -ENOMEM;
    642	sb->s_fs_info = sbi;
    643
    644	/* Map only one page for now.  Will remap it when fs size is known. */
    645	err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
    646			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
    647	if (err || sbi->mtd_point_size != PAGE_SIZE) {
    648		pr_err("unable to get direct memory access to mtd:%s\n",
    649		       sb->s_mtd->name);
    650		return err ? : -ENODATA;
    651	}
    652
    653	pr_info("checking physical address %pap for linear cramfs image\n",
    654		&sbi->linear_phys_addr);
    655	err = cramfs_read_super(sb, fc, &super);
    656	if (err)
    657		return err;
    658
    659	/* Remap the whole filesystem now */
    660	pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
    661		sb->s_mtd->name, sbi->size/1024);
    662	mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
    663	err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
    664			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
    665	if (err || sbi->mtd_point_size != sbi->size) {
    666		pr_err("unable to get direct memory access to mtd:%s\n",
    667		       sb->s_mtd->name);
    668		return err ? : -ENODATA;
    669	}
    670
    671	return cramfs_finalize_super(sb, &super.root);
    672}
    673
    674static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
    675{
    676	struct super_block *sb = dentry->d_sb;
    677	u64 id = 0;
    678
    679	if (sb->s_bdev)
    680		id = huge_encode_dev(sb->s_bdev->bd_dev);
    681	else if (sb->s_dev)
    682		id = huge_encode_dev(sb->s_dev);
    683
    684	buf->f_type = CRAMFS_MAGIC;
    685	buf->f_bsize = PAGE_SIZE;
    686	buf->f_blocks = CRAMFS_SB(sb)->blocks;
    687	buf->f_bfree = 0;
    688	buf->f_bavail = 0;
    689	buf->f_files = CRAMFS_SB(sb)->files;
    690	buf->f_ffree = 0;
    691	buf->f_fsid = u64_to_fsid(id);
    692	buf->f_namelen = CRAMFS_MAXPATHLEN;
    693	return 0;
    694}
    695
    696/*
    697 * Read a cramfs directory entry.
    698 */
    699static int cramfs_readdir(struct file *file, struct dir_context *ctx)
    700{
    701	struct inode *inode = file_inode(file);
    702	struct super_block *sb = inode->i_sb;
    703	char *buf;
    704	unsigned int offset;
    705
    706	/* Offset within the thing. */
    707	if (ctx->pos >= inode->i_size)
    708		return 0;
    709	offset = ctx->pos;
    710	/* Directory entries are always 4-byte aligned */
    711	if (offset & 3)
    712		return -EINVAL;
    713
    714	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
    715	if (!buf)
    716		return -ENOMEM;
    717
    718	while (offset < inode->i_size) {
    719		struct cramfs_inode *de;
    720		unsigned long nextoffset;
    721		char *name;
    722		ino_t ino;
    723		umode_t mode;
    724		int namelen;
    725
    726		mutex_lock(&read_mutex);
    727		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
    728		name = (char *)(de+1);
    729
    730		/*
    731		 * Namelengths on disk are shifted by two
    732		 * and the name padded out to 4-byte boundaries
    733		 * with zeroes.
    734		 */
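		/*
		 * For example, the name "hello" occupies two 4-byte words
		 * on disk ("hell" and "o\0\0\0"), so de->namelen is 2 and
		 * the shift below yields namelen == 8; the trailing-zero
		 * loop further down trims it back to 5 before dir_emit().
		 */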
    735		namelen = de->namelen << 2;
    736		memcpy(buf, name, namelen);
    737		ino = cramino(de, OFFSET(inode) + offset);
    738		mode = de->mode;
    739		mutex_unlock(&read_mutex);
    740		nextoffset = offset + sizeof(*de) + namelen;
    741		for (;;) {
    742			if (!namelen) {
    743				kfree(buf);
    744				return -EIO;
    745			}
    746			if (buf[namelen-1])
    747				break;
    748			namelen--;
    749		}
    750		if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
    751			break;
    752
    753		ctx->pos = offset = nextoffset;
    754	}
    755	kfree(buf);
    756	return 0;
    757}
    758
    759/*
    760 * Lookup and fill in the inode data..
    761 */
    762static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
    763{
    764	unsigned int offset = 0;
    765	struct inode *inode = NULL;
    766	int sorted;
    767
    768	mutex_lock(&read_mutex);
    769	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
    770	while (offset < dir->i_size) {
    771		struct cramfs_inode *de;
    772		char *name;
    773		int namelen, retval;
    774		int dir_off = OFFSET(dir) + offset;
    775
    776		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
    777		name = (char *)(de+1);
    778
    779		/* Try to take advantage of sorted directories */
    780		if (sorted && (dentry->d_name.name[0] < name[0]))
    781			break;
    782
    783		namelen = de->namelen << 2;
    784		offset += sizeof(*de) + namelen;
    785
    786		/* Quick check that the name is roughly the right length */
    787		if (((dentry->d_name.len + 3) & ~3) != namelen)
    788			continue;
    789
    790		for (;;) {
    791			if (!namelen) {
    792				inode = ERR_PTR(-EIO);
    793				goto out;
    794			}
    795			if (name[namelen-1])
    796				break;
    797			namelen--;
    798		}
    799		if (namelen != dentry->d_name.len)
    800			continue;
    801		retval = memcmp(dentry->d_name.name, name, namelen);
    802		if (retval > 0)
    803			continue;
    804		if (!retval) {
    805			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
    806			break;
    807		}
    808		/* else (retval < 0) */
    809		if (sorted)
    810			break;
    811	}
    812out:
    813	mutex_unlock(&read_mutex);
    814	return d_splice_alias(inode, dentry);
    815}
    816
    817static int cramfs_read_folio(struct file *file, struct folio *folio)
    818{
    819	struct page *page = &folio->page;
    820	struct inode *inode = page->mapping->host;
    821	u32 maxblock;
    822	int bytes_filled;
    823	void *pgdata;
    824
    825	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    826	bytes_filled = 0;
    827	pgdata = kmap(page);
    828
    829	if (page->index < maxblock) {
    830		struct super_block *sb = inode->i_sb;
    831		u32 blkptr_offset = OFFSET(inode) + page->index * 4;
    832		u32 block_ptr, block_start, block_len;
    833		bool uncompressed, direct;
    834
    835		mutex_lock(&read_mutex);
    836		block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
    837		uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
    838		direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
    839		block_ptr &= ~CRAMFS_BLK_FLAGS;
    840
    841		if (direct) {
    842			/*
    843			 * The block pointer is an absolute start pointer,
    844			 * shifted by 2 bits. The size is included in the
    845			 * first 2 bytes of the data block when compressed,
    846			 * or PAGE_SIZE otherwise.
    847			 */
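			/*
			 * For example, a block pointer of 0x00010000 (after
			 * masking off the flag bits) puts block_start at
			 * image byte 0x40000.  If the block is compressed,
			 * the u16 stored there, say 1833, is the compressed
			 * length and the zlib data follows at 0x40002.
			 */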
    848			block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
    849			if (uncompressed) {
    850				block_len = PAGE_SIZE;
    851				/* if last block: cap to file length */
    852				if (page->index == maxblock - 1)
    853					block_len =
    854						offset_in_page(inode->i_size);
    855			} else {
    856				block_len = *(u16 *)
    857					cramfs_read(sb, block_start, 2);
    858				block_start += 2;
    859			}
    860		} else {
    861			/*
    862			 * The block pointer indicates one past the end of
    863			 * the current block (start of next block). If this
    864			 * is the first block then it starts where the block
    865			 * pointer table ends, otherwise its start comes
    866			 * from the previous block's pointer.
    867			 */
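			/*
			 * Worked example, assuming 4 KB pages: for a 3-page
			 * file, maxblock == 3, so the pointer table is 12
			 * bytes and page 0's data starts right after it at
			 * OFFSET(inode) + 12.  For page 1, the pointer read
			 * back at blkptr_offset - 4 (page 0's entry) gives
			 * block_start, and block_len is the difference
			 * between the two pointers.
			 */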
    868			block_start = OFFSET(inode) + maxblock * 4;
    869			if (page->index)
    870				block_start = *(u32 *)
    871					cramfs_read(sb, blkptr_offset - 4, 4);
    872			/* Beware... previous ptr might be a direct ptr */
    873			if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
    874				/* See comments on earlier code. */
    875				u32 prev_start = block_start;
    876				block_start = prev_start & ~CRAMFS_BLK_FLAGS;
    877				block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
    878				if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
    879					block_start += PAGE_SIZE;
    880				} else {
    881					block_len = *(u16 *)
    882						cramfs_read(sb, block_start, 2);
    883					block_start += 2 + block_len;
    884				}
    885			}
    886			block_start &= ~CRAMFS_BLK_FLAGS;
    887			block_len = block_ptr - block_start;
    888		}
    889
    890		if (block_len == 0)
    891			; /* hole */
    892		else if (unlikely(block_len > 2*PAGE_SIZE ||
    893				  (uncompressed && block_len > PAGE_SIZE))) {
    894			mutex_unlock(&read_mutex);
    895			pr_err("bad data blocksize %u\n", block_len);
    896			goto err;
    897		} else if (uncompressed) {
    898			memcpy(pgdata,
    899			       cramfs_read(sb, block_start, block_len),
    900			       block_len);
    901			bytes_filled = block_len;
    902		} else {
    903			bytes_filled = cramfs_uncompress_block(pgdata,
    904				 PAGE_SIZE,
    905				 cramfs_read(sb, block_start, block_len),
    906				 block_len);
    907		}
    908		mutex_unlock(&read_mutex);
    909		if (unlikely(bytes_filled < 0))
    910			goto err;
    911	}
    912
    913	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
    914	flush_dcache_page(page);
    915	kunmap(page);
    916	SetPageUptodate(page);
    917	unlock_page(page);
    918	return 0;
    919
    920err:
    921	kunmap(page);
    922	ClearPageUptodate(page);
    923	SetPageError(page);
    924	unlock_page(page);
    925	return 0;
    926}
    927
    928static const struct address_space_operations cramfs_aops = {
    929	.read_folio = cramfs_read_folio
    930};
    931
    932/*
    933 * Our operations:
    934 */
    935
    936/*
    937 * A directory can only readdir
    938 */
    939static const struct file_operations cramfs_directory_operations = {
    940	.llseek		= generic_file_llseek,
    941	.read		= generic_read_dir,
    942	.iterate_shared	= cramfs_readdir,
    943};
    944
    945static const struct inode_operations cramfs_dir_inode_operations = {
    946	.lookup		= cramfs_lookup,
    947};
    948
    949static const struct super_operations cramfs_ops = {
    950	.statfs		= cramfs_statfs,
    951};
    952
    953static int cramfs_get_tree(struct fs_context *fc)
    954{
    955	int ret = -ENOPROTOOPT;
    956
    957	if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
    958		ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
    959		if (!ret)
    960			return 0;
    961	}
    962	if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
    963		ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
    964	return ret;
    965}
    966
    967static const struct fs_context_operations cramfs_context_ops = {
    968	.get_tree	= cramfs_get_tree,
    969	.reconfigure	= cramfs_reconfigure,
    970};
    971
    972/*
    973 * Set up the filesystem mount context.
    974 */
    975static int cramfs_init_fs_context(struct fs_context *fc)
    976{
    977	fc->ops = &cramfs_context_ops;
    978	return 0;
    979}
    980
    981static struct file_system_type cramfs_fs_type = {
    982	.owner		= THIS_MODULE,
    983	.name		= "cramfs",
    984	.init_fs_context = cramfs_init_fs_context,
    985	.kill_sb	= cramfs_kill_sb,
    986	.fs_flags	= FS_REQUIRES_DEV,
    987};
    988MODULE_ALIAS_FS("cramfs");
    989
    990static int __init init_cramfs_fs(void)
    991{
    992	int rv;
    993
    994	rv = cramfs_uncompress_init();
    995	if (rv < 0)
    996		return rv;
    997	rv = register_filesystem(&cramfs_fs_type);
    998	if (rv < 0)
    999		cramfs_uncompress_exit();
   1000	return rv;
   1001}
   1002
   1003static void __exit exit_cramfs_fs(void)
   1004{
   1005	cramfs_uncompress_exit();
   1006	unregister_filesystem(&cramfs_fs_type);
   1007}
   1008
   1009module_init(init_cramfs_fs)
   1010module_exit(exit_cramfs_fs)
   1011MODULE_LICENSE("GPL");