cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

file.c (25776B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 *  linux/fs/affs/file.c
      4 *
      5 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
      6 *
      7 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
      8 *
      9 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
     10 *
     11 *  (C) 1991  Linus Torvalds - minix filesystem
     12 *
     13 *  affs regular file handling primitives
     14 */
     15
     16#include <linux/uio.h>
     17#include <linux/blkdev.h>
     18#include "affs.h"
     19
     20static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
     21
     22static int
     23affs_file_open(struct inode *inode, struct file *filp)
     24{
     25	pr_debug("open(%lu,%d)\n",
     26		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
     27	atomic_inc(&AFFS_I(inode)->i_opencnt);
     28	return 0;
     29}
     30
     31static int
     32affs_file_release(struct inode *inode, struct file *filp)
     33{
     34	pr_debug("release(%lu, %d)\n",
     35		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
     36
     37	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
     38		inode_lock(inode);
     39		if (inode->i_size != AFFS_I(inode)->mmu_private)
     40			affs_truncate(inode);
     41		affs_free_prealloc(inode);
     42		inode_unlock(inode);
     43	}
     44
     45	return 0;
     46}
     47
     48static int
     49affs_grow_extcache(struct inode *inode, u32 lc_idx)
     50{
     51	struct super_block	*sb = inode->i_sb;
     52	struct buffer_head	*bh;
     53	u32 lc_max;
     54	int i, j, key;
     55
     56	if (!AFFS_I(inode)->i_lc) {
     57		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
     58		if (!ptr)
     59			return -ENOMEM;
     60		AFFS_I(inode)->i_lc = (u32 *)ptr;
     61		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
     62	}
     63
     64	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;
     65
     66	if (AFFS_I(inode)->i_extcnt > lc_max) {
     67		u32 lc_shift, lc_mask, tmp, off;
     68
     69		/* need to recalculate linear cache, start from old size */
     70		lc_shift = AFFS_I(inode)->i_lc_shift;
     71		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
     72		for (; tmp; tmp >>= 1)
     73			lc_shift++;
     74		lc_mask = (1 << lc_shift) - 1;
     75
     76		/* fix idx and old size to new shift */
     77		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
     78		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
     79
     80		/* first shrink old cache to make more space */
     81		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
     82		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
     83			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];
     84
     85		AFFS_I(inode)->i_lc_shift = lc_shift;
     86		AFFS_I(inode)->i_lc_mask = lc_mask;
     87	}
     88
     89	/* fill cache to the needed index */
     90	i = AFFS_I(inode)->i_lc_size;
     91	AFFS_I(inode)->i_lc_size = lc_idx + 1;
     92	for (; i <= lc_idx; i++) {
     93		if (!i) {
     94			AFFS_I(inode)->i_lc[0] = inode->i_ino;
     95			continue;
     96		}
     97		key = AFFS_I(inode)->i_lc[i - 1];
     98		j = AFFS_I(inode)->i_lc_mask + 1;
     99		// unlock cache
    100		for (; j > 0; j--) {
    101			bh = affs_bread(sb, key);
    102			if (!bh)
    103				goto err;
    104			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
    105			affs_brelse(bh);
    106		}
    107		// lock cache
    108		AFFS_I(inode)->i_lc[i] = key;
    109	}
    110
    111	return 0;
    112
    113err:
    114	// lock cache
    115	return -EIO;
    116}
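/*
 * Rough picture of the extent cache grown above, assuming AFFS_LC_SIZE is
 * 512 (half of a 4 KiB AFFS_CACHE_SIZE page holding u32 keys): i_lc[i]
 * caches the on-disk key of extent block (i << i_lc_shift), so with
 * i_lc_shift == 0 it covers extent blocks 0..511 directly.  Once i_extcnt
 * outgrows that span, i_lc_shift is bumped until the same slots cover the
 * whole extension chain at a coarser stride, and the fill loop at the end
 * walks the chain on disk ((i_lc_mask + 1) links per slot) to populate
 * i_lc[] up to the requested lc_idx.
 */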
    117
    118static struct buffer_head *
    119affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
    120{
    121	struct super_block *sb = inode->i_sb;
    122	struct buffer_head *new_bh;
    123	u32 blocknr, tmp;
    124
    125	blocknr = affs_alloc_block(inode, bh->b_blocknr);
    126	if (!blocknr)
    127		return ERR_PTR(-ENOSPC);
    128
    129	new_bh = affs_getzeroblk(sb, blocknr);
    130	if (!new_bh) {
    131		affs_free_block(sb, blocknr);
    132		return ERR_PTR(-EIO);
    133	}
    134
    135	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
    136	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
    137	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
    138	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
    139	affs_fix_checksum(sb, new_bh);
    140
    141	mark_buffer_dirty_inode(new_bh, inode);
    142
    143	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
    144	if (tmp)
    145		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
    146	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
    147	affs_adjust_checksum(bh, blocknr - tmp);
    148	mark_buffer_dirty_inode(bh, inode);
    149
    150	AFFS_I(inode)->i_extcnt++;
    151	mark_inode_dirty(inode);
    152
    153	return new_bh;
    154}
    155
    156static inline struct buffer_head *
    157affs_get_extblock(struct inode *inode, u32 ext)
    158{
    159	/* inline the simplest case: same extended block as last time */
    160	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
    161	if (ext == AFFS_I(inode)->i_ext_last)
    162		get_bh(bh);
    163	else
    164		/* we have to do more (not inlined) */
    165		bh = affs_get_extblock_slow(inode, ext);
    166
    167	return bh;
    168}
    169
    170static struct buffer_head *
    171affs_get_extblock_slow(struct inode *inode, u32 ext)
    172{
    173	struct super_block *sb = inode->i_sb;
    174	struct buffer_head *bh;
    175	u32 ext_key;
    176	u32 lc_idx, lc_off, ac_idx;
    177	u32 tmp, idx;
    178
    179	if (ext == AFFS_I(inode)->i_ext_last + 1) {
    180		/* read the next extended block from the current one */
    181		bh = AFFS_I(inode)->i_ext_bh;
    182		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
    183		if (ext < AFFS_I(inode)->i_extcnt)
    184			goto read_ext;
    185		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
    186		bh = affs_alloc_extblock(inode, bh, ext);
    187		if (IS_ERR(bh))
    188			return bh;
    189		goto store_ext;
    190	}
    191
    192	if (ext == 0) {
    193		/* we seek back to the file header block */
    194		ext_key = inode->i_ino;
    195		goto read_ext;
    196	}
    197
    198	if (ext >= AFFS_I(inode)->i_extcnt) {
    199		struct buffer_head *prev_bh;
    200
    201		/* allocate a new extended block */
    202		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
    203
    204		/* get previous extended block */
    205		prev_bh = affs_get_extblock(inode, ext - 1);
    206		if (IS_ERR(prev_bh))
    207			return prev_bh;
    208		bh = affs_alloc_extblock(inode, prev_bh, ext);
    209		affs_brelse(prev_bh);
    210		if (IS_ERR(bh))
    211			return bh;
    212		goto store_ext;
    213	}
    214
    215again:
    216	/* check if there is an extended cache and whether it's large enough */
    217	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
    218	lc_off = ext & AFFS_I(inode)->i_lc_mask;
    219
    220	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
    221		int err;
    222
    223		err = affs_grow_extcache(inode, lc_idx);
    224		if (err)
    225			return ERR_PTR(err);
    226		goto again;
    227	}
    228
    229	/* every n'th key we find in the linear cache */
    230	if (!lc_off) {
    231		ext_key = AFFS_I(inode)->i_lc[lc_idx];
    232		goto read_ext;
    233	}
    234
    235	/* maybe it's still in the associative cache */
    236	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
    237	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
    238		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
    239		goto read_ext;
    240	}
    241
    242	/* try to find one of the previous extended blocks */
    243	tmp = ext;
    244	idx = ac_idx;
    245	while (--tmp, --lc_off > 0) {
    246		idx = (idx - 1) & AFFS_AC_MASK;
    247		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
    248			ext_key = AFFS_I(inode)->i_ac[idx].key;
    249			goto find_ext;
    250		}
    251	}
    252
    253	/* fall back to the linear cache */
    254	ext_key = AFFS_I(inode)->i_lc[lc_idx];
    255find_ext:
    256	/* read all extended blocks until we find the one we need */
    257	//unlock cache
    258	do {
    259		bh = affs_bread(sb, ext_key);
    260		if (!bh)
    261			goto err_bread;
    262		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
    263		affs_brelse(bh);
    264		tmp++;
    265	} while (tmp < ext);
    266	//lock cache
    267
    268	/* store it in the associative cache */
    269	// recalculate ac_idx?
    270	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
    271	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;
    272
    273read_ext:
    274	/* finally read the right extended block */
    275	//unlock cache
    276	bh = affs_bread(sb, ext_key);
    277	if (!bh)
    278		goto err_bread;
    279	//lock cache
    280
    281store_ext:
    282	/* release old cached extended block and store the new one */
    283	affs_brelse(AFFS_I(inode)->i_ext_bh);
    284	AFFS_I(inode)->i_ext_last = ext;
    285	AFFS_I(inode)->i_ext_bh = bh;
    286	get_bh(bh);
    287
    288	return bh;
    289
    290err_bread:
    291	affs_brelse(bh);
    292	return ERR_PTR(-EIO);
    293}
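/*
 * Slow-path lookup order, roughly as the code above reads:
 *
 *  1. ext == i_ext_last + 1: follow ->extension of the cached extent block,
 *     allocating a fresh extent block if we are at the end of the chain.
 *  2. ext == 0: the file header block itself (its key equals i_ino).
 *  3. ext >= i_extcnt: allocate a new extent block chained off ext - 1.
 *  4. lc_off == 0: linear cache hit, key taken from i_lc[lc_idx].
 *  5. i_ac[ac_idx].ext == ext: associative cache hit.
 *  6. otherwise: start from the nearest cached predecessor (or the linear
 *     cache entry) and walk the on-disk extension chain until 'ext' is
 *     reached, then remember the result in i_ac[] for next time.
 */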
    294
    295static int
    296affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
    297{
    298	struct super_block	*sb = inode->i_sb;
    299	struct buffer_head	*ext_bh;
    300	u32			 ext;
    301
    302	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
    303		 (unsigned long long)block);
    304
    305	BUG_ON(block > (sector_t)0x7fffffffUL);
    306
    307	if (block >= AFFS_I(inode)->i_blkcnt) {
    308		if (block > AFFS_I(inode)->i_blkcnt || !create)
    309			goto err_big;
    310	} else
    311		create = 0;
    312
    313	//lock cache
    314	affs_lock_ext(inode);
    315
    316	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
    317	block -= ext * AFFS_SB(sb)->s_hashsize;
    318	ext_bh = affs_get_extblock(inode, ext);
    319	if (IS_ERR(ext_bh))
    320		goto err_ext;
    321	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
    322
    323	if (create) {
    324		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
    325		if (!blocknr)
    326			goto err_alloc;
    327		set_buffer_new(bh_result);
    328		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
    329		AFFS_I(inode)->i_blkcnt++;
    330
    331		/* store new block */
    332		if (bh_result->b_blocknr)
    333			affs_warning(sb, "get_block",
    334				     "block already set (%llx)",
    335				     (unsigned long long)bh_result->b_blocknr);
    336		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
    337		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
    338		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
    339		bh_result->b_blocknr = blocknr;
    340
    341		if (!block) {
    342			/* insert first block into header block */
    343			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
    344			if (tmp)
    345				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
    346			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
    347			affs_adjust_checksum(ext_bh, blocknr - tmp);
    348		}
    349	}
    350
    351	affs_brelse(ext_bh);
    352	//unlock cache
    353	affs_unlock_ext(inode);
    354	return 0;
    355
    356err_big:
    357	affs_error(inode->i_sb, "get_block", "strange block request %llu",
    358		   (unsigned long long)block);
    359	return -EIO;
    360err_ext:
    361	// unlock cache
    362	affs_unlock_ext(inode);
    363	return PTR_ERR(ext_bh);
    364err_alloc:
    365	brelse(ext_bh);
    366	clear_buffer_mapped(bh_result);
    367	bh_result->b_bdev = NULL;
    368	// unlock cache
    369	affs_unlock_ext(inode);
    370	return -ENOSPC;
    371}
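/*
 * Mapping arithmetic, as a rough example: on a 512-byte-block volume the
 * block table of a file header / extension block typically holds
 * s_hashsize == 72 entries, so logical block 100 lives in extent block
 * ext = 100 / 72 = 1, slot 100 - 1 * 72 = 28, and the device block is
 * whatever AFFS_BLOCK(sb, ext_bh, 28) contains (allocated on the spot
 * when create is set).
 */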
    372
    373static int affs_writepage(struct page *page, struct writeback_control *wbc)
    374{
    375	return block_write_full_page(page, affs_get_block, wbc);
    376}
    377
    378static int affs_read_folio(struct file *file, struct folio *folio)
    379{
    380	return block_read_full_folio(folio, affs_get_block);
    381}
    382
    383static void affs_write_failed(struct address_space *mapping, loff_t to)
    384{
    385	struct inode *inode = mapping->host;
    386
    387	if (to > inode->i_size) {
    388		truncate_pagecache(inode, inode->i_size);
    389		affs_truncate(inode);
    390	}
    391}
    392
    393static ssize_t
    394affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
    395{
    396	struct file *file = iocb->ki_filp;
    397	struct address_space *mapping = file->f_mapping;
    398	struct inode *inode = mapping->host;
    399	size_t count = iov_iter_count(iter);
    400	loff_t offset = iocb->ki_pos;
    401	ssize_t ret;
    402
    403	if (iov_iter_rw(iter) == WRITE) {
    404		loff_t size = offset + count;
    405
    406		if (AFFS_I(inode)->mmu_private < size)
    407			return 0;
    408	}
    409
    410	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
    411	if (ret < 0 && iov_iter_rw(iter) == WRITE)
    412		affs_write_failed(mapping, offset + count);
    413	return ret;
    414}
    415
    416static int affs_write_begin(struct file *file, struct address_space *mapping,
    417			loff_t pos, unsigned len,
    418			struct page **pagep, void **fsdata)
    419{
    420	int ret;
    421
    422	*pagep = NULL;
    423	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
    424				affs_get_block,
    425				&AFFS_I(mapping->host)->mmu_private);
    426	if (unlikely(ret))
    427		affs_write_failed(mapping, pos + len);
    428
    429	return ret;
    430}
    431
    432static int affs_write_end(struct file *file, struct address_space *mapping,
    433			  loff_t pos, unsigned int len, unsigned int copied,
    434			  struct page *page, void *fsdata)
    435{
    436	struct inode *inode = mapping->host;
    437	int ret;
    438
    439	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
    440
    441	/* Clear Archived bit on file writes, as AmigaOS would do */
    442	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
    443		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
    444		mark_inode_dirty(inode);
    445	}
    446
    447	return ret;
    448}
    449
    450static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
    451{
    452	return generic_block_bmap(mapping,block,affs_get_block);
    453}
    454
    455const struct address_space_operations affs_aops = {
    456	.dirty_folio	= block_dirty_folio,
    457	.invalidate_folio = block_invalidate_folio,
    458	.read_folio = affs_read_folio,
    459	.writepage = affs_writepage,
    460	.write_begin = affs_write_begin,
    461	.write_end = affs_write_end,
    462	.direct_IO = affs_direct_IO,
    463	.bmap = _affs_bmap
    464};
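/*
 * affs_aops serves FFS-style volumes, whose data blocks contain raw file
 * data that affs_get_block can map directly; OFS volumes, whose data
 * blocks carry a 24-byte header, use affs_aops_ofs defined further below.
 * _affs_bmap backs the FIBMAP ioctl through generic_block_bmap(), which
 * ends up calling affs_get_block() with create == 0.
 */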
    465
    466static inline struct buffer_head *
    467affs_bread_ino(struct inode *inode, int block, int create)
    468{
    469	struct buffer_head *bh, tmp_bh;
    470	int err;
    471
    472	tmp_bh.b_state = 0;
    473	err = affs_get_block(inode, block, &tmp_bh, create);
    474	if (!err) {
    475		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
    476		if (bh) {
    477			bh->b_state |= tmp_bh.b_state;
    478			return bh;
    479		}
    480		err = -EIO;
    481	}
    482	return ERR_PTR(err);
    483}
    484
    485static inline struct buffer_head *
    486affs_getzeroblk_ino(struct inode *inode, int block)
    487{
    488	struct buffer_head *bh, tmp_bh;
    489	int err;
    490
    491	tmp_bh.b_state = 0;
    492	err = affs_get_block(inode, block, &tmp_bh, 1);
    493	if (!err) {
    494		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
    495		if (bh) {
    496			bh->b_state |= tmp_bh.b_state;
    497			return bh;
    498		}
    499		err = -EIO;
    500	}
    501	return ERR_PTR(err);
    502}
    503
    504static inline struct buffer_head *
    505affs_getemptyblk_ino(struct inode *inode, int block)
    506{
    507	struct buffer_head *bh, tmp_bh;
    508	int err;
    509
    510	tmp_bh.b_state = 0;
    511	err = affs_get_block(inode, block, &tmp_bh, 1);
    512	if (!err) {
    513		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
    514		if (bh) {
    515			bh->b_state |= tmp_bh.b_state;
    516			return bh;
    517		}
    518		err = -EIO;
    519	}
    520	return ERR_PTR(err);
    521}
    522
    523static int
    524affs_do_readpage_ofs(struct page *page, unsigned to, int create)
    525{
    526	struct inode *inode = page->mapping->host;
    527	struct super_block *sb = inode->i_sb;
    528	struct buffer_head *bh;
    529	char *data;
    530	unsigned pos = 0;
    531	u32 bidx, boff, bsize;
    532	u32 tmp;
    533
    534	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
    535		 page->index, to);
    536	BUG_ON(to > PAGE_SIZE);
    537	bsize = AFFS_SB(sb)->s_data_blksize;
    538	tmp = page->index << PAGE_SHIFT;
    539	bidx = tmp / bsize;
    540	boff = tmp % bsize;
    541
    542	while (pos < to) {
    543		bh = affs_bread_ino(inode, bidx, create);
    544		if (IS_ERR(bh))
    545			return PTR_ERR(bh);
    546		tmp = min(bsize - boff, to - pos);
    547		BUG_ON(pos + tmp > to || tmp > bsize);
    548		data = kmap_atomic(page);
    549		memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
    550		kunmap_atomic(data);
    551		affs_brelse(bh);
    552		bidx++;
    553		pos += tmp;
    554		boff = 0;
    555	}
    556	flush_dcache_page(page);
    557	return 0;
    558}
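/*
 * A rough example of the index arithmetic above: OFS data blocks carry a
 * 24-byte header, so on a 512-byte-block volume s_data_blksize is 488.
 * For page index 1 with 4 KiB pages, tmp = 1 << 12 = 4096, giving
 * bidx = 4096 / 488 = 8 and boff = 4096 % 488 = 192; the loop then copies
 * block by block until 'to' bytes of the page have been filled.
 */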
    559
    560static int
    561affs_extent_file_ofs(struct inode *inode, u32 newsize)
    562{
    563	struct super_block *sb = inode->i_sb;
    564	struct buffer_head *bh, *prev_bh;
    565	u32 bidx, boff;
    566	u32 size, bsize;
    567	u32 tmp;
    568
    569	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
    570	bsize = AFFS_SB(sb)->s_data_blksize;
    571	bh = NULL;
    572	size = AFFS_I(inode)->mmu_private;
    573	bidx = size / bsize;
    574	boff = size % bsize;
    575	if (boff) {
    576		bh = affs_bread_ino(inode, bidx, 0);
    577		if (IS_ERR(bh))
    578			return PTR_ERR(bh);
    579		tmp = min(bsize - boff, newsize - size);
    580		BUG_ON(boff + tmp > bsize || tmp > bsize);
    581		memset(AFFS_DATA(bh) + boff, 0, tmp);
    582		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
    583		affs_fix_checksum(sb, bh);
    584		mark_buffer_dirty_inode(bh, inode);
    585		size += tmp;
    586		bidx++;
    587	} else if (bidx) {
    588		bh = affs_bread_ino(inode, bidx - 1, 0);
    589		if (IS_ERR(bh))
    590			return PTR_ERR(bh);
    591	}
    592
    593	while (size < newsize) {
    594		prev_bh = bh;
    595		bh = affs_getzeroblk_ino(inode, bidx);
    596		if (IS_ERR(bh))
    597			goto out;
    598		tmp = min(bsize, newsize - size);
    599		BUG_ON(tmp > bsize);
    600		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
    601		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
    602		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
    603		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
    604		affs_fix_checksum(sb, bh);
    605		bh->b_state &= ~(1UL << BH_New);
    606		mark_buffer_dirty_inode(bh, inode);
    607		if (prev_bh) {
    608			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
    609
    610			if (tmp_next)
    611				affs_warning(sb, "extent_file_ofs",
    612					     "next block already set for %d (%d)",
    613					     bidx, tmp_next);
    614			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
    615			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
    616			mark_buffer_dirty_inode(prev_bh, inode);
    617			affs_brelse(prev_bh);
    618		}
    619		size += bsize;
    620		bidx++;
    621	}
    622	affs_brelse(bh);
    623	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
    624	return 0;
    625
    626out:
    627	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
    628	return PTR_ERR(bh);
    629}
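/*
 * affs_extent_file_ofs() grows an OFS file on disk before a write that
 * starts past the current size: it zero-pads the last partial data block
 * (growing its ->size), then appends zero-filled data blocks linked
 * through ->next until 'newsize' is covered, so the following write_begin
 * finds fully initialised blocks.
 */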
    630
    631static int
    632affs_read_folio_ofs(struct file *file, struct folio *folio)
    633{
    634	struct page *page = &folio->page;
    635	struct inode *inode = page->mapping->host;
    636	u32 to;
    637	int err;
    638
    639	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
    640	to = PAGE_SIZE;
    641	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
    642		to = inode->i_size & ~PAGE_MASK;
    643		memset(page_address(page) + to, 0, PAGE_SIZE - to);
    644	}
    645
    646	err = affs_do_readpage_ofs(page, to, 0);
    647	if (!err)
    648		SetPageUptodate(page);
    649	unlock_page(page);
    650	return err;
    651}
    652
    653static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
    654				loff_t pos, unsigned len,
    655				struct page **pagep, void **fsdata)
    656{
    657	struct inode *inode = mapping->host;
    658	struct page *page;
    659	pgoff_t index;
    660	int err = 0;
    661
    662	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
    663		 pos + len);
    664	if (pos > AFFS_I(inode)->mmu_private) {
    665		/* XXX: this probably leaves a too-big i_size in case of
    666		 * failure. Should really be updating i_size at write_end time
    667		 */
    668		err = affs_extent_file_ofs(inode, pos);
    669		if (err)
    670			return err;
    671	}
    672
    673	index = pos >> PAGE_SHIFT;
    674	page = grab_cache_page_write_begin(mapping, index);
    675	if (!page)
    676		return -ENOMEM;
    677	*pagep = page;
    678
    679	if (PageUptodate(page))
    680		return 0;
    681
    682	/* XXX: inefficient but safe in the face of short writes */
    683	err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
    684	if (err) {
    685		unlock_page(page);
    686		put_page(page);
    687	}
    688	return err;
    689}
    690
    691static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
    692				loff_t pos, unsigned len, unsigned copied,
    693				struct page *page, void *fsdata)
    694{
    695	struct inode *inode = mapping->host;
    696	struct super_block *sb = inode->i_sb;
    697	struct buffer_head *bh, *prev_bh;
    698	char *data;
    699	u32 bidx, boff, bsize;
    700	unsigned from, to;
    701	u32 tmp;
    702	int written;
    703
    704	from = pos & (PAGE_SIZE - 1);
    705	to = from + len;
    706	/*
    707	 * XXX: not sure if this can handle short copies (len < copied), but
    708	 * we don't have to, because the page should always be uptodate here,
    709	 * due to write_begin.
    710	 */
    711
    712	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
    713		 pos + len);
    714	bsize = AFFS_SB(sb)->s_data_blksize;
    715	data = page_address(page);
    716
    717	bh = NULL;
    718	written = 0;
    719	tmp = (page->index << PAGE_SHIFT) + from;
    720	bidx = tmp / bsize;
    721	boff = tmp % bsize;
    722	if (boff) {
    723		bh = affs_bread_ino(inode, bidx, 0);
    724		if (IS_ERR(bh)) {
    725			written = PTR_ERR(bh);
    726			goto err_first_bh;
    727		}
    728		tmp = min(bsize - boff, to - from);
    729		BUG_ON(boff + tmp > bsize || tmp > bsize);
    730		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
    731		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
    732		affs_fix_checksum(sb, bh);
    733		mark_buffer_dirty_inode(bh, inode);
    734		written += tmp;
    735		from += tmp;
    736		bidx++;
    737	} else if (bidx) {
    738		bh = affs_bread_ino(inode, bidx - 1, 0);
    739		if (IS_ERR(bh)) {
    740			written = PTR_ERR(bh);
    741			goto err_first_bh;
    742		}
    743	}
    744	while (from + bsize <= to) {
    745		prev_bh = bh;
    746		bh = affs_getemptyblk_ino(inode, bidx);
    747		if (IS_ERR(bh))
    748			goto err_bh;
    749		memcpy(AFFS_DATA(bh), data + from, bsize);
    750		if (buffer_new(bh)) {
    751			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
    752			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
    753			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
    754			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
    755			AFFS_DATA_HEAD(bh)->next = 0;
    756			bh->b_state &= ~(1UL << BH_New);
    757			if (prev_bh) {
    758				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
    759
    760				if (tmp_next)
    761					affs_warning(sb, "commit_write_ofs",
    762						     "next block already set for %d (%d)",
    763						     bidx, tmp_next);
    764				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
    765				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
    766				mark_buffer_dirty_inode(prev_bh, inode);
    767			}
    768		}
    769		affs_brelse(prev_bh);
    770		affs_fix_checksum(sb, bh);
    771		mark_buffer_dirty_inode(bh, inode);
    772		written += bsize;
    773		from += bsize;
    774		bidx++;
    775	}
    776	if (from < to) {
    777		prev_bh = bh;
    778		bh = affs_bread_ino(inode, bidx, 1);
    779		if (IS_ERR(bh))
    780			goto err_bh;
    781		tmp = min(bsize, to - from);
    782		BUG_ON(tmp > bsize);
    783		memcpy(AFFS_DATA(bh), data + from, tmp);
    784		if (buffer_new(bh)) {
    785			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
    786			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
    787			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
    788			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
    789			AFFS_DATA_HEAD(bh)->next = 0;
    790			bh->b_state &= ~(1UL << BH_New);
    791			if (prev_bh) {
    792				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
    793
    794				if (tmp_next)
    795					affs_warning(sb, "commit_write_ofs",
    796						     "next block already set for %d (%d)",
    797						     bidx, tmp_next);
    798				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
    799				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
    800				mark_buffer_dirty_inode(prev_bh, inode);
    801			}
    802		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
    803			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
    804		affs_brelse(prev_bh);
    805		affs_fix_checksum(sb, bh);
    806		mark_buffer_dirty_inode(bh, inode);
    807		written += tmp;
    808		from += tmp;
    809		bidx++;
    810	}
    811	SetPageUptodate(page);
    812
    813done:
    814	affs_brelse(bh);
    815	tmp = (page->index << PAGE_SHIFT) + from;
    816	if (tmp > inode->i_size)
    817		inode->i_size = AFFS_I(inode)->mmu_private = tmp;
    818
    819	/* Clear Archived bit on file writes, as AmigaOS would do */
    820	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
    821		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
    822		mark_inode_dirty(inode);
    823	}
    824
    825err_first_bh:
    826	unlock_page(page);
    827	put_page(page);
    828
    829	return written;
    830
    831err_bh:
    832	bh = prev_bh;
    833	if (!written)
    834		written = PTR_ERR(bh);
    835	goto done;
    836}
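/*
 * The OFS write-out above proceeds in three phases, roughly:
 *
 *  1. leading partial block (boff != 0): copy into the existing data block
 *     and grow its ->size field;
 *  2. whole blocks: grab empty blocks, initialise the OFS data header
 *     (ptype/key/sequence/size) for freshly allocated ones and link them
 *     into the chain via the previous block's ->next pointer;
 *  3. trailing partial block: as phase 2, but only the remaining bytes.
 *
 * 'written' accumulates the bytes committed; i_size and mmu_private are
 * advanced at 'done:' and the Archived protection bit is cleared, as on
 * plain writes.
 */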
    837
    838const struct address_space_operations affs_aops_ofs = {
    839	.dirty_folio	= block_dirty_folio,
    840	.invalidate_folio = block_invalidate_folio,
    841	.read_folio = affs_read_folio_ofs,
    842	//.writepage = affs_writepage_ofs,
    843	.write_begin = affs_write_begin_ofs,
    844	.write_end = affs_write_end_ofs
    845};
    846
    847/* Free any preallocated blocks. */
    848
    849void
    850affs_free_prealloc(struct inode *inode)
    851{
    852	struct super_block *sb = inode->i_sb;
    853
    854	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);
    855
    856	while (AFFS_I(inode)->i_pa_cnt) {
    857		AFFS_I(inode)->i_pa_cnt--;
    858		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
    859	}
    860}
    861
    862/* Truncate (or enlarge) a file to the requested size. */
    863
    864void
    865affs_truncate(struct inode *inode)
    866{
    867	struct super_block *sb = inode->i_sb;
    868	u32 ext, ext_key;
    869	u32 last_blk, blkcnt, blk;
    870	u32 size;
    871	struct buffer_head *ext_bh;
    872	int i;
    873
    874	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
    875		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);
    876
    877	last_blk = 0;
    878	ext = 0;
    879	if (inode->i_size) {
    880		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
    881		ext = last_blk / AFFS_SB(sb)->s_hashsize;
    882	}
    883
    884	if (inode->i_size > AFFS_I(inode)->mmu_private) {
    885		struct address_space *mapping = inode->i_mapping;
    886		struct page *page;
    887		void *fsdata;
    888		loff_t isize = inode->i_size;
    889		int res;
    890
    891		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata);
    892		if (!res)
    893			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
    894		else
    895			inode->i_size = AFFS_I(inode)->mmu_private;
    896		mark_inode_dirty(inode);
    897		return;
    898	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
    899		return;
    900
    901	// lock cache
    902	ext_bh = affs_get_extblock(inode, ext);
    903	if (IS_ERR(ext_bh)) {
    904		affs_warning(sb, "truncate",
    905			     "unexpected read error for ext block %u (%ld)",
    906			     ext, PTR_ERR(ext_bh));
    907		return;
    908	}
    909	if (AFFS_I(inode)->i_lc) {
    910		/* clear linear cache */
    911		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
    912		if (AFFS_I(inode)->i_lc_size > i) {
    913			AFFS_I(inode)->i_lc_size = i;
    914			for (; i < AFFS_LC_SIZE; i++)
    915				AFFS_I(inode)->i_lc[i] = 0;
    916		}
    917		/* clear associative cache */
    918		for (i = 0; i < AFFS_AC_SIZE; i++)
    919			if (AFFS_I(inode)->i_ac[i].ext >= ext)
    920				AFFS_I(inode)->i_ac[i].ext = 0;
    921	}
    922	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
    923
    924	blkcnt = AFFS_I(inode)->i_blkcnt;
    925	i = 0;
    926	blk = last_blk;
    927	if (inode->i_size) {
    928		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
    929		blk++;
    930	} else
    931		AFFS_HEAD(ext_bh)->first_data = 0;
    932	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
    933	size = AFFS_SB(sb)->s_hashsize;
    934	if (size > blkcnt - blk + i)
    935		size = blkcnt - blk + i;
    936	for (; i < size; i++, blk++) {
    937		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
    938		AFFS_BLOCK(sb, ext_bh, i) = 0;
    939	}
    940	AFFS_TAIL(sb, ext_bh)->extension = 0;
    941	affs_fix_checksum(sb, ext_bh);
    942	mark_buffer_dirty_inode(ext_bh, inode);
    943	affs_brelse(ext_bh);
    944
    945	if (inode->i_size) {
    946		AFFS_I(inode)->i_blkcnt = last_blk + 1;
    947		AFFS_I(inode)->i_extcnt = ext + 1;
    948		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
    949			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
    950			u32 tmp;
    951			if (IS_ERR(bh)) {
    952				affs_warning(sb, "truncate",
    953					     "unexpected read error for last block %u (%ld)",
    954					     ext, PTR_ERR(bh));
    955				return;
    956			}
    957			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
    958			AFFS_DATA_HEAD(bh)->next = 0;
    959			affs_adjust_checksum(bh, -tmp);
    960			affs_brelse(bh);
    961		}
    962	} else {
    963		AFFS_I(inode)->i_blkcnt = 0;
    964		AFFS_I(inode)->i_extcnt = 1;
    965	}
    966	AFFS_I(inode)->mmu_private = inode->i_size;
    967	// unlock cache
    968
    969	while (ext_key) {
    970		ext_bh = affs_bread(sb, ext_key);
    971		size = AFFS_SB(sb)->s_hashsize;
    972		if (size > blkcnt - blk)
    973			size = blkcnt - blk;
    974		for (i = 0; i < size; i++, blk++)
    975			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
    976		affs_free_block(sb, ext_key);
    977		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
    978		affs_brelse(ext_bh);
    979	}
    980	affs_free_prealloc(inode);
    981}
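/*
 * Shrinking truncate, in short: the extent block holding the new last data
 * block is trimmed (block table entries past the new end are freed and
 * block_count rewritten), its ->extension pointer is cut, and every later
 * extent block in the chain is then read, its remaining data blocks freed,
 * and the extent block itself released.  On OFS the ->next pointer of the
 * new last data block is cleared as well.  An enlarging truncate is handled
 * earlier by a zero-length write_begin/write_end at the new i_size.
 */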
    982
    983int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
    984{
    985	struct inode *inode = filp->f_mapping->host;
    986	int ret, err;
    987
    988	err = file_write_and_wait_range(filp, start, end);
    989	if (err)
    990		return err;
    991
    992	inode_lock(inode);
    993	ret = write_inode_now(inode, 0);
    994	err = sync_blockdev(inode->i_sb->s_bdev);
    995	if (!ret)
    996		ret = err;
    997	inode_unlock(inode);
    998	return ret;
    999}
   1000const struct file_operations affs_file_operations = {
   1001	.llseek		= generic_file_llseek,
   1002	.read_iter	= generic_file_read_iter,
   1003	.write_iter	= generic_file_write_iter,
   1004	.mmap		= generic_file_mmap,
   1005	.open		= affs_file_open,
   1006	.release	= affs_file_release,
   1007	.fsync		= affs_file_fsync,
   1008	.splice_read	= generic_file_splice_read,
   1009};
   1010
   1011const struct inode_operations affs_file_inode_operations = {
   1012	.setattr	= affs_notify_change,
   1013};