cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

itree_common.c


// SPDX-License-Identifier: GPL-2.0
/* Generic part */

typedef struct {
	block_t	*p;			/* where the block pointer is stored */
	block_t	key;			/* value of *p when it was read */
	struct buffer_head *bh;		/* buffer holding *p, NULL for i_data */
} Indirect;

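/* Serialises readers of a pointer chain against truncate changing it. */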
static DEFINE_RWLOCK(pointers_lock);

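/* Record one step of a chain: where the pointer lives and its value. */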
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

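/* Return 1 if no pointer in chain[from..to] changed since it was read. */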
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

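/* First address past the block pointers stored in an indirect block. */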
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}

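/*
 * Walk the indirect chain described by offsets[].  Returns NULL when all
 * depth links resolved (chain[depth-1].key is then the data block), or
 * the last valid element otherwise, with *err set to 0 for a hole, -EIO
 * for a failed read or -EAGAIN if truncate changed the chain under us.
 */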
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

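/*
 * Allocate num new blocks and chain them into a detached branch: the
 * head key is left in branch[0] for splice_branch(), deeper blocks are
 * zeroed and each parent is pointed at its child before anything is
 * reachable from the inode.
 */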
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);
	int err = -ENOSPC;

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			minix_free_block(inode, nr);
			err = -ENOMEM;
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return err;
}

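/*
 * Make the freshly allocated branch visible: re-verify the chain under
 * the write lock and store the head key into its parent slot in one
 * atomic step, backing out the new blocks if truncate got there first.
 */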
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

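/*
 * Map file block `block` to a device block in bh, allocating the
 * missing part of the branch when create is set; restarts from scratch
 * when a concurrent truncate invalidates what was read.
 */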
static int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

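/* Return 1 if every block pointer in [p, q) is zero. */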
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

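/*
 * Find the topmost node of the branch shared between the part of the
 * tree we keep and the part being freed by truncate.  On return *top
 * holds the key of the unshared subtree to free (0 if none); buffers up
 * to the returned element stay held for the caller to clean up.
 */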
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((block_t*)p->bh->b_data, p->p); p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

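/* Free the data blocks whose numbers are stored in [p, q), zeroing each slot. */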
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

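/*
 * Free `depth` levels of indirect blocks hanging off the pointers in
 * [p, q); at depth 0 the pointers refer to data blocks directly.
 */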
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

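/*
 * Cut the block tree at the new i_size: zero the tail of the last
 * block, free the unshared part of the boundary branch found by
 * find_shared(), then kill the remaining whole indirect subtrees.
 */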
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}

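/*
 * Worst-case number of filesystem blocks, data plus indirect, needed to
 * back `size` bytes.
 */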
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}