cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bnode.c (11924B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "btree.h"

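/*
 * Read len bytes at node offset off into buf, iterating over the
 * node's backing pages with temporary kmap_atomic() mappings.
 */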
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;
	int pagenum;
	int bytes_read;
	int bytes_to_read;
	void *vaddr;

	off += node->page_offset;
	pagenum = off >> PAGE_SHIFT;
	off &= ~PAGE_MASK; /* compute page offset for the first page */

	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
		if (pagenum >= node->tree->pages_per_bnode)
			break;
		page = node->page[pagenum];
		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);

		vaddr = kmap_atomic(page);
		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
		kunmap_atomic(vaddr);

		pagenum++;
		off = 0; /* page offset only applies to the first page */
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

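/*
 * Read a record key at node offset off into key. Leaf nodes and trees
 * with variable-length index keys store the key length in the first
 * byte; otherwise the fixed max_key_len is used.
 */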
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}

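/*
 * Write len bytes from buf at node offset off and mark the page dirty.
 * Note: unlike hfs_bnode_read(), only page[0] is mapped, so the write
 * must not cross the node's first page boundary.
 */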
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}

void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}

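/*
 * Copy len bytes between two nodes; like hfs_bnode_write(), only the
 * first page of each node is mapped.
 */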
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct page *src_page, *dst_page;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}

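/*
 * Move len bytes within a node using memmove(), so the source and
 * destination ranges may overlap.
 */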
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}

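/*
 * Dump the node descriptor and record offset table to the debug log,
 * including key lengths (and child CNIDs for index nodes).
 */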
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg_cont(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			hfs_dbg_cont(BNODE_MOD, " (%d,%d",
				     tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

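/*
 * Remove a node from the doubly linked sibling list, updating the
 * on-disk prev/next pointers of its neighbours (or the tree's leaf
 * head/tail) and marking the node deleted.
 */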
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

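/* Fold a node number into an index into the tree's node hash table. */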
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

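/*
 * Look up a node in the per-tree hash table; returns NULL if the node
 * is not cached. Callers hold tree->hash_lock.
 */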
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}

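/*
 * Allocate a node structure, insert it into the hash table and read
 * its backing pages from the tree inode's page cache. If another
 * thread created the same node concurrently, wait for it to finish
 * and return the existing node instead.
 */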
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			put_page(page);
			goto fail;
		}
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

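/* Remove a node from its tree's node hash table. */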
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

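/* Drop the node's page references and free the in-memory node. */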
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

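/*
 * Create a new node and zero its contents on disk. Warns and returns
 * the cached node if the number is unexpectedly already hashed.
 */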
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

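/* Take an extra reference on a node (NULL-safe). */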
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}