cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

btree.c (12444B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The initial clump size calculation was taken
 * from http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The numbers in the Catalog and Attributes columns each form a
	 * geometric series.  For Catalog (16GB to 512GB), each term is
	 * 8**(1/5) times the previous term.  For Attributes (16GB to 512GB),
	 * each term is 4**(1/5) times the previous term.  For 1TB to 16TB,
	 * each term is 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
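/*
 * Example of the progression above: the Catalog column grows from 32 MB at
 * 16GB by a factor of 8**(1/5) (about 1.52) per row, reaching 8 * 32 =
 * 256 MB at 512GB; the Attributes column grows by 4**(1/5) per row, from
 * 64 MB to 256 MB over the same range.
 */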

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
					u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;

	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}

	/*
	 * The default clump size is 0.8% of the volume size; it must
	 * also be a multiple of the node and block size.
	 */
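	/*
	 * With 512-byte sectors, "sectors << 2" is sectors * 4 bytes,
	 * i.e. roughly 0.8% of sectors * 512 bytes (0.8% of 512 is 4.096),
	 * and the 0x200000-sector threshold below corresponds to 1GB.
	 */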
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/*  0.8 %  */
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
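		/*
		 * e.g. a 16GB volume has 2^25 512-byte sectors;
		 * sectors >> 22 gives 8, which reaches zero after four
		 * more halvings, so the loop exits with i == 4, the
		 * 16GB row of clumptbl.
		 */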
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
	}

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
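	/* The header record sits just past the bnode descriptor in node 0. */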
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

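		/*
		 * On HFSX, a binary key type in the header means the catalog
		 * uses case-sensitive name comparison; otherwise fold case
		 * and remember that in the superblock flags.
		 */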
		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;

	kunmap(page);
	put_page(page);
	return tree;

 fail_page:
	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d "
						"still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

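/* Write the in-memory header fields back to the header node (node 0) */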
int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
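	/*
	 * Lay out the single map record: it starts at offset 14, right after
	 * the node descriptor, and its first bit (0x8000) marks this map
	 * node itself as allocated.  The record offsets live at the end of
	 * the node: the start of record 0 (14) at node_size - 2 and the end
	 * of that record (node_size - 6) at node_size - 4.
	 */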
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int res;

	if (rsvd_nodes <= 0)
		return 0;

	while (tree->free_nodes < rsvd_nodes) {
		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return res;
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}

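/*
 * Allocate one node from the B-tree allocation bitmap.  The caller gets a
 * freshly created bnode for the first free index found.
 */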
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;
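	/*
	 * Scan the allocation bitmap for a clear bit.  The bitmap starts in
	 * record 2 of the header node and continues in record 0 of each map
	 * node chained via ->next; every byte covers eight nodes, most
	 * significant bit first.  The first clear bit is set, the page is
	 * dirtied and a bnode for that index is returned; if all mapped bits
	 * are in use, a new map node is appended first.
	 */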
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}

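/* Return @node's index to the allocation bitmap so it can be reused */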
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. "
					"bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! "
					"(%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
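	/*
	 * nidx is now a bit offset into this map node's record: nidx / 8
	 * picks the byte and ~nidx & 7 the bit within it (most significant
	 * bit first), matching the ordering used in hfs_bmap_alloc().
	 */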
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode "
				"%u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}