cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ref-verify.c (25507B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"

/*
 * Used to keep track of the roots and number of refs each root has for a given
 * bytenr.  This just tracks the number of direct references, no shared
 * references.
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};

/*
 * These are meant to represent what should exist in the extent tree; they can
 * be used to verify the extent tree is consistent as these should all match
 * what the extent tree says.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};

#define MAX_TRACE	16

/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root.  We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};

/*
 * One of these exists for every block we reference; it holds the roots and
 * references to it as well as all of the ref actions that have occurred to it.
 * We never free it until we unmount the file system in order to make sure
 * re-allocations are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};

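/*
 * Overall bookkeeping shape: fs_info->block_tree is an rbtree of block_entry
 * keyed by bytenr; each block_entry carries its own rbtrees of root_entry
 * (keyed by root_objectid) and ref_entry (ordered by comp_refs() below),
 * plus the list of ref_action history records.
 *
 * The insert_* helpers below share one contract: if an entry with the same
 * key already exists it is returned and the caller's entry is NOT inserted
 * (the caller typically frees it); otherwise the new entry is linked in and
 * NULL is returned.
 */
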
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}

static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}

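/* Refs sort lexicographically by (root_objectid, parent, owner, offset). */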
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}

static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}

static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
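	/*
	 * The skipnr argument of 2 drops this helper and its immediate
	 * caller from the saved trace, so it starts where the ref change
	 * actually originated.
	 */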
	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	if (ra->trace_len == 0) {
		btrfs_err(fs_info, "  ref-verify: no stacktrace");
		return;
	}
	stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, "  ref-verify: no stacktrace support");
}
#endif

static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}

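/*
 * Note: when this succeeds (including the case where an existing entry is
 * returned) it returns with fs_info->ref_verify_lock held; the callers below
 * are responsible for dropping the lock once they are done with the entry.
 */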
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_NOFS);
	be = kzalloc(sizeof(struct block_entry), GFP_NOFS);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		if (root_objectid) {
			struct root_entry *exist_re;

			exist_re = insert_root_entry(&exist->roots, re);
			if (exist_re)
				kfree(re);
		} else {
			kfree(re);
		}
		kfree(be);
		return exist;
	}

	be->num_refs = 0;
	be->metadata = 0;
	be->from_disk = 0;
	be->roots = RB_ROOT;
	be->refs = RB_ROOT;
	INIT_LIST_HEAD(&be->actions);
	if (root_objectid)
		insert_root_entry(&be->roots, re);
	else
		kfree(re);
	return be;
}

static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		ASSERT(ref_root);
		re = lookup_root_entry(&be->roots, ref_root);
		ASSERT(re);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}

static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = 0;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

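/*
 * Layout being parsed below: an EXTENT_ITEM starts with a btrfs_extent_item,
 * optionally followed by a btrfs_tree_block_info (for tree blocks; a
 * METADATA_ITEM instead carries the level in key->offset), followed by a
 * packed run of inline refs.  Each inline ref begins with a
 * btrfs_extent_inline_ref; data back refs embed a btrfs_extent_data_ref at
 * iref->offset and shared data refs are followed by a btrfs_shared_data_ref,
 * which is why the loop advances by btrfs_extent_inline_ref_size(type).
 */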
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type;
	int ret = 0;	/* an item with no inline refs never enters the loop */

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return ret;
}

static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes,
			int *tree_block_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u32 count;
	int i = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			fallthrough;
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
						  *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}

/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes,
			  int *tree_block_level)
{
	struct extent_buffer *eb;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			eb = btrfs_read_node_slot(path->nodes[level],
						  path->slots[level]);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			btrfs_tree_read_lock(eb);
			path->nodes[level-1] = eb;
			path->slots[level-1] = 0;
			path->locks[level-1] = BTRFS_READ_LOCK;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes,
					   tree_block_level);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}

/* Walk up to the next node that needs to be processed */
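/*
 * Returns 0 with *level updated when another slot is ready to be processed,
 * or 1 once every level of the path has been exhausted and the walk is done.
 */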
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}

static void dump_ref_action(struct btrfs_fs_info *fs_info,
			    struct ref_action *ra)
{
	btrfs_err(fs_info,
"  Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
	__print_stack_trace(fs_info, ra);
}

/*
 * Dumps all the information from the block entry to printk; it's going to be
 * awesome.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
			     struct block_entry *be)
{
	struct ref_entry *ref;
	struct root_entry *re;
	struct ref_action *ra;
	struct rb_node *n;

	btrfs_err(fs_info,
"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
		  be->bytenr, be->len, be->num_refs, be->metadata,
		  be->from_disk);

	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
		ref = rb_entry(n, struct ref_entry, node);
		btrfs_err(fs_info,
"  ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
			  ref->root_objectid, ref->parent, ref->owner,
			  ref->offset, ref->num_refs);
	}

	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
		re = rb_entry(n, struct root_entry, node);
		btrfs_err(fs_info, "  root entry %llu, num_refs %llu",
			  re->root_objectid, re->num_refs);
	}

	list_for_each_entry(ra, &be->actions, list)
		dump_ref_action(fs_info, ra);
}

/*
 * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
 *
 * This will add an action item to the given bytenr and do sanity checks to make
 * sure we haven't messed something up.  If we are making a new allocation and
 * this block entry has history we will delete all previous actions as long as
 * our sanity checks pass as they are no longer needed.
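 *
 * If any check fails we dump the block entry and its action history, return
 * -EINVAL and clear REF_VERIFY for the rest of the mount (see the "out"
 * label below), so one inconsistency is reported loudly rather than
 * repeatedly.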
 */
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
		       struct btrfs_ref *generic_ref)
{
	struct ref_entry *ref = NULL, *exist;
	struct ref_action *ra = NULL;
	struct block_entry *be = NULL;
	struct root_entry *re = NULL;
	int action = generic_ref->action;
	int ret = 0;
	bool metadata;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = 0;
	u64 owner = 0;
	u64 offset = 0;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	if (generic_ref->type == BTRFS_REF_METADATA) {
		if (!parent)
			ref_root = generic_ref->tree_ref.owning_root;
		owner = generic_ref->tree_ref.level;
	} else if (!parent) {
		ref_root = generic_ref->data_ref.owning_root;
		owner = generic_ref->data_ref.ino;
		offset = generic_ref->data_ref.offset;
	}
	metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
	if (!ra || !ref) {
		kfree(ref);
		kfree(ra);
		ret = -ENOMEM;
		goto out;
	}

	ref->parent = parent;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
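	/*
	 * num_refs is a u64, so a drop is recorded as (u64)-1 here; the value
	 * only survives in the action log, since comp_refs() ignores num_refs
	 * when matching and a drop decrements the existing entry instead.
	 */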
	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
	/*
	 * Save the extra info from the delayed ref in the ref action to make it
	 * easier to figure out what is happening.  The real refs we add to the
	 * ref tree need to reflect what we save on disk so it matches any
	 * on-disk refs we pre-loaded.
	 */
	ra->ref.owner = owner;
	ra->ref.offset = offset;
	ra->ref.root_objectid = ref_root;
	__save_stack_trace(ra);

	INIT_LIST_HEAD(&ra->list);
	ra->action = action;
	ra->root = generic_ref->real_root;

	/*
	 * This is an allocation; preallocate the block_entry in case we haven't
	 * used it before.
	 */
	ret = -EINVAL;
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		/*
		 * For subvol_create we'll just pass in whatever the parent root
		 * is and the new root objectid, so let's not treat the passed
		 * in root as if it really has a ref for this bytenr.
		 */
		be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
		if (IS_ERR(be)) {
			kfree(ref);
			kfree(ra);
			ret = PTR_ERR(be);
			goto out;
		}
		be->num_refs++;
		if (metadata)
			be->metadata = 1;

		if (be->num_refs != 1) {
			btrfs_err(fs_info,
			"re-allocated a block that still has references to it!");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		while (!list_empty(&be->actions)) {
			struct ref_action *tmp;

			tmp = list_first_entry(&be->actions, struct ref_action,
					       list);
			list_del(&tmp->list);
			kfree(tmp);
		}
	} else {
		struct root_entry *tmp;

		if (!parent) {
			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
			if (!re) {
				kfree(ref);
				kfree(ra);
				ret = -ENOMEM;
				goto out;
			}
			/*
			 * This is the root that is modifying us, so it's the
			 * one we want to lookup below when we modify the
			 * re->num_refs.
			 */
			ref_root = generic_ref->real_root;
			re->root_objectid = generic_ref->real_root;
			re->num_refs = 0;
		}

		spin_lock(&fs_info->ref_verify_lock);
		be = lookup_block_entry(&fs_info->block_tree, bytenr);
		if (!be) {
			btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
				  action, bytenr, num_bytes);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		} else if (be->num_refs == 0) {
			btrfs_err(fs_info,
		"trying to do action %d for a bytenr that has 0 total references",
				action);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		if (!parent) {
			tmp = insert_root_entry(&be->roots, re);
			if (tmp) {
				kfree(re);
				re = tmp;
			}
		}
	}

	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		if (action == BTRFS_DROP_DELAYED_REF) {
			if (exist->num_refs == 0) {
				btrfs_err(fs_info,
"dropping a ref for an existing root that doesn't have a ref on the block");
				dump_block_entry(fs_info, be);
				dump_ref_action(fs_info, ra);
				kfree(ref);
				kfree(ra);
				goto out_unlock;
			}
			exist->num_refs--;
			if (exist->num_refs == 0) {
				rb_erase(&exist->node, &be->refs);
				kfree(exist);
			}
		} else if (!be->metadata) {
			exist->num_refs++;
		} else {
			btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
		kfree(ref);
	} else {
		if (action == BTRFS_DROP_DELAYED_REF) {
			btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
	}

	if (!parent && !re) {
		re = lookup_root_entry(&be->roots, ref_root);
		if (!re) {
			/*
			 * This shouldn't happen because we will add our re
			 * above when we lookup the be with !parent, but just in
			 * case catch this case so we don't panic because I
			 * didn't think of some other corner case.
			 */
			btrfs_err(fs_info, "failed to find root %llu for %llu",
				  generic_ref->real_root, be->bytenr);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}
	if (action == BTRFS_DROP_DELAYED_REF) {
		if (re)
			re->num_refs--;
		be->num_refs--;
	} else if (action == BTRFS_ADD_DELAYED_REF) {
		be->num_refs++;
		if (re)
			re->num_refs++;
	}
	list_add_tail(&ra->list, &be->actions);
	ret = 0;
out_unlock:
	spin_unlock(&fs_info->ref_verify_lock);
out:
	if (ret)
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	return ret;
}

/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
	struct block_entry *be;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	while ((n = rb_first(&fs_info->block_tree))) {
		be = rb_entry(n, struct block_entry, node);
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
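		/*
		 * This may briefly drop and retake ref_verify_lock so that
		 * freeing a huge cache doesn't hog the CPU.
		 */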
		cond_resched_lock(&fs_info->ref_verify_lock);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
			       u64 len)
{
	struct block_entry *be = NULL, *entry;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	n = fs_info->block_tree.rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < start) {
			n = n->rb_right;
		} else if (entry->bytenr > start) {
			n = n->rb_left;
		} else {
			be = entry;
			break;
		}
		/* We want to get as close to start as possible */
		if (be == NULL ||
		    (entry->bytenr < start && be->bytenr > start) ||
		    (entry->bytenr < start && entry->bytenr > be->bytenr))
			be = entry;
	}

	/*
	 * Could have an empty block group, maybe have something to check for
	 * this case to verify we were actually empty?
	 */
	if (!be) {
		spin_unlock(&fs_info->ref_verify_lock);
		return;
	}

	n = &be->node;
	while (n) {
		be = rb_entry(n, struct block_entry, node);
		n = rb_next(n);
		if (be->bytenr < start && be->bytenr + be->len > start) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
			continue;
		}
		if (be->bytenr < start)
			continue;
		if (be->bytenr >= start + len)
			break;
		if (be->bytenr + be->len > start + len) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int tree_block_level = 0;
	u64 bytenr = 0, num_bytes = 0;
	int ret, level;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, 0);
	eb = btrfs_read_lock_root_node(extent_root);
	level = btrfs_header_level(eb);
	path->nodes[level] = eb;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_READ_LOCK;

	while (1) {
		/*
		 * We have to keep track of the bytenr/num_bytes we last hit
		 * because we could have run out of space for an inline ref, and
		 * would have had to add a ref key item which may appear on a
		 * different leaf from the original extent item.
		 */
		ret = walk_down_tree(extent_root, path, level,
				     &bytenr, &num_bytes, &tree_block_level);
		if (ret)
			break;
		ret = walk_up_tree(path, &level);
		if (ret < 0)
			break;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}
	if (ret) {
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
		btrfs_free_ref_cache(fs_info);
	}
	btrfs_free_path(path);
	return ret;
}
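
/*
 * Note: all of the above is only active when the kernel is built with
 * CONFIG_BTRFS_FS_REF_VERIFY and the filesystem is mounted with the
 * "ref_verify" mount option, which is what btrfs_test_opt(fs_info,
 * REF_VERIFY) checks throughout this file.
 */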