cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

relocation.c (115721B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2009 Oracle.  All rights reserved.
      4 */
      5
      6#include <linux/sched.h>
      7#include <linux/pagemap.h>
      8#include <linux/writeback.h>
      9#include <linux/blkdev.h>
     10#include <linux/rbtree.h>
     11#include <linux/slab.h>
     12#include <linux/error-injection.h>
     13#include "ctree.h"
     14#include "disk-io.h"
     15#include "transaction.h"
     16#include "volumes.h"
     17#include "locking.h"
     18#include "btrfs_inode.h"
     19#include "async-thread.h"
     20#include "free-space-cache.h"
     21#include "qgroup.h"
     22#include "print-tree.h"
     23#include "delalloc-space.h"
     24#include "block-group.h"
     25#include "backref.h"
     26#include "misc.h"
     27#include "subpage.h"
     28#include "zoned.h"
     29#include "inode-item.h"
     30
     31/*
     32 * Relocation overview
     33 *
     34 * [What does relocation do]
     35 *
     36 * The objective of relocation is to relocate all extents of the target block
     37 * group to other block groups.
     38 * This is utilized by resize (shrink only), profile conversion, space
     39 * compaction, and the balance routine to spread chunks over devices.
     40 *
     41 * 		Before		|		After
     42 * ------------------------------------------------------------------
     43 *  BG A: 10 data extents	| BG A: deleted
     44 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
     45 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
     46 *
     47 * [How does relocation work]
     48 *
     49 * 1.   Mark the target block group read-only
     50 *      New extents won't be allocated from the target block group.
     51 *
     52 * 2.1  Record each extent in the target block group
     53 *      To build a proper map of extents to be relocated.
     54 *
     55 * 2.2  Build data reloc tree and reloc trees
     56 *      Data reloc tree will contain an inode, recording all newly relocated
     57 *      data extents.
     58 *      There will be only one data reloc tree for one data block group.
     59 *
     60 *      Reloc tree will be a special snapshot of its source tree, containing
     61 *      relocated tree blocks.
     62 * Each tree referring to a tree block in the target block group will get
     63 * its reloc tree built.
     64 *
     65 * 2.3  Swap source tree with its corresponding reloc tree
     66 *      Each involved tree only refers to new extents after swap.
     67 *
     68 * 3.   Cleanup reloc trees and data reloc tree.
     69 *      As old extents in the target block group are still referenced by reloc
     70 *      trees, we need to clean them up before really freeing the target block
     71 *      group.
     72 *
     73 * The main complexity is in steps 2.2 and 2.3.
     74 *
     75 * The entry point of relocation is the relocate_block_group() function.
     76 */
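
/*
 * A minimal sketch of the flow described above, for orientation only.
 * The helpers named here are hypothetical stand-ins for the real work
 * done by relocate_block_group() and its callees further below.
 */
#if 0	/* illustrative sketch, not compiled */
static int relocation_flow_sketch(struct reloc_control *rc)
{
	int ret;

	/* Step 1: no new allocations may land in the target block group. */
	ret = mark_target_read_only(rc->block_group);	/* hypothetical */
	if (ret)
		return ret;

	/*
	 * Step 2: record the extents, build the data reloc tree and the
	 * reloc trees, then swap each source tree with its reloc tree.
	 */
	ret = build_reloc_trees_and_swap(rc);		/* hypothetical */
	if (ret)
		return ret;

	/* Step 3: drop the reloc trees so the old extents become unused. */
	return cleanup_reloc_trees(rc);			/* hypothetical */
}
#endif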
     77
     78#define RELOCATION_RESERVED_NODES	256
     79/*
     80 * map address of tree root to tree
     81 */
     82struct mapping_node {
     83	struct {
     84		struct rb_node rb_node;
     85		u64 bytenr;
     86	}; /* Use rb_simple_node for search/insert */
     87	void *data;
     88};
     89
     90struct mapping_tree {
     91	struct rb_root rb_root;
     92	spinlock_t lock;
     93};
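
/*
 * The anonymous struct above lets mapping_node be keyed by bytenr with the
 * rb_simple_*() helpers from misc.h. A minimal insertion sketch, mirroring
 * the real insert/lookup sites __add_reloc_root() and find_reloc_root()
 * below:
 */
#if 0	/* illustrative sketch, not compiled */
static int mapping_tree_insert_sketch(struct mapping_tree *tree,
				      struct mapping_node *node)
{
	struct rb_node *rb_node;

	spin_lock(&tree->lock);
	/* rb_simple_insert() returns the colliding node, if any. */
	rb_node = rb_simple_insert(&tree->rb_root, node->bytenr,
				   &node->rb_node);
	spin_unlock(&tree->lock);
	return rb_node ? -EEXIST : 0;
}
#endif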
     94
     95/*
     96 * represents a tree block to process
     97 */
     98struct tree_block {
     99	struct {
    100		struct rb_node rb_node;
    101		u64 bytenr;
    102	}; /* Use rb_simple_node for search/insert */
    103	u64 owner;
    104	struct btrfs_key key;
    105	unsigned int level:8;
    106	unsigned int key_ready:1;
    107};
    108
    109#define MAX_EXTENTS 128
    110
    111struct file_extent_cluster {
    112	u64 start;
    113	u64 end;
    114	u64 boundary[MAX_EXTENTS];
    115	unsigned int nr;
    116};
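
/*
 * A cluster covers [start, end] and groups up to MAX_EXTENTS data extents;
 * boundary[i] is assumed here to hold the start bytenr of the i-th extent
 * in the cluster. A sketch of walking the extents under that assumption:
 */
#if 0	/* illustrative sketch, not compiled */
static void walk_cluster_extents_sketch(const struct file_extent_cluster *c)
{
	unsigned int i;

	for (i = 0; i < c->nr; i++) {
		u64 extent_start = c->boundary[i];
		u64 extent_end = (i + 1 < c->nr) ? c->boundary[i + 1] - 1
						 : c->end;

		/* ... process [extent_start, extent_end] ... */
	}
}
#endif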
    117
    118struct reloc_control {
    119	/* block group to relocate */
    120	struct btrfs_block_group *block_group;
    121	/* extent tree */
    122	struct btrfs_root *extent_root;
    123	/* inode for moving data */
    124	struct inode *data_inode;
    125
    126	struct btrfs_block_rsv *block_rsv;
    127
    128	struct btrfs_backref_cache backref_cache;
    129
    130	struct file_extent_cluster cluster;
    131	/* tree blocks have been processed */
    132	struct extent_io_tree processed_blocks;
    133	/* map start of tree root to corresponding reloc tree */
    134	struct mapping_tree reloc_root_tree;
    135	/* list of reloc trees */
    136	struct list_head reloc_roots;
    137	/* list of subvolume trees that get relocated */
    138	struct list_head dirty_subvol_roots;
    139	/* size of metadata reservation for merging reloc trees */
    140	u64 merging_rsv_size;
    141	/* size of relocated tree nodes */
    142	u64 nodes_relocated;
    143	/* reserved size for block group relocation*/
    144	u64 reserved_bytes;
    145
    146	u64 search_start;
    147	u64 extents_found;
    148
    149	unsigned int stage:8;
    150	unsigned int create_reloc_tree:1;
    151	unsigned int merge_reloc_tree:1;
    152	unsigned int found_file_extent:1;
    153};
    154
    155/* stages of data relocation */
    156#define MOVE_DATA_EXTENTS	0
    157#define UPDATE_DATA_PTRS	1
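
/*
 * Data relocation makes two passes over the extents recorded from the
 * target block group. A sketch of the stage progression, with
 * relocate_one_pass() as a hypothetical stand-in for the per-stage work
 * (replace_file_extents() below only acts during UPDATE_DATA_PTRS):
 */
#if 0	/* illustrative sketch, not compiled */
static int data_reloc_stages_sketch(struct reloc_control *rc)
{
	int ret;

	/* Pass 1: copy the data into the data reloc inode. */
	rc->stage = MOVE_DATA_EXTENTS;
	ret = relocate_one_pass(rc);	/* hypothetical */
	if (ret)
		return ret;

	/* Pass 2: rewrite file extent items to point at the new copies. */
	rc->stage = UPDATE_DATA_PTRS;
	return relocate_one_pass(rc);	/* hypothetical */
}
#endif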
    158
    159static void mark_block_processed(struct reloc_control *rc,
    160				 struct btrfs_backref_node *node)
    161{
    162	u32 blocksize;
    163
    164	if (node->level == 0 ||
    165	    in_range(node->bytenr, rc->block_group->start,
    166		     rc->block_group->length)) {
    167		blocksize = rc->extent_root->fs_info->nodesize;
    168		set_extent_bits(&rc->processed_blocks, node->bytenr,
    169				node->bytenr + blocksize - 1, EXTENT_DIRTY);
    170	}
    171	node->processed = 1;
    172}
    173
    174
    175static void mapping_tree_init(struct mapping_tree *tree)
    176{
    177	tree->rb_root = RB_ROOT;
    178	spin_lock_init(&tree->lock);
    179}
    180
    181/*
    182 * walk up backref nodes until reaching the node that presents the tree root
    183 */
    184static struct btrfs_backref_node *walk_up_backref(
    185		struct btrfs_backref_node *node,
    186		struct btrfs_backref_edge *edges[], int *index)
    187{
    188	struct btrfs_backref_edge *edge;
    189	int idx = *index;
    190
    191	while (!list_empty(&node->upper)) {
    192		edge = list_entry(node->upper.next,
    193				  struct btrfs_backref_edge, list[LOWER]);
    194		edges[idx++] = edge;
    195		node = edge->node[UPPER];
    196	}
    197	BUG_ON(node->detached);
    198	*index = idx;
    199	return node;
    200}
    201
    202/*
    203 * walk down backref nodes to find start of next reference path
    204 */
    205static struct btrfs_backref_node *walk_down_backref(
    206		struct btrfs_backref_edge *edges[], int *index)
    207{
    208	struct btrfs_backref_edge *edge;
    209	struct btrfs_backref_node *lower;
    210	int idx = *index;
    211
    212	while (idx > 0) {
    213		edge = edges[idx - 1];
    214		lower = edge->node[LOWER];
    215		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
    216			idx--;
    217			continue;
    218		}
    219		edge = list_entry(edge->list[LOWER].next,
    220				  struct btrfs_backref_edge, list[LOWER]);
    221		edges[idx - 1] = edge;
    222		*index = idx;
    223		return edge->node[UPPER];
    224	}
    225	*index = 0;
    226	return NULL;
    227}
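
/*
 * Together these two helpers enumerate every reference path from a lower
 * node up to a tree root: walk_up_backref() follows the first upper edge
 * at each level, and walk_down_backref() backtracks to the next sibling
 * path. A sketch of the iteration idiom used by the relocation code:
 */
#if 0	/* illustrative sketch, not compiled */
static void visit_all_root_paths_sketch(struct btrfs_backref_node *lower)
{
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_backref_node *upper;
	int index = 0;

	upper = walk_up_backref(lower, edges, &index);
	while (upper) {
		/* 'upper' presents a tree root reachable from 'lower' */
		upper = walk_down_backref(edges, &index);
		if (upper)
			upper = walk_up_backref(upper, edges, &index);
	}
}
#endif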
    228
    229static void update_backref_node(struct btrfs_backref_cache *cache,
    230				struct btrfs_backref_node *node, u64 bytenr)
    231{
    232	struct rb_node *rb_node;
    233	rb_erase(&node->rb_node, &cache->rb_root);
    234	node->bytenr = bytenr;
    235	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
    236	if (rb_node)
    237		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
    238}
    239
    240/*
    241 * update backref cache after a transaction commit
    242 */
    243static int update_backref_cache(struct btrfs_trans_handle *trans,
    244				struct btrfs_backref_cache *cache)
    245{
    246	struct btrfs_backref_node *node;
    247	int level = 0;
    248
    249	if (cache->last_trans == 0) {
    250		cache->last_trans = trans->transid;
    251		return 0;
    252	}
    253
    254	if (cache->last_trans == trans->transid)
    255		return 0;
    256
    257	/*
    258	 * Detached nodes are used to avoid unnecessary backref
    259	 * lookups. A transaction commit changes the extent tree,
    260	 * so the detached nodes are no longer useful.
    261	 */
    262	while (!list_empty(&cache->detached)) {
    263		node = list_entry(cache->detached.next,
    264				  struct btrfs_backref_node, list);
    265		btrfs_backref_cleanup_node(cache, node);
    266	}
    267
    268	while (!list_empty(&cache->changed)) {
    269		node = list_entry(cache->changed.next,
    270				  struct btrfs_backref_node, list);
    271		list_del_init(&node->list);
    272		BUG_ON(node->pending);
    273		update_backref_node(cache, node, node->new_bytenr);
    274	}
    275
    276	/*
    277	 * some nodes can be left in the pending list if there were
    278	 * errors during processing the pending nodes.
    279	 */
    280	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
    281		list_for_each_entry(node, &cache->pending[level], list) {
    282			BUG_ON(!node->pending);
    283			if (node->bytenr == node->new_bytenr)
    284				continue;
    285			update_backref_node(cache, node, node->new_bytenr);
    286		}
    287	}
    288
    289	cache->last_trans = 0;
    290	return 1;
    291}
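
/*
 * The return value matters to callers: 0 means the cache is already
 * current for this transaction, 1 means stale detached/changed nodes were
 * flushed and last_trans was reset. A loop-body sketch of the
 * restart-on-flush pattern, assuming a per-pass driver loop:
 */
#if 0	/* illustrative sketch, not compiled */
	if (update_backref_cache(trans, &rc->backref_cache)) {
		/*
		 * The cache was flushed for a new transaction; restart
		 * this pass instead of trusting previously found nodes.
		 */
		btrfs_end_transaction(trans);
		trans = NULL;
		continue;
	}
#endif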
    292
    293static bool reloc_root_is_dead(struct btrfs_root *root)
    294{
    295	/*
    296	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
    297	 * btrfs_update_reloc_root. We need to see the updated bit before
    298	 * trying to access reloc_root
    299	 */
    300	smp_rmb();
    301	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
    302		return true;
    303	return false;
    304}
    305
    306/*
    307 * Check if this subvolume tree has a valid reloc tree.
    308 *
    309 * A reloc tree after swap is considered dead, and thus not valid.
    310 * This is enough for most callers, as they don't distinguish dead reloc root
    311 * from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
    312 * special case.
    313 */
    314static bool have_reloc_root(struct btrfs_root *root)
    315{
    316	if (reloc_root_is_dead(root))
    317		return false;
    318	if (!root->reloc_root)
    319		return false;
    320	return true;
    321}
    322
    323int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
    324{
    325	struct btrfs_root *reloc_root;
    326
    327	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
    328		return 0;
    329
    330	/* This root has been merged with its reloc tree, we can ignore it */
    331	if (reloc_root_is_dead(root))
    332		return 1;
    333
    334	reloc_root = root->reloc_root;
    335	if (!reloc_root)
    336		return 0;
    337
    338	if (btrfs_header_generation(reloc_root->commit_root) ==
    339	    root->fs_info->running_transaction->transid)
    340		return 0;
    341	/*
    342	 * If there is a reloc tree and it was created in a previous
    343	 * transaction, backref lookup can find the reloc tree, so
    344	 * the backref node for the fs tree root is useless for
    345	 * relocation.
    346	 */
    347	return 1;
    348}
    349
    350/*
    351 * find reloc tree by address of tree root
    352 */
    353struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
    354{
    355	struct reloc_control *rc = fs_info->reloc_ctl;
    356	struct rb_node *rb_node;
    357	struct mapping_node *node;
    358	struct btrfs_root *root = NULL;
    359
    360	ASSERT(rc);
    361	spin_lock(&rc->reloc_root_tree.lock);
    362	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
    363	if (rb_node) {
    364		node = rb_entry(rb_node, struct mapping_node, rb_node);
    365		root = node->data;
    366	}
    367	spin_unlock(&rc->reloc_root_tree.lock);
    368	return btrfs_grab_root(root);
    369}
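
/*
 * The returned root carries an extra reference from btrfs_grab_root(), so
 * every successful lookup must be paired with btrfs_put_root(). A minimal
 * caller sketch:
 */
#if 0	/* illustrative sketch, not compiled */
static void find_reloc_root_usage_sketch(struct btrfs_fs_info *fs_info,
					 u64 bytenr)
{
	struct btrfs_root *root = find_reloc_root(fs_info, bytenr);

	if (root) {
		/* ... use the reloc root ... */
		btrfs_put_root(root);	/* drop the reference we were given */
	}
}
#endif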
    370
    371/*
    372 * For useless nodes, do two major clean ups:
    373 *
    374 * - Clean up the child edges and nodes
    375 *   If a child node becomes an orphan (no parent) during cleanup, the
    376 *   child node will also be cleaned up.
    377 *
    378 * - Free up leaves (level 0), keeping non-leaf nodes detached
    379 *   For non-leaf nodes, the node is kept cached as "detached"
    380 *
    381 * Return false if @node is not in the @useless_nodes list.
    382 * Return true if @node is in the @useless_nodes list.
    383 */
    384static bool handle_useless_nodes(struct reloc_control *rc,
    385				 struct btrfs_backref_node *node)
    386{
    387	struct btrfs_backref_cache *cache = &rc->backref_cache;
    388	struct list_head *useless_node = &cache->useless_node;
    389	bool ret = false;
    390
    391	while (!list_empty(useless_node)) {
    392		struct btrfs_backref_node *cur;
    393
    394		cur = list_first_entry(useless_node, struct btrfs_backref_node,
    395				 list);
    396		list_del_init(&cur->list);
    397
    398		/* Only tree root nodes can be added to @useless_nodes */
    399		ASSERT(list_empty(&cur->upper));
    400
    401		if (cur == node)
    402			ret = true;
    403
    404		/* The node is the lowest node */
    405		if (cur->lowest) {
    406			list_del_init(&cur->lower);
    407			cur->lowest = 0;
    408		}
    409
    410		/* Cleanup the lower edges */
    411		while (!list_empty(&cur->lower)) {
    412			struct btrfs_backref_edge *edge;
    413			struct btrfs_backref_node *lower;
    414
    415			edge = list_entry(cur->lower.next,
    416					struct btrfs_backref_edge, list[UPPER]);
    417			list_del(&edge->list[UPPER]);
    418			list_del(&edge->list[LOWER]);
    419			lower = edge->node[LOWER];
    420			btrfs_backref_free_edge(cache, edge);
    421
    422			/* Child node is also orphan, queue for cleanup */
    423			if (list_empty(&lower->upper))
    424				list_add(&lower->list, useless_node);
    425		}
    426		/* Mark this block processed for relocation */
    427		mark_block_processed(rc, cur);
    428
    429		/*
    430		 * Backref nodes for tree leaves are deleted from the cache.
    431		 * Backref nodes for upper level tree blocks are left in the
    432		 * cache to avoid unnecessary backref lookup.
    433		 */
    434		if (cur->level > 0) {
    435			list_add(&cur->list, &cache->detached);
    436			cur->detached = 1;
    437		} else {
    438			rb_erase(&cur->rb_node, &cache->rb_root);
    439			btrfs_backref_free_node(cache, cur);
    440		}
    441	}
    442	return ret;
    443}
    444
    445/*
    446 * Build backref tree for a given tree block. Root of the backref tree
    447 * corresponds to the tree block, leaves of the backref tree correspond to
    448 * roots of b-trees that reference the tree block.
    449 *
    450 * The basic idea of this function is to check backrefs of a given block to
    451 * find upper level blocks that reference the block, and then check backrefs
    452 * of these upper level blocks recursively. The recursion stops when the tree
    453 * root is reached or the backrefs for the block are cached.
    454 *
    455 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
    456 * all upper level blocks that directly/indirectly reference the block are also
    457 * cached.
    458 */
    459static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
    460			struct reloc_control *rc, struct btrfs_key *node_key,
    461			int level, u64 bytenr)
    462{
    463	struct btrfs_backref_iter *iter;
    464	struct btrfs_backref_cache *cache = &rc->backref_cache;
    465	/* For searching parent of TREE_BLOCK_REF */
    466	struct btrfs_path *path;
    467	struct btrfs_backref_node *cur;
    468	struct btrfs_backref_node *node = NULL;
    469	struct btrfs_backref_edge *edge;
    470	int ret;
    471	int err = 0;
    472
    473	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
    474	if (!iter)
    475		return ERR_PTR(-ENOMEM);
    476	path = btrfs_alloc_path();
    477	if (!path) {
    478		err = -ENOMEM;
    479		goto out;
    480	}
    481
    482	node = btrfs_backref_alloc_node(cache, bytenr, level);
    483	if (!node) {
    484		err = -ENOMEM;
    485		goto out;
    486	}
    487
    488	node->lowest = 1;
    489	cur = node;
    490
    491	/* Breadth-first search to build backref cache */
    492	do {
    493		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
    494						  cur);
    495		if (ret < 0) {
    496			err = ret;
    497			goto out;
    498		}
    499		edge = list_first_entry_or_null(&cache->pending_edge,
    500				struct btrfs_backref_edge, list[UPPER]);
    501		/*
    502		 * The pending list isn't empty, take the first block to
    503		 * process
    504		 */
    505		if (edge) {
    506			list_del_init(&edge->list[UPPER]);
    507			cur = edge->node[UPPER];
    508		}
    509	} while (edge);
    510
    511	/* Finish the upper linkage of newly added edges/nodes */
    512	ret = btrfs_backref_finish_upper_links(cache, node);
    513	if (ret < 0) {
    514		err = ret;
    515		goto out;
    516	}
    517
    518	if (handle_useless_nodes(rc, node))
    519		node = NULL;
    520out:
    521	btrfs_backref_iter_free(iter);
    522	btrfs_free_path(path);
    523	if (err) {
    524		btrfs_backref_error_cleanup(cache, node);
    525		return ERR_PTR(err);
    526	}
    527	ASSERT(!node || !node->detached);
    528	ASSERT(list_empty(&cache->useless_node) &&
    529	       list_empty(&cache->pending_edge));
    530	return node;
    531}
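
/*
 * A sketch of how a caller consumes build_backref_tree(): errors come back
 * as an ERR_PTR, and NULL means the block was found useless and already
 * marked processed by handle_useless_nodes(). The (key, level, bytenr)
 * triple would come from a queued tree_block:
 */
#if 0	/* illustrative sketch, not compiled */
static int build_backref_tree_usage_sketch(struct reloc_control *rc,
					   struct tree_block *block)
{
	struct btrfs_backref_node *node;

	node = build_backref_tree(rc, &block->key, block->level,
				  block->bytenr);
	if (IS_ERR(node))
		return PTR_ERR(node);
	if (!node)
		return 0;	/* nothing left to relocate for this block */
	/* ... hand 'node' to the tree-block relocation machinery ... */
	return 0;
}
#endif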
    532
    533/*
    534 * helper to add a backref node for the newly created snapshot.
    535 * the backref node is created by cloning the backref node that
    536 * corresponds to the root of the source tree
    537 */
    538static int clone_backref_node(struct btrfs_trans_handle *trans,
    539			      struct reloc_control *rc,
    540			      struct btrfs_root *src,
    541			      struct btrfs_root *dest)
    542{
    543	struct btrfs_root *reloc_root = src->reloc_root;
    544	struct btrfs_backref_cache *cache = &rc->backref_cache;
    545	struct btrfs_backref_node *node = NULL;
    546	struct btrfs_backref_node *new_node;
    547	struct btrfs_backref_edge *edge;
    548	struct btrfs_backref_edge *new_edge;
    549	struct rb_node *rb_node;
    550
    551	if (cache->last_trans > 0)
    552		update_backref_cache(trans, cache);
    553
    554	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
    555	if (rb_node) {
    556		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
    557		if (node->detached)
    558			node = NULL;
    559		else
    560			BUG_ON(node->new_bytenr != reloc_root->node->start);
    561	}
    562
    563	if (!node) {
    564		rb_node = rb_simple_search(&cache->rb_root,
    565					   reloc_root->commit_root->start);
    566		if (rb_node) {
    567			node = rb_entry(rb_node, struct btrfs_backref_node,
    568					rb_node);
    569			BUG_ON(node->detached);
    570		}
    571	}
    572
    573	if (!node)
    574		return 0;
    575
    576	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
    577					    node->level);
    578	if (!new_node)
    579		return -ENOMEM;
    580
    581	new_node->lowest = node->lowest;
    582	new_node->checked = 1;
    583	new_node->root = btrfs_grab_root(dest);
    584	ASSERT(new_node->root);
    585
    586	if (!node->lowest) {
    587		list_for_each_entry(edge, &node->lower, list[UPPER]) {
    588			new_edge = btrfs_backref_alloc_edge(cache);
    589			if (!new_edge)
    590				goto fail;
    591
    592			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
    593						new_node, LINK_UPPER);
    594		}
    595	} else {
    596		list_add_tail(&new_node->lower, &cache->leaves);
    597	}
    598
    599	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
    600				   &new_node->rb_node);
    601	if (rb_node)
    602		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
    603
    604	if (!new_node->lowest) {
    605		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
    606			list_add_tail(&new_edge->list[LOWER],
    607				      &new_edge->node[LOWER]->upper);
    608		}
    609	}
    610	return 0;
    611fail:
    612	while (!list_empty(&new_node->lower)) {
    613		new_edge = list_entry(new_node->lower.next,
    614				      struct btrfs_backref_edge, list[UPPER]);
    615		list_del(&new_edge->list[UPPER]);
    616		btrfs_backref_free_edge(cache, new_edge);
    617	}
    618	btrfs_backref_free_node(cache, new_node);
    619	return -ENOMEM;
    620}
    621
    622/*
    623 * helper to add 'address of tree root -> reloc tree' mapping
    624 */
    625static int __must_check __add_reloc_root(struct btrfs_root *root)
    626{
    627	struct btrfs_fs_info *fs_info = root->fs_info;
    628	struct rb_node *rb_node;
    629	struct mapping_node *node;
    630	struct reloc_control *rc = fs_info->reloc_ctl;
    631
    632	node = kmalloc(sizeof(*node), GFP_NOFS);
    633	if (!node)
    634		return -ENOMEM;
    635
    636	node->bytenr = root->commit_root->start;
    637	node->data = root;
    638
    639	spin_lock(&rc->reloc_root_tree.lock);
    640	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
    641				   node->bytenr, &node->rb_node);
    642	spin_unlock(&rc->reloc_root_tree.lock);
    643	if (rb_node) {
    644		btrfs_err(fs_info,
    645			    "Duplicate root found for start=%llu while inserting into relocation tree",
    646			    node->bytenr);
    647		return -EEXIST;
    648	}
    649
    650	list_add_tail(&root->root_list, &rc->reloc_roots);
    651	return 0;
    652}
    653
    654/*
    655 * helper to delete the 'address of tree root -> reloc tree'
    656 * mapping
    657 */
    658static void __del_reloc_root(struct btrfs_root *root)
    659{
    660	struct btrfs_fs_info *fs_info = root->fs_info;
    661	struct rb_node *rb_node;
    662	struct mapping_node *node = NULL;
    663	struct reloc_control *rc = fs_info->reloc_ctl;
    664	bool put_ref = false;
    665
    666	if (rc && root->node) {
    667		spin_lock(&rc->reloc_root_tree.lock);
    668		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
    669					   root->commit_root->start);
    670		if (rb_node) {
    671			node = rb_entry(rb_node, struct mapping_node, rb_node);
    672			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
    673			RB_CLEAR_NODE(&node->rb_node);
    674		}
    675		spin_unlock(&rc->reloc_root_tree.lock);
    676		ASSERT(!node || (struct btrfs_root *)node->data == root);
    677	}
    678
    679	/*
    680	 * We only put the reloc root here if it's on the list.  There's a lot
    681	 * of places where the pattern is to splice the rc->reloc_roots, process
    682	 * the reloc roots, and then add the reloc root back onto
    683	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
    684	 * list we don't want the reference being dropped, because the guy
    685	 * messing with the list is in charge of the reference.
    686	 */
    687	spin_lock(&fs_info->trans_lock);
    688	if (!list_empty(&root->root_list)) {
    689		put_ref = true;
    690		list_del_init(&root->root_list);
    691	}
    692	spin_unlock(&fs_info->trans_lock);
    693	if (put_ref)
    694		btrfs_put_root(root);
    695	kfree(node);
    696}
    697
    698/*
    699 * helper to update the 'address of tree root -> reloc tree'
    700 * mapping
    701 */
    702static int __update_reloc_root(struct btrfs_root *root)
    703{
    704	struct btrfs_fs_info *fs_info = root->fs_info;
    705	struct rb_node *rb_node;
    706	struct mapping_node *node = NULL;
    707	struct reloc_control *rc = fs_info->reloc_ctl;
    708
    709	spin_lock(&rc->reloc_root_tree.lock);
    710	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
    711				   root->commit_root->start);
    712	if (rb_node) {
    713		node = rb_entry(rb_node, struct mapping_node, rb_node);
    714		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
    715	}
    716	spin_unlock(&rc->reloc_root_tree.lock);
    717
    718	if (!node)
    719		return 0;
    720	BUG_ON((struct btrfs_root *)node->data != root);
    721
    722	spin_lock(&rc->reloc_root_tree.lock);
    723	node->bytenr = root->node->start;
    724	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
    725				   node->bytenr, &node->rb_node);
    726	spin_unlock(&rc->reloc_root_tree.lock);
    727	if (rb_node)
    728		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
    729	return 0;
    730}
    731
    732static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
    733					struct btrfs_root *root, u64 objectid)
    734{
    735	struct btrfs_fs_info *fs_info = root->fs_info;
    736	struct btrfs_root *reloc_root;
    737	struct extent_buffer *eb;
    738	struct btrfs_root_item *root_item;
    739	struct btrfs_key root_key;
    740	int ret = 0;
    741	bool must_abort = false;
    742
    743	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
    744	if (!root_item)
    745		return ERR_PTR(-ENOMEM);
    746
    747	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
    748	root_key.type = BTRFS_ROOT_ITEM_KEY;
    749	root_key.offset = objectid;
    750
    751	if (root->root_key.objectid == objectid) {
    752		u64 commit_root_gen;
    753
    754		/* called by btrfs_init_reloc_root */
    755		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
    756				      BTRFS_TREE_RELOC_OBJECTID);
    757		if (ret)
    758			goto fail;
    759
    760		/*
    761		 * Set the last_snapshot field to the generation of the commit
    762		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
    763		 * correctly (returns true) when the relocation root is created
    764		 * either inside the critical section of a transaction commit
    765		 * (through transaction.c:qgroup_account_snapshot()) or when
    766		 * it's created before the transaction commit is started.
    767		 */
    768		commit_root_gen = btrfs_header_generation(root->commit_root);
    769		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
    770	} else {
    771		/*
    772		 * called by btrfs_reloc_post_snapshot_hook.
    773		 * the source tree is a reloc tree; all tree blocks
    774		 * modified after it was created have the RELOC flag
    775		 * set in their headers, so it's OK not to update
    776		 * the 'last_snapshot'.
    777		 */
    778		ret = btrfs_copy_root(trans, root, root->node, &eb,
    779				      BTRFS_TREE_RELOC_OBJECTID);
    780		if (ret)
    781			goto fail;
    782	}
    783
    784	/*
    785	 * We have changed references at this point, we must abort the
    786	 * transaction if anything fails.
    787	 */
    788	must_abort = true;
    789
    790	memcpy(root_item, &root->root_item, sizeof(*root_item));
    791	btrfs_set_root_bytenr(root_item, eb->start);
    792	btrfs_set_root_level(root_item, btrfs_header_level(eb));
    793	btrfs_set_root_generation(root_item, trans->transid);
    794
    795	if (root->root_key.objectid == objectid) {
    796		btrfs_set_root_refs(root_item, 0);
    797		memset(&root_item->drop_progress, 0,
    798		       sizeof(struct btrfs_disk_key));
    799		btrfs_set_root_drop_level(root_item, 0);
    800	}
    801
    802	btrfs_tree_unlock(eb);
    803	free_extent_buffer(eb);
    804
    805	ret = btrfs_insert_root(trans, fs_info->tree_root,
    806				&root_key, root_item);
    807	if (ret)
    808		goto fail;
    809
    810	kfree(root_item);
    811
    812	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
    813	if (IS_ERR(reloc_root)) {
    814		ret = PTR_ERR(reloc_root);
    815		goto abort;
    816	}
    817	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
    818	reloc_root->last_trans = trans->transid;
    819	return reloc_root;
    820fail:
    821	kfree(root_item);
    822abort:
    823	if (must_abort)
    824		btrfs_abort_transaction(trans, ret);
    825	return ERR_PTR(ret);
    826}
    827
    828/*
    829 * create a reloc tree for a given fs tree. a reloc tree is just a
    830 * snapshot of the fs tree with a special root objectid.
    831 *
    832 * The reloc_root comes out of here with two references, one for
    833 * root->reloc_root, and another for being on the rc->reloc_roots list.
    834 */
    835int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
    836			  struct btrfs_root *root)
    837{
    838	struct btrfs_fs_info *fs_info = root->fs_info;
    839	struct btrfs_root *reloc_root;
    840	struct reloc_control *rc = fs_info->reloc_ctl;
    841	struct btrfs_block_rsv *rsv;
    842	int clear_rsv = 0;
    843	int ret;
    844
    845	if (!rc)
    846		return 0;
    847
    848	/*
    849	 * The subvolume has a reloc tree, but the swap is finished; no need
    850	 * to create/update the dead reloc tree.
    851	 */
    852	if (reloc_root_is_dead(root))
    853		return 0;
    854
    855	/*
    856	 * This is subtle but important.  We do not do
    857	 * record_root_in_transaction for reloc roots, instead we record their
    858	 * corresponding fs root, and then here we update the last trans for the
    859	 * reloc root.  This means that we have to do this for the entire life
    860	 * of the reloc root, regardless of which stage of the relocation we are
    861	 * in.
    862	 */
    863	if (root->reloc_root) {
    864		reloc_root = root->reloc_root;
    865		reloc_root->last_trans = trans->transid;
    866		return 0;
    867	}
    868
    869	/*
    870	 * We are merging reloc roots, we do not need new reloc trees.  Also
    871	 * reloc trees never need their own reloc tree.
    872	 */
    873	if (!rc->create_reloc_tree ||
    874	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
    875		return 0;
    876
    877	if (!trans->reloc_reserved) {
    878		rsv = trans->block_rsv;
    879		trans->block_rsv = rc->block_rsv;
    880		clear_rsv = 1;
    881	}
    882	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
    883	if (clear_rsv)
    884		trans->block_rsv = rsv;
    885	if (IS_ERR(reloc_root))
    886		return PTR_ERR(reloc_root);
    887
    888	ret = __add_reloc_root(reloc_root);
    889	ASSERT(ret != -EEXIST);
    890	if (ret) {
    891		/* Pairs with create_reloc_root */
    892		btrfs_put_root(reloc_root);
    893		return ret;
    894	}
    895	root->reloc_root = btrfs_grab_root(reloc_root);
    896	return 0;
    897}
    898
    899/*
    900 * update root item of reloc tree
    901 */
    902int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
    903			    struct btrfs_root *root)
    904{
    905	struct btrfs_fs_info *fs_info = root->fs_info;
    906	struct btrfs_root *reloc_root;
    907	struct btrfs_root_item *root_item;
    908	int ret;
    909
    910	if (!have_reloc_root(root))
    911		return 0;
    912
    913	reloc_root = root->reloc_root;
    914	root_item = &reloc_root->root_item;
    915
    916	/*
    917	 * We are probably ok here, but __del_reloc_root() will drop its ref of
    918	 * the root.  We have the ref for root->reloc_root, but just in case
    919	 * hold it while we update the reloc root.
    920	 */
    921	btrfs_grab_root(reloc_root);
    922
    923	/* root->reloc_root will stay until the current relocation is finished */
    924	if (fs_info->reloc_ctl->merge_reloc_tree &&
    925	    btrfs_root_refs(root_item) == 0) {
    926		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
    927		/*
    928		 * Mark the tree as dead before we change reloc_root so
    929		 * have_reloc_root will not touch it from now on.
    930		 */
    931		smp_wmb();
    932		__del_reloc_root(reloc_root);
    933	}
    934
    935	if (reloc_root->commit_root != reloc_root->node) {
    936		__update_reloc_root(reloc_root);
    937		btrfs_set_root_node(root_item, reloc_root->node);
    938		free_extent_buffer(reloc_root->commit_root);
    939		reloc_root->commit_root = btrfs_root_node(reloc_root);
    940	}
    941
    942	ret = btrfs_update_root(trans, fs_info->tree_root,
    943				&reloc_root->root_key, root_item);
    944	btrfs_put_root(reloc_root);
    945	return ret;
    946}
    947
    948/*
    949 * helper to find first cached inode with inode number >= objectid
    950 * in a subvolume
    951 */
    952static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
    953{
    954	struct rb_node *node;
    955	struct rb_node *prev;
    956	struct btrfs_inode *entry;
    957	struct inode *inode;
    958
    959	spin_lock(&root->inode_lock);
    960again:
    961	node = root->inode_tree.rb_node;
    962	prev = NULL;
    963	while (node) {
    964		prev = node;
    965		entry = rb_entry(node, struct btrfs_inode, rb_node);
    966
    967		if (objectid < btrfs_ino(entry))
    968			node = node->rb_left;
    969		else if (objectid > btrfs_ino(entry))
    970			node = node->rb_right;
    971		else
    972			break;
    973	}
    974	if (!node) {
    975		while (prev) {
    976			entry = rb_entry(prev, struct btrfs_inode, rb_node);
    977			if (objectid <= btrfs_ino(entry)) {
    978				node = prev;
    979				break;
    980			}
    981			prev = rb_next(prev);
    982		}
    983	}
    984	while (node) {
    985		entry = rb_entry(node, struct btrfs_inode, rb_node);
    986		inode = igrab(&entry->vfs_inode);
    987		if (inode) {
    988			spin_unlock(&root->inode_lock);
    989			return inode;
    990		}
    991
    992		objectid = btrfs_ino(entry) + 1;
    993		if (cond_resched_lock(&root->inode_lock))
    994			goto again;
    995
    996		node = rb_next(node);
    997	}
    998	spin_unlock(&root->inode_lock);
    999	return NULL;
   1000}
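
/*
 * The returned inode carries an igrab() reference, so a scan over a
 * subvolume's cached inodes pairs each lookup with iput(). A sketch of
 * the scan idiom (invalidate_extent_cache() below uses this shape):
 */
#if 0	/* illustrative sketch, not compiled */
static void scan_cached_inodes_sketch(struct btrfs_root *root)
{
	struct inode *inode;
	u64 objectid = 0;

	while ((inode = find_next_inode(root, objectid))) {
		objectid = btrfs_ino(BTRFS_I(inode)) + 1;
		/* ... inspect the inode ... */
		iput(inode);
	}
}
#endif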
   1001
   1002/*
   1003 * get new location of data
   1004 */
   1005static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
   1006			    u64 bytenr, u64 num_bytes)
   1007{
   1008	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
   1009	struct btrfs_path *path;
   1010	struct btrfs_file_extent_item *fi;
   1011	struct extent_buffer *leaf;
   1012	int ret;
   1013
   1014	path = btrfs_alloc_path();
   1015	if (!path)
   1016		return -ENOMEM;
   1017
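	/*
	 * The data reloc inode's file offsets mirror the block group's
	 * logical address space: index_cnt holds the block group's start
	 * bytenr (set when the relocation data inode is created), so the
	 * subtraction below maps a bytenr to a file offset.
	 */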
   1018	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
   1019	ret = btrfs_lookup_file_extent(NULL, root, path,
   1020			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
   1021	if (ret < 0)
   1022		goto out;
   1023	if (ret > 0) {
   1024		ret = -ENOENT;
   1025		goto out;
   1026	}
   1027
   1028	leaf = path->nodes[0];
   1029	fi = btrfs_item_ptr(leaf, path->slots[0],
   1030			    struct btrfs_file_extent_item);
   1031
   1032	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
   1033	       btrfs_file_extent_compression(leaf, fi) ||
   1034	       btrfs_file_extent_encryption(leaf, fi) ||
   1035	       btrfs_file_extent_other_encoding(leaf, fi));
   1036
   1037	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
   1038		ret = -EINVAL;
   1039		goto out;
   1040	}
   1041
   1042	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
   1043	ret = 0;
   1044out:
   1045	btrfs_free_path(path);
   1046	return ret;
   1047}
   1048
   1049/*
   1050 * update file extent items in the tree leaf to point to
   1051 * the new locations.
   1052 */
   1053static noinline_for_stack
   1054int replace_file_extents(struct btrfs_trans_handle *trans,
   1055			 struct reloc_control *rc,
   1056			 struct btrfs_root *root,
   1057			 struct extent_buffer *leaf)
   1058{
   1059	struct btrfs_fs_info *fs_info = root->fs_info;
   1060	struct btrfs_key key;
   1061	struct btrfs_file_extent_item *fi;
   1062	struct inode *inode = NULL;
   1063	u64 parent;
   1064	u64 bytenr;
   1065	u64 new_bytenr = 0;
   1066	u64 num_bytes;
   1067	u64 end;
   1068	u32 nritems;
   1069	u32 i;
   1070	int ret = 0;
   1071	int first = 1;
   1072	int dirty = 0;
   1073
   1074	if (rc->stage != UPDATE_DATA_PTRS)
   1075		return 0;
   1076
   1077	/* reloc trees always use full backref */
   1078	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
   1079		parent = leaf->start;
   1080	else
   1081		parent = 0;
   1082
   1083	nritems = btrfs_header_nritems(leaf);
   1084	for (i = 0; i < nritems; i++) {
   1085		struct btrfs_ref ref = { 0 };
   1086
   1087		cond_resched();
   1088		btrfs_item_key_to_cpu(leaf, &key, i);
   1089		if (key.type != BTRFS_EXTENT_DATA_KEY)
   1090			continue;
   1091		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
   1092		if (btrfs_file_extent_type(leaf, fi) ==
   1093		    BTRFS_FILE_EXTENT_INLINE)
   1094			continue;
   1095		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
   1096		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
   1097		if (bytenr == 0)
   1098			continue;
   1099		if (!in_range(bytenr, rc->block_group->start,
   1100			      rc->block_group->length))
   1101			continue;
   1102
   1103		/*
   1104		 * if we are modifying a block in the fs tree, wait for read_folio
   1105		 * to complete and drop the extent cache
   1106		 */
   1107		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
   1108			if (first) {
   1109				inode = find_next_inode(root, key.objectid);
   1110				first = 0;
   1111			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
   1112				btrfs_add_delayed_iput(inode);
   1113				inode = find_next_inode(root, key.objectid);
   1114			}
   1115			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
   1116				end = key.offset +
   1117				      btrfs_file_extent_num_bytes(leaf, fi);
   1118				WARN_ON(!IS_ALIGNED(key.offset,
   1119						    fs_info->sectorsize));
   1120				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
   1121				end--;
   1122				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
   1123						      key.offset, end);
   1124				if (!ret)
   1125					continue;
   1126
   1127				btrfs_drop_extent_cache(BTRFS_I(inode),
   1128						key.offset,	end, 1);
   1129				unlock_extent(&BTRFS_I(inode)->io_tree,
   1130					      key.offset, end);
   1131			}
   1132		}
   1133
   1134		ret = get_new_location(rc->data_inode, &new_bytenr,
   1135				       bytenr, num_bytes);
   1136		if (ret) {
   1137			/*
   1138			 * Don't have to abort since we've not changed anything
   1139			 * in the file extent yet.
   1140			 */
   1141			break;
   1142		}
   1143
   1144		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
   1145		dirty = 1;
   1146
   1147		key.offset -= btrfs_file_extent_offset(leaf, fi);
   1148		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
   1149				       num_bytes, parent);
   1150		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
   1151				    key.objectid, key.offset,
   1152				    root->root_key.objectid, false);
   1153		ret = btrfs_inc_extent_ref(trans, &ref);
   1154		if (ret) {
   1155			btrfs_abort_transaction(trans, ret);
   1156			break;
   1157		}
   1158
   1159		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
   1160				       num_bytes, parent);
   1161		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
   1162				    key.objectid, key.offset,
   1163				    root->root_key.objectid, false);
   1164		ret = btrfs_free_extent(trans, &ref);
   1165		if (ret) {
   1166			btrfs_abort_transaction(trans, ret);
   1167			break;
   1168		}
   1169	}
   1170	if (dirty)
   1171		btrfs_mark_buffer_dirty(leaf);
   1172	if (inode)
   1173		btrfs_add_delayed_iput(inode);
   1174	return ret;
   1175}
   1176
   1177static noinline_for_stack
   1178int memcmp_node_keys(struct extent_buffer *eb, int slot,
   1179		     struct btrfs_path *path, int level)
   1180{
   1181	struct btrfs_disk_key key1;
   1182	struct btrfs_disk_key key2;
   1183	btrfs_node_key(eb, &key1, slot);
   1184	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
   1185	return memcmp(&key1, &key2, sizeof(key1));
   1186}
   1187
   1188/*
   1189 * try to replace tree blocks in the fs tree with the new blocks
   1190 * in the reloc tree. tree blocks that haven't been modified since
   1191 * the reloc tree was created can be replaced.
   1192 *
   1193 * if a block was replaced, the level of the block + 1 is returned.
   1194 * if no block was replaced, 0 is returned. if there are other
   1195 * errors, a negative error number is returned.
   1196 */
   1197static noinline_for_stack
   1198int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
   1199		 struct btrfs_root *dest, struct btrfs_root *src,
   1200		 struct btrfs_path *path, struct btrfs_key *next_key,
   1201		 int lowest_level, int max_level)
   1202{
   1203	struct btrfs_fs_info *fs_info = dest->fs_info;
   1204	struct extent_buffer *eb;
   1205	struct extent_buffer *parent;
   1206	struct btrfs_ref ref = { 0 };
   1207	struct btrfs_key key;
   1208	u64 old_bytenr;
   1209	u64 new_bytenr;
   1210	u64 old_ptr_gen;
   1211	u64 new_ptr_gen;
   1212	u64 last_snapshot;
   1213	u32 blocksize;
   1214	int cow = 0;
   1215	int level;
   1216	int ret;
   1217	int slot;
   1218
   1219	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
   1220	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
   1221
   1222	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
   1223again:
   1224	slot = path->slots[lowest_level];
   1225	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
   1226
   1227	eb = btrfs_lock_root_node(dest);
   1228	level = btrfs_header_level(eb);
   1229
   1230	if (level < lowest_level) {
   1231		btrfs_tree_unlock(eb);
   1232		free_extent_buffer(eb);
   1233		return 0;
   1234	}
   1235
   1236	if (cow) {
   1237		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
   1238				      BTRFS_NESTING_COW);
   1239		if (ret) {
   1240			btrfs_tree_unlock(eb);
   1241			free_extent_buffer(eb);
   1242			return ret;
   1243		}
   1244	}
   1245
   1246	if (next_key) {
   1247		next_key->objectid = (u64)-1;
   1248		next_key->type = (u8)-1;
   1249		next_key->offset = (u64)-1;
   1250	}
   1251
   1252	parent = eb;
   1253	while (1) {
   1254		level = btrfs_header_level(parent);
   1255		ASSERT(level >= lowest_level);
   1256
   1257		ret = btrfs_bin_search(parent, &key, &slot);
   1258		if (ret < 0)
   1259			break;
   1260		if (ret && slot > 0)
   1261			slot--;
   1262
   1263		if (next_key && slot + 1 < btrfs_header_nritems(parent))
   1264			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
   1265
   1266		old_bytenr = btrfs_node_blockptr(parent, slot);
   1267		blocksize = fs_info->nodesize;
   1268		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
   1269
   1270		if (level <= max_level) {
   1271			eb = path->nodes[level];
   1272			new_bytenr = btrfs_node_blockptr(eb,
   1273							path->slots[level]);
   1274			new_ptr_gen = btrfs_node_ptr_generation(eb,
   1275							path->slots[level]);
   1276		} else {
   1277			new_bytenr = 0;
   1278			new_ptr_gen = 0;
   1279		}
   1280
   1281		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
   1282			ret = level;
   1283			break;
   1284		}
   1285
   1286		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
   1287		    memcmp_node_keys(parent, slot, path, level)) {
   1288			if (level <= lowest_level) {
   1289				ret = 0;
   1290				break;
   1291			}
   1292
   1293			eb = btrfs_read_node_slot(parent, slot);
   1294			if (IS_ERR(eb)) {
   1295				ret = PTR_ERR(eb);
   1296				break;
   1297			}
   1298			btrfs_tree_lock(eb);
   1299			if (cow) {
   1300				ret = btrfs_cow_block(trans, dest, eb, parent,
   1301						      slot, &eb,
   1302						      BTRFS_NESTING_COW);
   1303				if (ret) {
   1304					btrfs_tree_unlock(eb);
   1305					free_extent_buffer(eb);
   1306					break;
   1307				}
   1308			}
   1309
   1310			btrfs_tree_unlock(parent);
   1311			free_extent_buffer(parent);
   1312
   1313			parent = eb;
   1314			continue;
   1315		}
   1316
   1317		if (!cow) {
   1318			btrfs_tree_unlock(parent);
   1319			free_extent_buffer(parent);
   1320			cow = 1;
   1321			goto again;
   1322		}
   1323
   1324		btrfs_node_key_to_cpu(path->nodes[level], &key,
   1325				      path->slots[level]);
   1326		btrfs_release_path(path);
   1327
   1328		path->lowest_level = level;
   1329		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
   1330		path->lowest_level = 0;
   1331		if (ret) {
   1332			if (ret > 0)
   1333				ret = -ENOENT;
   1334			break;
   1335		}
   1336
   1337		/*
   1338		 * Inform qgroup to trace both subtrees.
   1339		 *
   1340		 * We must trace both trees.
   1341		 * 1) Tree reloc subtree
   1342		 *    If not traced, we will leak data numbers
   1343		 * 2) Fs subtree
   1344		 *    If not traced, we will double count old data
   1345		 *
   1346		 * We don't scan the subtree right now, but only record
   1347		 * the swapped tree blocks.
   1348		 * The real subtree rescan is delayed until we have new
   1349		 * CoW on the subtree root node before transaction commit.
   1350		 */
   1351		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
   1352				rc->block_group, parent, slot,
   1353				path->nodes[level], path->slots[level],
   1354				last_snapshot);
   1355		if (ret < 0)
   1356			break;
   1357		/*
   1358		 * swap blocks in fs tree and reloc tree.
   1359		 */
   1360		btrfs_set_node_blockptr(parent, slot, new_bytenr);
   1361		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
   1362		btrfs_mark_buffer_dirty(parent);
   1363
   1364		btrfs_set_node_blockptr(path->nodes[level],
   1365					path->slots[level], old_bytenr);
   1366		btrfs_set_node_ptr_generation(path->nodes[level],
   1367					      path->slots[level], old_ptr_gen);
   1368		btrfs_mark_buffer_dirty(path->nodes[level]);
   1369
   1370		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
   1371				       blocksize, path->nodes[level]->start);
   1372		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
   1373				    0, true);
   1374		ret = btrfs_inc_extent_ref(trans, &ref);
   1375		if (ret) {
   1376			btrfs_abort_transaction(trans, ret);
   1377			break;
   1378		}
   1379		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
   1380				       blocksize, 0);
   1381		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
   1382				    true);
   1383		ret = btrfs_inc_extent_ref(trans, &ref);
   1384		if (ret) {
   1385			btrfs_abort_transaction(trans, ret);
   1386			break;
   1387		}
   1388
   1389		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
   1390				       blocksize, path->nodes[level]->start);
   1391		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
   1392				    0, true);
   1393		ret = btrfs_free_extent(trans, &ref);
   1394		if (ret) {
   1395			btrfs_abort_transaction(trans, ret);
   1396			break;
   1397		}
   1398
   1399		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
   1400				       blocksize, 0);
   1401		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
   1402				    0, true);
   1403		ret = btrfs_free_extent(trans, &ref);
   1404		if (ret) {
   1405			btrfs_abort_transaction(trans, ret);
   1406			break;
   1407		}
   1408
   1409		btrfs_unlock_up_safe(path, 0);
   1410
   1411		ret = level;
   1412		break;
   1413	}
   1414	btrfs_tree_unlock(parent);
   1415	free_extent_buffer(parent);
   1416	return ret;
   1417}
   1418
   1419/*
   1420 * helper to find the next relocated block in the reloc tree
   1421 */
   1422static noinline_for_stack
   1423int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
   1424		       int *level)
   1425{
   1426	struct extent_buffer *eb;
   1427	int i;
   1428	u64 last_snapshot;
   1429	u32 nritems;
   1430
   1431	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
   1432
   1433	for (i = 0; i < *level; i++) {
   1434		free_extent_buffer(path->nodes[i]);
   1435		path->nodes[i] = NULL;
   1436	}
   1437
   1438	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
   1439		eb = path->nodes[i];
   1440		nritems = btrfs_header_nritems(eb);
   1441		while (path->slots[i] + 1 < nritems) {
   1442			path->slots[i]++;
   1443			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
   1444			    last_snapshot)
   1445				continue;
   1446
   1447			*level = i;
   1448			return 0;
   1449		}
   1450		free_extent_buffer(path->nodes[i]);
   1451		path->nodes[i] = NULL;
   1452	}
   1453	return 1;
   1454}
   1455
   1456/*
   1457 * walk down the reloc tree to find the relocated block of the lowest level
   1458 */
   1459static noinline_for_stack
   1460int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
   1461			 int *level)
   1462{
   1463	struct extent_buffer *eb = NULL;
   1464	int i;
   1465	u64 ptr_gen = 0;
   1466	u64 last_snapshot;
   1467	u32 nritems;
   1468
   1469	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
   1470
   1471	for (i = *level; i > 0; i--) {
   1472		eb = path->nodes[i];
   1473		nritems = btrfs_header_nritems(eb);
   1474		while (path->slots[i] < nritems) {
   1475			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
   1476			if (ptr_gen > last_snapshot)
   1477				break;
   1478			path->slots[i]++;
   1479		}
   1480		if (path->slots[i] >= nritems) {
   1481			if (i == *level)
   1482				break;
   1483			*level = i + 1;
   1484			return 0;
   1485		}
   1486		if (i == 1) {
   1487			*level = i;
   1488			return 0;
   1489		}
   1490
   1491		eb = btrfs_read_node_slot(eb, path->slots[i]);
   1492		if (IS_ERR(eb))
   1493			return PTR_ERR(eb);
   1494		BUG_ON(btrfs_header_level(eb) != i - 1);
   1495		path->nodes[i - 1] = eb;
   1496		path->slots[i - 1] = 0;
   1497	}
   1498	return 1;
   1499}
   1500
   1501/*
   1502 * invalidate extent cache for file extents whose keys are in the
   1503 * range [min_key, max_key)
   1504 */
   1505static int invalidate_extent_cache(struct btrfs_root *root,
   1506				   struct btrfs_key *min_key,
   1507				   struct btrfs_key *max_key)
   1508{
   1509	struct btrfs_fs_info *fs_info = root->fs_info;
   1510	struct inode *inode = NULL;
   1511	u64 objectid;
   1512	u64 start, end;
   1513	u64 ino;
   1514
   1515	objectid = min_key->objectid;
   1516	while (1) {
   1517		cond_resched();
   1518		iput(inode);
   1519
   1520		if (objectid > max_key->objectid)
   1521			break;
   1522
   1523		inode = find_next_inode(root, objectid);
   1524		if (!inode)
   1525			break;
   1526		ino = btrfs_ino(BTRFS_I(inode));
   1527
   1528		if (ino > max_key->objectid) {
   1529			iput(inode);
   1530			break;
   1531		}
   1532
   1533		objectid = ino + 1;
   1534		if (!S_ISREG(inode->i_mode))
   1535			continue;
   1536
   1537		if (unlikely(min_key->objectid == ino)) {
   1538			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
   1539				continue;
   1540			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
   1541				start = 0;
   1542			else {
   1543				start = min_key->offset;
   1544				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
   1545			}
   1546		} else {
   1547			start = 0;
   1548		}
   1549
   1550		if (unlikely(max_key->objectid == ino)) {
   1551			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
   1552				continue;
   1553			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
   1554				end = (u64)-1;
   1555			} else {
   1556				if (max_key->offset == 0)
   1557					continue;
   1558				end = max_key->offset;
   1559				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
   1560				end--;
   1561			}
   1562		} else {
   1563			end = (u64)-1;
   1564		}
   1565
   1566		/* the lock_extent waits for read_folio to complete */
   1567		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
   1568		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
   1569		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
   1570	}
   1571	return 0;
   1572}
   1573
   1574static int find_next_key(struct btrfs_path *path, int level,
   1575			 struct btrfs_key *key)
   1576
   1577{
   1578	while (level < BTRFS_MAX_LEVEL) {
   1579		if (!path->nodes[level])
   1580			break;
   1581		if (path->slots[level] + 1 <
   1582		    btrfs_header_nritems(path->nodes[level])) {
   1583			btrfs_node_key_to_cpu(path->nodes[level], key,
   1584					      path->slots[level] + 1);
   1585			return 0;
   1586		}
   1587		level++;
   1588	}
   1589	return 1;
   1590}
   1591
   1592/*
   1593 * Insert current subvolume into reloc_control::dirty_subvol_roots
   1594 */
   1595static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
   1596			       struct reloc_control *rc,
   1597			       struct btrfs_root *root)
   1598{
   1599	struct btrfs_root *reloc_root = root->reloc_root;
   1600	struct btrfs_root_item *reloc_root_item;
   1601	int ret;
   1602
   1603	/* @root must be a subvolume tree root with a valid reloc tree */
   1604	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
   1605	ASSERT(reloc_root);
   1606
   1607	reloc_root_item = &reloc_root->root_item;
   1608	memset(&reloc_root_item->drop_progress, 0,
   1609		sizeof(reloc_root_item->drop_progress));
   1610	btrfs_set_root_drop_level(reloc_root_item, 0);
   1611	btrfs_set_root_refs(reloc_root_item, 0);
   1612	ret = btrfs_update_reloc_root(trans, root);
   1613	if (ret)
   1614		return ret;
   1615
   1616	if (list_empty(&root->reloc_dirty_list)) {
   1617		btrfs_grab_root(root);
   1618		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
   1619	}
   1620
   1621	return 0;
   1622}
   1623
   1624static int clean_dirty_subvols(struct reloc_control *rc)
   1625{
   1626	struct btrfs_root *root;
   1627	struct btrfs_root *next;
   1628	int ret = 0;
   1629	int ret2;
   1630
   1631	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
   1632				 reloc_dirty_list) {
   1633		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
   1634			/* Merged subvolume, cleanup its reloc root */
   1635			struct btrfs_root *reloc_root = root->reloc_root;
   1636
   1637			list_del_init(&root->reloc_dirty_list);
   1638			root->reloc_root = NULL;
   1639			/*
   1640			 * Need barrier to ensure clear_bit() only happens after
   1641			 * root->reloc_root = NULL. Pairs with have_reloc_root.
   1642			 */
   1643			smp_wmb();
   1644			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
   1645			if (reloc_root) {
   1646				/*
   1647				 * btrfs_drop_snapshot drops our ref we hold for
   1648				 * ->reloc_root.  If it fails however we must
   1649				 * drop the ref ourselves.
   1650				 */
   1651				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
   1652				if (ret2 < 0) {
   1653					btrfs_put_root(reloc_root);
   1654					if (!ret)
   1655						ret = ret2;
   1656				}
   1657			}
   1658			btrfs_put_root(root);
   1659		} else {
   1660			/* Orphan reloc tree, just clean it up */
   1661			ret2 = btrfs_drop_snapshot(root, 0, 1);
   1662			if (ret2 < 0) {
   1663				btrfs_put_root(root);
   1664				if (!ret)
   1665					ret = ret2;
   1666			}
   1667		}
   1668	}
   1669	return ret;
   1670}
   1671
   1672/*
   1673 * merge the relocated tree blocks in the reloc tree with the
   1674 * corresponding fs tree.
   1675 */
   1676static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
   1677					       struct btrfs_root *root)
   1678{
   1679	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   1680	struct btrfs_key key;
   1681	struct btrfs_key next_key;
   1682	struct btrfs_trans_handle *trans = NULL;
   1683	struct btrfs_root *reloc_root;
   1684	struct btrfs_root_item *root_item;
   1685	struct btrfs_path *path;
   1686	struct extent_buffer *leaf;
   1687	int reserve_level;
   1688	int level;
   1689	int max_level;
   1690	int replaced = 0;
   1691	int ret = 0;
   1692	u32 min_reserved;
   1693
   1694	path = btrfs_alloc_path();
   1695	if (!path)
   1696		return -ENOMEM;
   1697	path->reada = READA_FORWARD;
   1698
   1699	reloc_root = root->reloc_root;
   1700	root_item = &reloc_root->root_item;
   1701
   1702	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
   1703		level = btrfs_root_level(root_item);
   1704		atomic_inc(&reloc_root->node->refs);
   1705		path->nodes[level] = reloc_root->node;
   1706		path->slots[level] = 0;
   1707	} else {
   1708		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
   1709
   1710		level = btrfs_root_drop_level(root_item);
   1711		BUG_ON(level == 0);
   1712		path->lowest_level = level;
   1713		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
   1714		path->lowest_level = 0;
   1715		if (ret < 0) {
   1716			btrfs_free_path(path);
   1717			return ret;
   1718		}
   1719
   1720		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
   1721				      path->slots[level]);
   1722		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
   1723
   1724		btrfs_unlock_up_safe(path, 0);
   1725	}
   1726
   1727	/*
   1728	 * In merge_reloc_root(), we modify the upper level pointer to swap the
   1729	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
   1730	 * block COW, we COW at most from level 1 to root level for each tree.
   1731	 *
   1732	 * So the needed metadata size is at most root_level * nodesize,
   1733	 * multiplied by 2 since we have two trees to COW.
   1734	 */
   1735	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
   1736	min_reserved = fs_info->nodesize * reserve_level * 2;
   1737	memset(&next_key, 0, sizeof(next_key));
   1738
   1739	while (1) {
   1740		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
   1741					     min_reserved,
   1742					     BTRFS_RESERVE_FLUSH_LIMIT);
   1743		if (ret)
   1744			goto out;
   1745		trans = btrfs_start_transaction(root, 0);
   1746		if (IS_ERR(trans)) {
   1747			ret = PTR_ERR(trans);
   1748			trans = NULL;
   1749			goto out;
   1750		}
   1751
   1752		/*
   1753		 * At this point we no longer have a reloc_control, so we can't
   1754		 * depend on btrfs_init_reloc_root to update our last_trans.
   1755		 *
   1756		 * But that's ok, we started the trans handle on our
   1757		 * corresponding fs_root, which means it's been added to the
   1758		 * dirty list.  At commit time we'll still call
   1759		 * btrfs_update_reloc_root() and update our root item
   1760		 * appropriately.
   1761		 */
   1762		reloc_root->last_trans = trans->transid;
   1763		trans->block_rsv = rc->block_rsv;
   1764
   1765		replaced = 0;
   1766		max_level = level;
   1767
   1768		ret = walk_down_reloc_tree(reloc_root, path, &level);
   1769		if (ret < 0)
   1770			goto out;
   1771		if (ret > 0)
   1772			break;
   1773
   1774		if (!find_next_key(path, level, &key) &&
   1775		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
   1776			ret = 0;
   1777		} else {
   1778			ret = replace_path(trans, rc, root, reloc_root, path,
   1779					   &next_key, level, max_level);
   1780		}
   1781		if (ret < 0)
   1782			goto out;
   1783		if (ret > 0) {
   1784			level = ret;
   1785			btrfs_node_key_to_cpu(path->nodes[level], &key,
   1786					      path->slots[level]);
   1787			replaced = 1;
   1788		}
   1789
   1790		ret = walk_up_reloc_tree(reloc_root, path, &level);
   1791		if (ret > 0)
   1792			break;
   1793
   1794		BUG_ON(level == 0);
   1795		/*
   1796		 * save the merging progress in the drop_progress.
   1797		 * this is OK since root refs == 1 in this case.
   1798		 */
   1799		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
   1800			       path->slots[level]);
   1801		btrfs_set_root_drop_level(root_item, level);
   1802
   1803		btrfs_end_transaction_throttle(trans);
   1804		trans = NULL;
   1805
   1806		btrfs_btree_balance_dirty(fs_info);
   1807
   1808		if (replaced && rc->stage == UPDATE_DATA_PTRS)
   1809			invalidate_extent_cache(root, &key, &next_key);
   1810	}
   1811
   1812	/*
   1813	 * handle the case where only one block in the fs tree needs to be
   1814	 * relocated and the block is the tree root.
   1815	 */
   1816	leaf = btrfs_lock_root_node(root);
   1817	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
   1818			      BTRFS_NESTING_COW);
   1819	btrfs_tree_unlock(leaf);
   1820	free_extent_buffer(leaf);
   1821out:
   1822	btrfs_free_path(path);
   1823
   1824	if (ret == 0) {
   1825		ret = insert_dirty_subvol(trans, rc, root);
   1826		if (ret)
   1827			btrfs_abort_transaction(trans, ret);
   1828	}
   1829
   1830	if (trans)
   1831		btrfs_end_transaction_throttle(trans);
   1832
   1833	btrfs_btree_balance_dirty(fs_info);
   1834
   1835	if (replaced && rc->stage == UPDATE_DATA_PTRS)
   1836		invalidate_extent_cache(root, &key, &next_key);
   1837
   1838	return ret;
   1839}
   1840
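       /*
        * Reserve space for the upcoming merge and make every reloc root
        * visible to btrfs_recover_relocation() by setting its root refs to 1
        * and updating its root item, then commit the transaction.
        */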
   1841static noinline_for_stack
   1842int prepare_to_merge(struct reloc_control *rc, int err)
   1843{
   1844	struct btrfs_root *root = rc->extent_root;
   1845	struct btrfs_fs_info *fs_info = root->fs_info;
   1846	struct btrfs_root *reloc_root;
   1847	struct btrfs_trans_handle *trans;
   1848	LIST_HEAD(reloc_roots);
   1849	u64 num_bytes = 0;
   1850	int ret;
   1851
   1852	mutex_lock(&fs_info->reloc_mutex);
   1853	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
   1854	rc->merging_rsv_size += rc->nodes_relocated * 2;
   1855	mutex_unlock(&fs_info->reloc_mutex);
   1856
   1857again:
   1858	if (!err) {
   1859		num_bytes = rc->merging_rsv_size;
   1860		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
   1861					  BTRFS_RESERVE_FLUSH_ALL);
   1862		if (ret)
   1863			err = ret;
   1864	}
   1865
   1866	trans = btrfs_join_transaction(rc->extent_root);
   1867	if (IS_ERR(trans)) {
   1868		if (!err)
   1869			btrfs_block_rsv_release(fs_info, rc->block_rsv,
   1870						num_bytes, NULL);
   1871		return PTR_ERR(trans);
   1872	}
   1873
   1874	if (!err) {
   1875		if (num_bytes != rc->merging_rsv_size) {
   1876			btrfs_end_transaction(trans);
   1877			btrfs_block_rsv_release(fs_info, rc->block_rsv,
   1878						num_bytes, NULL);
   1879			goto again;
   1880		}
   1881	}
   1882
   1883	rc->merge_reloc_tree = 1;
   1884
   1885	while (!list_empty(&rc->reloc_roots)) {
   1886		reloc_root = list_entry(rc->reloc_roots.next,
   1887					struct btrfs_root, root_list);
   1888		list_del_init(&reloc_root->root_list);
   1889
   1890		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
   1891				false);
   1892		if (IS_ERR(root)) {
   1893			/*
   1894			 * Even if we have an error we need this reloc root
   1895			 * back on our list so we can clean up properly.
   1896			 */
   1897			list_add(&reloc_root->root_list, &reloc_roots);
   1898			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
   1899			if (!err)
   1900				err = PTR_ERR(root);
   1901			break;
   1902		}
   1903		ASSERT(root->reloc_root == reloc_root);
   1904
   1905		/*
   1906		 * set reference count to 1, so btrfs_recover_relocation
   1907		 * knows it should resume merging
   1908		 */
   1909		if (!err)
   1910			btrfs_set_root_refs(&reloc_root->root_item, 1);
   1911		ret = btrfs_update_reloc_root(trans, root);
   1912
   1913		/*
   1914		 * Even if we have an error we need this reloc root back on our
   1915		 * list so we can clean up properly.
   1916		 */
   1917		list_add(&reloc_root->root_list, &reloc_roots);
   1918		btrfs_put_root(root);
   1919
   1920		if (ret) {
   1921			btrfs_abort_transaction(trans, ret);
   1922			if (!err)
   1923				err = ret;
   1924			break;
   1925		}
   1926	}
   1927
   1928	list_splice(&reloc_roots, &rc->reloc_roots);
   1929
   1930	if (!err)
   1931		err = btrfs_commit_transaction(trans);
   1932	else
   1933		btrfs_end_transaction(trans);
   1934	return err;
   1935}
   1936
   1937static noinline_for_stack
   1938void free_reloc_roots(struct list_head *list)
   1939{
   1940	struct btrfs_root *reloc_root, *tmp;
   1941
   1942	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
   1943		__del_reloc_root(reloc_root);
   1944}
   1945
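       /*
        * Merge every reloc root with its fs root.  Reloc roots with zero root
        * refs are orphans and are only queued on rc->dirty_subvol_roots for
        * cleanup.  Loops until no reloc root is left on rc->reloc_roots.
        */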
   1946static noinline_for_stack
   1947void merge_reloc_roots(struct reloc_control *rc)
   1948{
   1949	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   1950	struct btrfs_root *root;
   1951	struct btrfs_root *reloc_root;
   1952	LIST_HEAD(reloc_roots);
   1953	int found = 0;
   1954	int ret = 0;
   1955again:
   1956	root = rc->extent_root;
   1957
   1958	/*
   1959	 * this serializes us with btrfs_record_root_in_transaction;
   1960	 * we have to make sure nobody is in the middle of
   1961	 * adding their roots to the list while we are
   1962	 * doing this splice.
   1963	 */
   1964	mutex_lock(&fs_info->reloc_mutex);
   1965	list_splice_init(&rc->reloc_roots, &reloc_roots);
   1966	mutex_unlock(&fs_info->reloc_mutex);
   1967
   1968	while (!list_empty(&reloc_roots)) {
   1969		found = 1;
   1970		reloc_root = list_entry(reloc_roots.next,
   1971					struct btrfs_root, root_list);
   1972
   1973		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
   1974					 false);
   1975		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
   1976			if (IS_ERR(root)) {
   1977				/*
   1978				 * For recovery we read the fs roots on mount,
   1979				 * and if we didn't find the root then we marked
   1980				 * the reloc root as a garbage root.  For normal
   1981				 * relocation obviously the root should exist in
   1982				 * memory.  However there's no reason we can't
   1983				 * handle the error properly here just in case.
   1984				 */
   1985				ASSERT(0);
   1986				ret = PTR_ERR(root);
   1987				goto out;
   1988			}
   1989			if (root->reloc_root != reloc_root) {
   1990				/*
   1991				 * This is actually impossible without something
   1992				 * going really wrong (like weird race condition
   1993				 * or cosmic rays).
   1994				 */
   1995				ASSERT(0);
   1996				ret = -EINVAL;
   1997				goto out;
   1998			}
   1999			ret = merge_reloc_root(rc, root);
   2000			btrfs_put_root(root);
   2001			if (ret) {
   2002				if (list_empty(&reloc_root->root_list))
   2003					list_add_tail(&reloc_root->root_list,
   2004						      &reloc_roots);
   2005				goto out;
   2006			}
   2007		} else {
   2008			if (!IS_ERR(root)) {
   2009				if (root->reloc_root == reloc_root) {
   2010					root->reloc_root = NULL;
   2011					btrfs_put_root(reloc_root);
   2012				}
   2013				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
   2014					  &root->state);
   2015				btrfs_put_root(root);
   2016			}
   2017
   2018			list_del_init(&reloc_root->root_list);
   2019			/* Don't forget to queue this reloc root for cleanup */
   2020			list_add_tail(&reloc_root->reloc_dirty_list,
   2021				      &rc->dirty_subvol_roots);
   2022		}
   2023	}
   2024
   2025	if (found) {
   2026		found = 0;
   2027		goto again;
   2028	}
   2029out:
   2030	if (ret) {
   2031		btrfs_handle_fs_error(fs_info, ret, NULL);
   2032		free_reloc_roots(&reloc_roots);
   2033
   2034		/* new reloc root may be added */
   2035		mutex_lock(&fs_info->reloc_mutex);
   2036		list_splice_init(&rc->reloc_roots, &reloc_roots);
   2037		mutex_unlock(&fs_info->reloc_mutex);
   2038		free_reloc_roots(&reloc_roots);
   2039	}
   2040
   2041	/*
   2042	 * We used to have
   2043	 *
   2044	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
   2045	 *
   2046	 * here, but it's wrong.  If we fail to start the transaction in
   2047	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
   2048	 * have actually been removed from the reloc_root_tree rb tree.  This is
   2049	 * fine because we're bailing here, and we hold a reference on the root
   2050	 * for the list that holds it, so these roots will be cleaned up when we
   2051	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
   2052	 * will be cleaned up on unmount.
   2053	 *
   2054	 * The remaining nodes will be cleaned up by free_reloc_control.
   2055	 */
   2056}
   2057
   2058static void free_block_list(struct rb_root *blocks)
   2059{
   2060	struct tree_block *block;
   2061	struct rb_node *rb_node;
   2062	while ((rb_node = rb_first(blocks))) {
   2063		block = rb_entry(rb_node, struct tree_block, rb_node);
   2064		rb_erase(rb_node, blocks);
   2065		kfree(block);
   2066	}
   2067}
   2068
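       /*
        * Record the fs root that owns @reloc_root in the current transaction.
        * No-op if the reloc root was already recorded in this transaction;
        * returns -EUCLEAN if the fs root is associated with a different reloc
        * root.
        */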
   2069static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
   2070				      struct btrfs_root *reloc_root)
   2071{
   2072	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
   2073	struct btrfs_root *root;
   2074	int ret;
   2075
   2076	if (reloc_root->last_trans == trans->transid)
   2077		return 0;
   2078
   2079	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
   2080
   2081	/*
   2082	 * This should succeed, since we can't have a reloc root without having
   2083	 * already looked up the actual root and created the reloc root for this
   2084	 * root.
   2085	 *
   2086	 * However if there's some sort of corruption where we have a ref to a
   2087	 * reloc root without a corresponding root this could return ENOENT.
   2088	 */
   2089	if (IS_ERR(root)) {
   2090		ASSERT(0);
   2091		return PTR_ERR(root);
   2092	}
   2093	if (root->reloc_root != reloc_root) {
   2094		ASSERT(0);
   2095		btrfs_err(fs_info,
   2096			  "root %llu has two reloc roots associated with it",
   2097			  reloc_root->root_key.offset);
   2098		btrfs_put_root(root);
   2099		return -EUCLEAN;
   2100	}
   2101	ret = btrfs_record_root_in_trans(trans, root);
   2102	btrfs_put_root(root);
   2103
   2104	return ret;
   2105}
   2106
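       /*
        * Walk up the backref tree from @node until a root is found, record
        * that root in the transaction and return the matching reloc root (or
        * the root itself if it already is a reloc root).  Also fills in
        * rc->backref_cache.path for btrfs_reloc_cow_block().
        */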
   2107static noinline_for_stack
   2108struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
   2109				     struct reloc_control *rc,
   2110				     struct btrfs_backref_node *node,
   2111				     struct btrfs_backref_edge *edges[])
   2112{
   2113	struct btrfs_backref_node *next;
   2114	struct btrfs_root *root;
   2115	int index = 0;
   2116	int ret;
   2117
   2118	next = node;
   2119	while (1) {
   2120		cond_resched();
   2121		next = walk_up_backref(next, edges, &index);
   2122		root = next->root;
   2123
   2124		/*
   2125		 * If there is no root, then our references for this block are
   2126		 * incomplete, as we should be able to walk all the way up to a
   2127		 * block that is owned by a root.
   2128		 *
   2129		 * This path is only for SHAREABLE roots, so if we come upon a
   2130		 * non-SHAREABLE root then we have backrefs that resolve
   2131		 * improperly.
   2132		 *
   2133		 * Both of these cases indicate file system corruption, or a bug
   2134		 * in the backref walking code.
   2135		 */
   2136		if (!root) {
   2137			ASSERT(0);
   2138			btrfs_err(trans->fs_info,
   2139		"bytenr %llu doesn't have a backref path ending in a root",
   2140				  node->bytenr);
   2141			return ERR_PTR(-EUCLEAN);
   2142		}
   2143		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
   2144			ASSERT(0);
   2145			btrfs_err(trans->fs_info,
   2146	"bytenr %llu has multiple refs with one ending in a non-shareable root",
   2147				  node->bytenr);
   2148			return ERR_PTR(-EUCLEAN);
   2149		}
   2150
   2151		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
   2152			ret = record_reloc_root_in_trans(trans, root);
   2153			if (ret)
   2154				return ERR_PTR(ret);
   2155			break;
   2156		}
   2157
   2158		ret = btrfs_record_root_in_trans(trans, root);
   2159		if (ret)
   2160			return ERR_PTR(ret);
   2161		root = root->reloc_root;
   2162
   2163		/*
   2164		 * We could have raced with another thread which failed, so
   2165		 * root->reloc_root may not be set, return ENOENT in this case.
   2166		 */
   2167		if (!root)
   2168			return ERR_PTR(-ENOENT);
   2169
   2170		if (next->new_bytenr != root->node->start) {
   2171			/*
   2172			 * We just created the reloc root, so we shouldn't have
   2173			 * ->new_bytenr set and this shouldn't be in the changed
   2174			 * list.  If it is then we have multiple roots pointing
   2175			 * at the same bytenr which indicates corruption, or
   2176			 * we've made a mistake in the backref walking code.
   2177			 */
   2178			ASSERT(next->new_bytenr == 0);
   2179			ASSERT(list_empty(&next->list));
   2180			if (next->new_bytenr || !list_empty(&next->list)) {
   2181				btrfs_err(trans->fs_info,
   2182	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
   2183					  node->bytenr, next->bytenr);
   2184				return ERR_PTR(-EUCLEAN);
   2185			}
   2186
   2187			next->new_bytenr = root->node->start;
   2188			btrfs_put_root(next->root);
   2189			next->root = btrfs_grab_root(root);
   2190			ASSERT(next->root);
   2191			list_add_tail(&next->list,
   2192				      &rc->backref_cache.changed);
   2193			mark_block_processed(rc, next);
   2194			break;
   2195		}
   2196
   2197		WARN_ON(1);
   2198		root = NULL;
   2199		next = walk_down_backref(edges, &index);
   2200		if (!next || next->level <= node->level)
   2201			break;
   2202	}
   2203	if (!root) {
   2204		/*
   2205		 * This can happen if there's fs corruption or if there's a bug
   2206		 * in the backref lookup code.
   2207		 */
   2208		ASSERT(0);
   2209		return ERR_PTR(-ENOENT);
   2210	}
   2211
   2212	next = node;
   2213	/* setup backref node path for btrfs_reloc_cow_block */
   2214	while (1) {
   2215		rc->backref_cache.path[next->level] = next;
   2216		if (--index < 0)
   2217			break;
   2218		next = edges[index]->node[UPPER];
   2219	}
   2220	return root;
   2221}
   2222
   2223/*
   2224 * Select a tree root for relocation.
   2225 *
   2226 * Return NULL if the block is shareable but not the root of its tree; we
   2227 * should use do_relocation() in this case.
   2228 *
   2229 * Return a tree root pointer if the block belongs to a non-shareable tree
   2230 * or is the root of a shareable fs tree; return -ENOENT for a reloc tree root.
   2231 */
   2232static noinline_for_stack
   2233struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
   2234{
   2235	struct btrfs_backref_node *next;
   2236	struct btrfs_root *root;
   2237	struct btrfs_root *fs_root = NULL;
   2238	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
   2239	int index = 0;
   2240
   2241	next = node;
   2242	while (1) {
   2243		cond_resched();
   2244		next = walk_up_backref(next, edges, &index);
   2245		root = next->root;
   2246
   2247		/*
   2248		 * This can occur if we have incomplete extent refs leading all
   2249		 * the way up a particular path, in this case return -EUCLEAN.
   2250		 */
   2251		if (!root)
   2252			return ERR_PTR(-EUCLEAN);
   2253
   2254		/* No other choice for non-shareable tree */
   2255		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
   2256			return root;
   2257
   2258		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
   2259			fs_root = root;
   2260
   2261		if (next != node)
   2262			return NULL;
   2263
   2264		next = walk_down_backref(edges, &index);
   2265		if (!next || next->level <= node->level)
   2266			break;
   2267	}
   2268
   2269	if (!fs_root)
   2270		return ERR_PTR(-ENOENT);
   2271	return fs_root;
   2272}
   2273
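       /*
        * Estimate the metadata bytes needed to COW @node and the unprocessed
        * tree blocks above it, by walking every backref path upwards and
        * counting one nodesize per block visited.
        */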
   2274static noinline_for_stack
   2275u64 calcu_metadata_size(struct reloc_control *rc,
   2276			struct btrfs_backref_node *node, int reserve)
   2277{
   2278	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   2279	struct btrfs_backref_node *next = node;
   2280	struct btrfs_backref_edge *edge;
   2281	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
   2282	u64 num_bytes = 0;
   2283	int index = 0;
   2284
   2285	BUG_ON(reserve && node->processed);
   2286
   2287	while (next) {
   2288		cond_resched();
   2289		while (1) {
   2290			if (next->processed && (reserve || next != node))
   2291				break;
   2292
   2293			num_bytes += fs_info->nodesize;
   2294
   2295			if (list_empty(&next->upper))
   2296				break;
   2297
   2298			edge = list_entry(next->upper.next,
   2299					struct btrfs_backref_edge, list[LOWER]);
   2300			edges[index++] = edge;
   2301			next = edge->node[UPPER];
   2302		}
   2303		next = walk_down_backref(edges, &index);
   2304	}
   2305	return num_bytes;
   2306}
   2307
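       /*
        * Reserve metadata space for relocating @node.  On ENOSPC, grow the
        * reservation size for the next attempt and return -EAGAIN so the
        * caller drops the transaction and retries once more flushing is
        * possible.
        */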
   2308static int reserve_metadata_space(struct btrfs_trans_handle *trans,
   2309				  struct reloc_control *rc,
   2310				  struct btrfs_backref_node *node)
   2311{
   2312	struct btrfs_root *root = rc->extent_root;
   2313	struct btrfs_fs_info *fs_info = root->fs_info;
   2314	u64 num_bytes;
   2315	int ret;
   2316	u64 tmp;
   2317
   2318	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
   2319
   2320	trans->block_rsv = rc->block_rsv;
   2321	rc->reserved_bytes += num_bytes;
   2322
   2323	/*
   2324	 * We are under a transaction here so we can only do limited flushing.
   2325	 * If we get an enospc just kick back -EAGAIN so we know to drop the
   2326	 * transaction and try to refill when we can flush all the things.
   2327	 */
   2328	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
   2329				     BTRFS_RESERVE_FLUSH_LIMIT);
   2330	if (ret) {
   2331		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
   2332		while (tmp <= rc->reserved_bytes)
   2333			tmp <<= 1;
   2334		/*
   2335		 * only one thread can access block_rsv at this point,
   2336		 * so we don't need to hold a lock to protect block_rsv.
   2337		 * we expand the reservation size here to allow enough
   2338		 * space for relocation, and we will return early in the
   2339		 * enospc case.
   2340		 */
   2341		rc->block_rsv->size = tmp + fs_info->nodesize *
   2342				      RELOCATION_RESERVED_NODES;
   2343		return -EAGAIN;
   2344	}
   2345
   2346	return 0;
   2347}
   2348
   2349/*
   2350 * relocate a tree block, and then update pointers in upper level
   2351 * blocks that reference the block to point to the new location.
   2352 *
   2353 * if called by link_to_upper, the block has already been relocated.
   2354 * in that case this function just updates pointers.
   2355 */
   2356static int do_relocation(struct btrfs_trans_handle *trans,
   2357			 struct reloc_control *rc,
   2358			 struct btrfs_backref_node *node,
   2359			 struct btrfs_key *key,
   2360			 struct btrfs_path *path, int lowest)
   2361{
   2362	struct btrfs_backref_node *upper;
   2363	struct btrfs_backref_edge *edge;
   2364	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
   2365	struct btrfs_root *root;
   2366	struct extent_buffer *eb;
   2367	u32 blocksize;
   2368	u64 bytenr;
   2369	int slot;
   2370	int ret = 0;
   2371
   2372	/*
   2373	 * If we are lowest then this is the first time we're processing this
   2374	 * block, and thus shouldn't have an eb associated with it yet.
   2375	 */
   2376	ASSERT(!lowest || !node->eb);
   2377
   2378	path->lowest_level = node->level + 1;
   2379	rc->backref_cache.path[node->level] = node;
   2380	list_for_each_entry(edge, &node->upper, list[LOWER]) {
   2381		struct btrfs_ref ref = { 0 };
   2382
   2383		cond_resched();
   2384
   2385		upper = edge->node[UPPER];
   2386		root = select_reloc_root(trans, rc, upper, edges);
   2387		if (IS_ERR(root)) {
   2388			ret = PTR_ERR(root);
   2389			goto next;
   2390		}
   2391
   2392		if (upper->eb && !upper->locked) {
   2393			if (!lowest) {
   2394				ret = btrfs_bin_search(upper->eb, key, &slot);
   2395				if (ret < 0)
   2396					goto next;
   2397				BUG_ON(ret);
   2398				bytenr = btrfs_node_blockptr(upper->eb, slot);
   2399				if (node->eb->start == bytenr)
   2400					goto next;
   2401			}
   2402			btrfs_backref_drop_node_buffer(upper);
   2403		}
   2404
   2405		if (!upper->eb) {
   2406			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
   2407			if (ret) {
   2408				if (ret > 0)
   2409					ret = -ENOENT;
   2410
   2411				btrfs_release_path(path);
   2412				break;
   2413			}
   2414
   2415			if (!upper->eb) {
   2416				upper->eb = path->nodes[upper->level];
   2417				path->nodes[upper->level] = NULL;
   2418			} else {
   2419				BUG_ON(upper->eb != path->nodes[upper->level]);
   2420			}
   2421
   2422			upper->locked = 1;
   2423			path->locks[upper->level] = 0;
   2424
   2425			slot = path->slots[upper->level];
   2426			btrfs_release_path(path);
   2427		} else {
   2428			ret = btrfs_bin_search(upper->eb, key, &slot);
   2429			if (ret < 0)
   2430				goto next;
   2431			BUG_ON(ret);
   2432		}
   2433
   2434		bytenr = btrfs_node_blockptr(upper->eb, slot);
   2435		if (lowest) {
   2436			if (bytenr != node->bytenr) {
   2437				btrfs_err(root->fs_info,
   2438		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
   2439					  bytenr, node->bytenr, slot,
   2440					  upper->eb->start);
   2441				ret = -EIO;
   2442				goto next;
   2443			}
   2444		} else {
   2445			if (node->eb->start == bytenr)
   2446				goto next;
   2447		}
   2448
   2449		blocksize = root->fs_info->nodesize;
   2450		eb = btrfs_read_node_slot(upper->eb, slot);
   2451		if (IS_ERR(eb)) {
   2452			ret = PTR_ERR(eb);
   2453			goto next;
   2454		}
   2455		btrfs_tree_lock(eb);
   2456
   2457		if (!node->eb) {
   2458			ret = btrfs_cow_block(trans, root, eb, upper->eb,
   2459					      slot, &eb, BTRFS_NESTING_COW);
   2460			btrfs_tree_unlock(eb);
   2461			free_extent_buffer(eb);
   2462			if (ret < 0)
   2463				goto next;
   2464			/*
   2465			 * We've just COWed this block, it should have updated
   2466			 * the correct backref node entry.
   2467			 */
   2468			ASSERT(node->eb == eb);
   2469		} else {
   2470			btrfs_set_node_blockptr(upper->eb, slot,
   2471						node->eb->start);
   2472			btrfs_set_node_ptr_generation(upper->eb, slot,
   2473						      trans->transid);
   2474			btrfs_mark_buffer_dirty(upper->eb);
   2475
   2476			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
   2477					       node->eb->start, blocksize,
   2478					       upper->eb->start);
   2479			btrfs_init_tree_ref(&ref, node->level,
   2480					    btrfs_header_owner(upper->eb),
   2481					    root->root_key.objectid, false);
   2482			ret = btrfs_inc_extent_ref(trans, &ref);
   2483			if (!ret)
   2484				ret = btrfs_drop_subtree(trans, root, eb,
   2485							 upper->eb);
   2486			if (ret)
   2487				btrfs_abort_transaction(trans, ret);
   2488		}
   2489next:
   2490		if (!upper->pending)
   2491			btrfs_backref_drop_node_buffer(upper);
   2492		else
   2493			btrfs_backref_unlock_node_buffer(upper);
   2494		if (ret)
   2495			break;
   2496	}
   2497
   2498	if (!ret && node->pending) {
   2499		btrfs_backref_drop_node_buffer(node);
   2500		list_move_tail(&node->list, &rc->backref_cache.changed);
   2501		node->pending = 0;
   2502	}
   2503
   2504	path->lowest_level = 0;
   2505
   2506	/*
   2507	 * We should have allocated all of our space in the block rsv and thus
   2508	 * shouldn't ENOSPC.
   2509	 */
   2510	ASSERT(ret != -ENOSPC);
   2511	return ret;
   2512}
   2513
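       /*
        * Update the pointers in the parent blocks of an already relocated
        * block; a thin wrapper around do_relocation() with lowest == 0.
        */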
   2514static int link_to_upper(struct btrfs_trans_handle *trans,
   2515			 struct reloc_control *rc,
   2516			 struct btrfs_backref_node *node,
   2517			 struct btrfs_path *path)
   2518{
   2519	struct btrfs_key key;
   2520
   2521	btrfs_node_key_to_cpu(node->eb, &key, 0);
   2522	return do_relocation(trans, rc, node, &key, path, 0);
   2523}
   2524
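       /*
        * Link all pending backref nodes to their upper level blocks, level by
        * level, keeping the first error (@err or a new one) to return.
        */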
   2525static int finish_pending_nodes(struct btrfs_trans_handle *trans,
   2526				struct reloc_control *rc,
   2527				struct btrfs_path *path, int err)
   2528{
   2529	LIST_HEAD(list);
   2530	struct btrfs_backref_cache *cache = &rc->backref_cache;
   2531	struct btrfs_backref_node *node;
   2532	int level;
   2533	int ret;
   2534
   2535	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
   2536		while (!list_empty(&cache->pending[level])) {
   2537			node = list_entry(cache->pending[level].next,
   2538					  struct btrfs_backref_node, list);
   2539			list_move_tail(&node->list, &list);
   2540			BUG_ON(!node->pending);
   2541
   2542			if (!err) {
   2543				ret = link_to_upper(trans, rc, node, path);
   2544				if (ret < 0)
   2545					err = ret;
   2546			}
   2547		}
   2548		list_splice_init(&list, &cache->pending[level]);
   2549	}
   2550	return err;
   2551}
   2552
   2553/*
   2554 * mark a block and all blocks that directly/indirectly reference the
   2555 * block as processed.
   2556 */
   2557static void update_processed_blocks(struct reloc_control *rc,
   2558				    struct btrfs_backref_node *node)
   2559{
   2560	struct btrfs_backref_node *next = node;
   2561	struct btrfs_backref_edge *edge;
   2562	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
   2563	int index = 0;
   2564
   2565	while (next) {
   2566		cond_resched();
   2567		while (1) {
   2568			if (next->processed)
   2569				break;
   2570
   2571			mark_block_processed(rc, next);
   2572
   2573			if (list_empty(&next->upper))
   2574				break;
   2575
   2576			edge = list_entry(next->upper.next,
   2577					struct btrfs_backref_edge, list[LOWER]);
   2578			edges[index++] = edge;
   2579			next = edge->node[UPPER];
   2580		}
   2581		next = walk_down_backref(edges, &index);
   2582	}
   2583}
   2584
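       /*
        * Return 1 if the tree block at @bytenr is already marked processed in
        * rc->processed_blocks, 0 otherwise.
        */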
   2585static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
   2586{
   2587	u32 blocksize = rc->extent_root->fs_info->nodesize;
   2588
   2589	if (test_range_bit(&rc->processed_blocks, bytenr,
   2590			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
   2591		return 1;
   2592	return 0;
   2593}
   2594
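       /*
        * Read the tree block described by @block to fill in its first key and
        * mark the key ready.
        */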
   2595static int get_tree_block_key(struct btrfs_fs_info *fs_info,
   2596			      struct tree_block *block)
   2597{
   2598	struct extent_buffer *eb;
   2599
   2600	eb = read_tree_block(fs_info, block->bytenr, block->owner,
   2601			     block->key.offset, block->level, NULL);
   2602	if (IS_ERR(eb))
   2603		return PTR_ERR(eb);
   2604	if (!extent_buffer_uptodate(eb)) {
   2605		free_extent_buffer(eb);
   2606		return -EIO;
   2607	}
   2608	if (block->level == 0)
   2609		btrfs_item_key_to_cpu(eb, &block->key, 0);
   2610	else
   2611		btrfs_node_key_to_cpu(eb, &block->key, 0);
   2612	free_extent_buffer(eb);
   2613	block->key_ready = 1;
   2614	return 0;
   2615}
   2616
   2617/*
   2618 * helper function to relocate a tree block
   2619 */
   2620static int relocate_tree_block(struct btrfs_trans_handle *trans,
   2621				struct reloc_control *rc,
   2622				struct btrfs_backref_node *node,
   2623				struct btrfs_key *key,
   2624				struct btrfs_path *path)
   2625{
   2626	struct btrfs_root *root;
   2627	int ret = 0;
   2628
   2629	if (!node)
   2630		return 0;
   2631
   2632	/*
   2633	 * If we fail here we want to drop our backref_node because we are going
   2634	 * to start over and regenerate the tree for it.
   2635	 */
   2636	ret = reserve_metadata_space(trans, rc, node);
   2637	if (ret)
   2638		goto out;
   2639
   2640	BUG_ON(node->processed);
   2641	root = select_one_root(node);
   2642	if (IS_ERR(root)) {
   2643		ret = PTR_ERR(root);
   2644
   2645		/* See explanation in select_one_root for the -EUCLEAN case. */
   2646		ASSERT(ret == -ENOENT);
   2647		if (ret == -ENOENT) {
   2648			ret = 0;
   2649			update_processed_blocks(rc, node);
   2650		}
   2651		goto out;
   2652	}
   2653
   2654	if (root) {
   2655		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
   2656			/*
   2657			 * This block was the root block of a root, and this is
   2658			 * the first time we're processing the block and thus it
   2659			 * should not have had the ->new_bytenr modified and
   2660			 * should have not been included on the changed list.
   2661			 *
   2662			 * However in the case of corruption we could have
   2663			 * multiple refs pointing to the same block improperly,
   2664			 * and thus we would trip over these checks.  ASSERT()
   2665			 * for the developer case, because it could indicate a
   2666			 * bug in the backref code, however error out for a
   2667			 * normal user in the case of corruption.
   2668			 */
   2669			ASSERT(node->new_bytenr == 0);
   2670			ASSERT(list_empty(&node->list));
   2671			if (node->new_bytenr || !list_empty(&node->list)) {
   2672				btrfs_err(root->fs_info,
   2673				  "bytenr %llu has improper references to it",
   2674					  node->bytenr);
   2675				ret = -EUCLEAN;
   2676				goto out;
   2677			}
   2678			ret = btrfs_record_root_in_trans(trans, root);
   2679			if (ret)
   2680				goto out;
   2681			/*
   2682			 * Another thread could have failed, need to check if we
   2683			 * have reloc_root actually set.
   2684			 */
   2685			if (!root->reloc_root) {
   2686				ret = -ENOENT;
   2687				goto out;
   2688			}
   2689			root = root->reloc_root;
   2690			node->new_bytenr = root->node->start;
   2691			btrfs_put_root(node->root);
   2692			node->root = btrfs_grab_root(root);
   2693			ASSERT(node->root);
   2694			list_add_tail(&node->list, &rc->backref_cache.changed);
   2695		} else {
   2696			path->lowest_level = node->level;
   2697			if (root == root->fs_info->chunk_root)
   2698				btrfs_reserve_chunk_metadata(trans, false);
   2699			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
   2700			btrfs_release_path(path);
   2701			if (root == root->fs_info->chunk_root)
   2702				btrfs_trans_release_chunk_metadata(trans);
   2703			if (ret > 0)
   2704				ret = 0;
   2705		}
   2706		if (!ret)
   2707			update_processed_blocks(rc, node);
   2708	} else {
   2709		ret = do_relocation(trans, rc, node, key, path, 1);
   2710	}
   2711out:
   2712	if (ret || node->level == 0 || node->cowonly)
   2713		btrfs_backref_cleanup_node(&rc->backref_cache, node);
   2714	return ret;
   2715}
   2716
   2717/*
   2718 * relocate a list of blocks
   2719 */
   2720static noinline_for_stack
   2721int relocate_tree_blocks(struct btrfs_trans_handle *trans,
   2722			 struct reloc_control *rc, struct rb_root *blocks)
   2723{
   2724	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   2725	struct btrfs_backref_node *node;
   2726	struct btrfs_path *path;
   2727	struct tree_block *block;
   2728	struct tree_block *next;
   2729	int ret;
   2730	int err = 0;
   2731
   2732	path = btrfs_alloc_path();
   2733	if (!path) {
   2734		err = -ENOMEM;
   2735		goto out_free_blocks;
   2736	}
   2737
   2738	/* Kick in readahead for tree blocks with missing keys */
   2739	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
   2740		if (!block->key_ready)
   2741			btrfs_readahead_tree_block(fs_info, block->bytenr,
   2742						   block->owner, 0,
   2743						   block->level);
   2744	}
   2745
   2746	/* Get first keys */
   2747	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
   2748		if (!block->key_ready) {
   2749			err = get_tree_block_key(fs_info, block);
   2750			if (err)
   2751				goto out_free_path;
   2752		}
   2753	}
   2754
   2755	/* Do tree relocation */
   2756	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
   2757		node = build_backref_tree(rc, &block->key,
   2758					  block->level, block->bytenr);
   2759		if (IS_ERR(node)) {
   2760			err = PTR_ERR(node);
   2761			goto out;
   2762		}
   2763
   2764		ret = relocate_tree_block(trans, rc, node, &block->key,
   2765					  path);
   2766		if (ret < 0) {
   2767			err = ret;
   2768			break;
   2769		}
   2770	}
   2771out:
   2772	err = finish_pending_nodes(trans, rc, path, err);
   2773
   2774out_free_path:
   2775	btrfs_free_path(path);
   2776out_free_blocks:
   2777	free_block_list(blocks);
   2778	return err;
   2779}
   2780
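       /*
        * Preallocate the file range covering @cluster in the data reloc
        * inode, one extent per cluster boundary.  For the subpage case, the
        * tail page beyond i_size is invalidated first so padding zeros are
        * not written back as data.
        */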
   2781static noinline_for_stack int prealloc_file_extent_cluster(
   2782				struct btrfs_inode *inode,
   2783				struct file_extent_cluster *cluster)
   2784{
   2785	u64 alloc_hint = 0;
   2786	u64 start;
   2787	u64 end;
   2788	u64 offset = inode->index_cnt;
   2789	u64 num_bytes;
   2790	int nr;
   2791	int ret = 0;
   2792	u64 i_size = i_size_read(&inode->vfs_inode);
   2793	u64 prealloc_start = cluster->start - offset;
   2794	u64 prealloc_end = cluster->end - offset;
   2795	u64 cur_offset = prealloc_start;
   2796
   2797	/*
   2798	 * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
   2799	 * This means the range [i_size, PAGE_END + 1) is filled with zeros by
   2800	 * the btrfs_do_readpage() call for a previously relocated file cluster.
   2801	 *
   2802	 * If the current cluster starts in the above range, btrfs_do_readpage()
   2803	 * will skip the read, and relocate_one_page() will later writeback
   2804	 * the padding zeros as new data, causing data corruption.
   2805	 *
   2806	 * Here we have to manually invalidate the range [i_size, PAGE_END + 1).
   2807	 */
   2808	if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
   2809		struct address_space *mapping = inode->vfs_inode.i_mapping;
   2810		struct btrfs_fs_info *fs_info = inode->root->fs_info;
   2811		const u32 sectorsize = fs_info->sectorsize;
   2812		struct page *page;
   2813
   2814		ASSERT(sectorsize < PAGE_SIZE);
   2815		ASSERT(IS_ALIGNED(i_size, sectorsize));
   2816
   2817		/*
   2818		 * Subpage can't handle page with DIRTY but without UPTODATE
   2819		 * bit as it can lead to the following deadlock:
   2820		 *
   2821		 * btrfs_read_folio()
   2822		 * | Page already *locked*
   2823		 * |- btrfs_lock_and_flush_ordered_range()
   2824		 *    |- btrfs_start_ordered_extent()
   2825		 *       |- extent_write_cache_pages()
   2826		 *          |- lock_page()
   2827		 *             We try to lock the page we already hold.
   2828		 *
   2829		 * Here we just writeback the whole data reloc inode, so that
   2830		 * we will be ensured to have no dirty range in the page, and
   2831		 * are safe to clear the uptodate bits.
   2832		 *
   2833		 * This shouldn't cause too much overhead, as we need to write
   2834		 * the data back anyway.
   2835		 */
   2836		ret = filemap_write_and_wait(mapping);
   2837		if (ret < 0)
   2838			return ret;
   2839
   2840		clear_extent_bits(&inode->io_tree, i_size,
   2841				  round_up(i_size, PAGE_SIZE) - 1,
   2842				  EXTENT_UPTODATE);
   2843		page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
   2844		/*
   2845		 * If the page was freed we don't need to do anything, as we
   2846		 * will re-read the whole page anyway.
   2847		 */
   2848		if (page) {
   2849			btrfs_subpage_clear_uptodate(fs_info, page, i_size,
   2850					round_up(i_size, PAGE_SIZE) - i_size);
   2851			unlock_page(page);
   2852			put_page(page);
   2853		}
   2854	}
   2855
   2856	BUG_ON(cluster->start != cluster->boundary[0]);
   2857	ret = btrfs_alloc_data_chunk_ondemand(inode,
   2858					      prealloc_end + 1 - prealloc_start);
   2859	if (ret)
   2860		return ret;
   2861
   2862	btrfs_inode_lock(&inode->vfs_inode, 0);
   2863	for (nr = 0; nr < cluster->nr; nr++) {
   2864		start = cluster->boundary[nr] - offset;
   2865		if (nr + 1 < cluster->nr)
   2866			end = cluster->boundary[nr + 1] - 1 - offset;
   2867		else
   2868			end = cluster->end - offset;
   2869
   2870		lock_extent(&inode->io_tree, start, end);
   2871		num_bytes = end + 1 - start;
   2872		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
   2873						num_bytes, num_bytes,
   2874						end + 1, &alloc_hint);
   2875		cur_offset = end + 1;
   2876		unlock_extent(&inode->io_tree, start, end);
   2877		if (ret)
   2878			break;
   2879	}
   2880	btrfs_inode_unlock(&inode->vfs_inode, 0);
   2881
   2882	if (cur_offset < prealloc_end)
   2883		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
   2884					       prealloc_end + 1 - cur_offset);
   2885	return ret;
   2886}
   2887
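       /*
        * Insert a pinned extent map for [start, end] pointing at
        * @block_start (the old extent location), so reads of the data reloc
        * inode are served from the data being relocated.
        */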
   2888static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
   2889				u64 start, u64 end, u64 block_start)
   2890{
   2891	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
   2892	struct extent_map *em;
   2893	int ret = 0;
   2894
   2895	em = alloc_extent_map();
   2896	if (!em)
   2897		return -ENOMEM;
   2898
   2899	em->start = start;
   2900	em->len = end + 1 - start;
   2901	em->block_len = em->len;
   2902	em->block_start = block_start;
   2903	set_bit(EXTENT_FLAG_PINNED, &em->flags);
   2904
   2905	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
   2906	while (1) {
   2907		write_lock(&em_tree->lock);
   2908		ret = add_extent_mapping(em_tree, em, 0);
   2909		write_unlock(&em_tree->lock);
   2910		if (ret != -EEXIST) {
   2911			free_extent_map(em);
   2912			break;
   2913		}
   2914		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
   2915	}
   2916	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
   2917	return ret;
   2918}
   2919
   2920/*
   2921 * Allow error injection to test balance/relocation cancellation
   2922 */
   2923noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
   2924{
   2925	return atomic_read(&fs_info->balance_cancel_req) ||
   2926		atomic_read(&fs_info->reloc_cancel_req) ||
   2927		fatal_signal_pending(current);
   2928}
   2929ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
   2930
   2931static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
   2932				    int cluster_nr)
   2933{
   2934	/* Last extent, use cluster end directly */
   2935	if (cluster_nr >= cluster->nr - 1)
   2936		return cluster->end;
   2937
   2938	/* Use the start of the next boundary, minus 1 */
   2939	return cluster->boundary[cluster_nr + 1] - 1;
   2940}
   2941
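       /*
        * Read one page of the data reloc inode and mark its extents delalloc
        * and dirty for later writeback, setting EXTENT_BOUNDARY at each
        * cluster boundary so the relocated extents keep their original sizes.
        */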
   2942static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
   2943			     struct file_extent_cluster *cluster,
   2944			     int *cluster_nr, unsigned long page_index)
   2945{
   2946	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   2947	u64 offset = BTRFS_I(inode)->index_cnt;
   2948	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
   2949	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
   2950	struct page *page;
   2951	u64 page_start;
   2952	u64 page_end;
   2953	u64 cur;
   2954	int ret;
   2955
   2956	ASSERT(page_index <= last_index);
   2957	page = find_lock_page(inode->i_mapping, page_index);
   2958	if (!page) {
   2959		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
   2960				page_index, last_index + 1 - page_index);
   2961		page = find_or_create_page(inode->i_mapping, page_index, mask);
   2962		if (!page)
   2963			return -ENOMEM;
   2964	}
   2965	ret = set_page_extent_mapped(page);
   2966	if (ret < 0)
   2967		goto release_page;
   2968
   2969	if (PageReadahead(page))
   2970		page_cache_async_readahead(inode->i_mapping, ra, NULL,
   2971				page_folio(page), page_index,
   2972				last_index + 1 - page_index);
   2973
   2974	if (!PageUptodate(page)) {
   2975		btrfs_read_folio(NULL, page_folio(page));
   2976		lock_page(page);
   2977		if (!PageUptodate(page)) {
   2978			ret = -EIO;
   2979			goto release_page;
   2980		}
   2981	}
   2982
   2983	page_start = page_offset(page);
   2984	page_end = page_start + PAGE_SIZE - 1;
   2985
   2986	/*
   2987	 * Start from the cluster boundary, as in the subpage case the
   2988	 * cluster can start inside the page.
   2989	 */
   2990	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
   2991	while (cur <= page_end) {
   2992		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
   2993		u64 extent_end = get_cluster_boundary_end(cluster,
   2994						*cluster_nr) - offset;
   2995		u64 clamped_start = max(page_start, extent_start);
   2996		u64 clamped_end = min(page_end, extent_end);
   2997		u32 clamped_len = clamped_end + 1 - clamped_start;
   2998
   2999		/* Reserve metadata for this range */
   3000		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
   3001						      clamped_len, clamped_len,
   3002						      false);
   3003		if (ret)
   3004			goto release_page;
   3005
   3006		/* Mark the range delalloc and dirty for later writeback */
   3007		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
   3008		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
   3009						clamped_end, 0, NULL);
   3010		if (ret) {
   3011			clear_extent_bits(&BTRFS_I(inode)->io_tree,
   3012					clamped_start, clamped_end,
   3013					EXTENT_LOCKED | EXTENT_BOUNDARY);
   3014			btrfs_delalloc_release_metadata(BTRFS_I(inode),
   3015							clamped_len, true);
   3016			btrfs_delalloc_release_extents(BTRFS_I(inode),
   3017						       clamped_len);
   3018			goto release_page;
   3019		}
   3020		btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
   3021
   3022		/*
   3023		 * Set the boundary if it's inside the page.
   3024		 * Data relocation requires the destination extents to have the
   3025		 * same size as the source.
   3026		 * EXTENT_BOUNDARY bit prevents current extent from being merged
   3027		 * with previous extent.
   3028		 */
   3029		if (in_range(cluster->boundary[*cluster_nr] - offset,
   3030			     page_start, PAGE_SIZE)) {
   3031			u64 boundary_start = cluster->boundary[*cluster_nr] -
   3032						offset;
   3033			u64 boundary_end = boundary_start +
   3034					   fs_info->sectorsize - 1;
   3035
   3036			set_extent_bits(&BTRFS_I(inode)->io_tree,
   3037					boundary_start, boundary_end,
   3038					EXTENT_BOUNDARY);
   3039		}
   3040		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
   3041		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
   3042		cur += clamped_len;
   3043
   3044		/* Crossed extent end, go to next extent */
   3045		if (cur >= extent_end) {
   3046			(*cluster_nr)++;
   3047			/* Just finished the last extent of the cluster, exit. */
   3048			if (*cluster_nr >= cluster->nr)
   3049				break;
   3050		}
   3051	}
   3052	unlock_page(page);
   3053	put_page(page);
   3054
   3055	balance_dirty_pages_ratelimited(inode->i_mapping);
   3056	btrfs_throttle(fs_info);
   3057	if (btrfs_should_cancel_balance(fs_info))
   3058		ret = -ECANCELED;
   3059	return ret;
   3060
   3061release_page:
   3062	unlock_page(page);
   3063	put_page(page);
   3064	return ret;
   3065}
   3066
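       /*
        * Relocate one cluster of file extents: preallocate the destination
        * range, map it to the old data location, then dirty every page in the
        * range so writeback copies the data into the new extents.
        */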
   3067static int relocate_file_extent_cluster(struct inode *inode,
   3068					struct file_extent_cluster *cluster)
   3069{
   3070	u64 offset = BTRFS_I(inode)->index_cnt;
   3071	unsigned long index;
   3072	unsigned long last_index;
   3073	struct file_ra_state *ra;
   3074	int cluster_nr = 0;
   3075	int ret = 0;
   3076
   3077	if (!cluster->nr)
   3078		return 0;
   3079
   3080	ra = kzalloc(sizeof(*ra), GFP_NOFS);
   3081	if (!ra)
   3082		return -ENOMEM;
   3083
   3084	ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
   3085	if (ret)
   3086		goto out;
   3087
   3088	file_ra_state_init(ra, inode->i_mapping);
   3089
   3090	ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
   3091				   cluster->end - offset, cluster->start);
   3092	if (ret)
   3093		goto out;
   3094
   3095	last_index = (cluster->end - offset) >> PAGE_SHIFT;
   3096	for (index = (cluster->start - offset) >> PAGE_SHIFT;
   3097	     index <= last_index && !ret; index++)
   3098		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
   3099	if (ret == 0)
   3100		WARN_ON(cluster_nr != cluster->nr);
   3101out:
   3102	kfree(ra);
   3103	return ret;
   3104}
   3105
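       /*
        * Accumulate data extents into @cluster and relocate the cluster
        * whenever the new extent is not adjacent to it (a gap in bytenr) or
        * MAX_EXTENTS boundaries have been gathered.  E.g. extents at [0, 4K)
        * and [4K, 8K) share one cluster, while an extent at [16K, 20K) first
        * flushes that cluster and then starts a new one.
        */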
   3106static noinline_for_stack
   3107int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
   3108			 struct file_extent_cluster *cluster)
   3109{
   3110	int ret;
   3111
   3112	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
   3113		ret = relocate_file_extent_cluster(inode, cluster);
   3114		if (ret)
   3115			return ret;
   3116		cluster->nr = 0;
   3117	}
   3118
   3119	if (!cluster->nr)
   3120		cluster->start = extent_key->objectid;
   3121	else
   3122		BUG_ON(cluster->nr >= MAX_EXTENTS);
   3123	cluster->end = extent_key->objectid + extent_key->offset - 1;
   3124	cluster->boundary[cluster->nr] = extent_key->objectid;
   3125	cluster->nr++;
   3126
   3127	if (cluster->nr >= MAX_EXTENTS) {
   3128		ret = relocate_file_extent_cluster(inode, cluster);
   3129		if (ret)
   3130			return ret;
   3131		cluster->nr = 0;
   3132	}
   3133	return 0;
   3134}
   3135
   3136/*
   3137 * helper to add a tree block to the list.
   3138 * the major work is getting the generation and level of the block
   3139 */
   3140static int add_tree_block(struct reloc_control *rc,
   3141			  struct btrfs_key *extent_key,
   3142			  struct btrfs_path *path,
   3143			  struct rb_root *blocks)
   3144{
   3145	struct extent_buffer *eb;
   3146	struct btrfs_extent_item *ei;
   3147	struct btrfs_tree_block_info *bi;
   3148	struct tree_block *block;
   3149	struct rb_node *rb_node;
   3150	u32 item_size;
   3151	int level = -1;
   3152	u64 generation;
   3153	u64 owner = 0;
   3154
   3155	eb = path->nodes[0];
   3156	item_size = btrfs_item_size(eb, path->slots[0]);
   3157
   3158	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
   3159	    item_size >= sizeof(*ei) + sizeof(*bi)) {
   3160		unsigned long ptr = 0, end;
   3161
   3162		ei = btrfs_item_ptr(eb, path->slots[0],
   3163				struct btrfs_extent_item);
   3164		end = (unsigned long)ei + item_size;
   3165		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
   3166			bi = (struct btrfs_tree_block_info *)(ei + 1);
   3167			level = btrfs_tree_block_level(eb, bi);
   3168			ptr = (unsigned long)(bi + 1);
   3169		} else {
   3170			level = (int)extent_key->offset;
   3171			ptr = (unsigned long)(ei + 1);
   3172		}
   3173		generation = btrfs_extent_generation(eb, ei);
   3174
   3175		/*
   3176		 * We're reading random blocks without knowing their owner ahead
   3177		 * of time.  This is ok most of the time, as all reloc roots and
   3178		 * fs roots have the same lock type.  However normal trees do
   3179		 * not, and the only way to know ahead of time is to read the
   3180		 * inline ref offset.  We know it's an fs root if
   3181		 *
   3182		 * 1. There's more than one ref.
   3183		 * 2. There's a SHARED_DATA_REF_KEY set.
   3184		 * 3. FULL_BACKREF is set on the flags.
   3185		 *
   3186		 * Otherwise it's safe to assume that the ref offset == the
   3187		 * owner of this block, so we can use that when calling
   3188		 * read_tree_block.
   3189		 */
   3190		if (btrfs_extent_refs(eb, ei) == 1 &&
   3191		    !(btrfs_extent_flags(eb, ei) &
   3192		      BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
   3193		    ptr < end) {
   3194			struct btrfs_extent_inline_ref *iref;
   3195			int type;
   3196
   3197			iref = (struct btrfs_extent_inline_ref *)ptr;
   3198			type = btrfs_get_extent_inline_ref_type(eb, iref,
   3199							BTRFS_REF_TYPE_BLOCK);
   3200			if (type == BTRFS_REF_TYPE_INVALID)
   3201				return -EINVAL;
   3202			if (type == BTRFS_TREE_BLOCK_REF_KEY)
   3203				owner = btrfs_extent_inline_ref_offset(eb, iref);
   3204		}
   3205	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
   3206		btrfs_print_v0_err(eb->fs_info);
   3207		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
   3208		return -EINVAL;
   3209	} else {
   3210		BUG();
   3211	}
   3212
   3213	btrfs_release_path(path);
   3214
   3215	BUG_ON(level == -1);
   3216
   3217	block = kmalloc(sizeof(*block), GFP_NOFS);
   3218	if (!block)
   3219		return -ENOMEM;
   3220
   3221	block->bytenr = extent_key->objectid;
   3222	block->key.objectid = rc->extent_root->fs_info->nodesize;
   3223	block->key.offset = generation;
   3224	block->level = level;
   3225	block->key_ready = 0;
   3226	block->owner = owner;
   3227
   3228	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
   3229	if (rb_node)
   3230		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
   3231				    -EEXIST);
   3232
   3233	return 0;
   3234}
   3235
   3236/*
   3237 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
   3238 */
   3239static int __add_tree_block(struct reloc_control *rc,
   3240			    u64 bytenr, u32 blocksize,
   3241			    struct rb_root *blocks)
   3242{
   3243	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   3244	struct btrfs_path *path;
   3245	struct btrfs_key key;
   3246	int ret;
   3247	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
   3248
   3249	if (tree_block_processed(bytenr, rc))
   3250		return 0;
   3251
   3252	if (rb_simple_search(blocks, bytenr))
   3253		return 0;
   3254
   3255	path = btrfs_alloc_path();
   3256	if (!path)
   3257		return -ENOMEM;
   3258again:
   3259	key.objectid = bytenr;
   3260	if (skinny) {
   3261		key.type = BTRFS_METADATA_ITEM_KEY;
   3262		key.offset = (u64)-1;
   3263	} else {
   3264		key.type = BTRFS_EXTENT_ITEM_KEY;
   3265		key.offset = blocksize;
   3266	}
   3267
   3268	path->search_commit_root = 1;
   3269	path->skip_locking = 1;
   3270	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
   3271	if (ret < 0)
   3272		goto out;
   3273
   3274	if (ret > 0 && skinny) {
   3275		if (path->slots[0]) {
   3276			path->slots[0]--;
   3277			btrfs_item_key_to_cpu(path->nodes[0], &key,
   3278					      path->slots[0]);
   3279			if (key.objectid == bytenr &&
   3280			    (key.type == BTRFS_METADATA_ITEM_KEY ||
   3281			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
   3282			      key.offset == blocksize)))
   3283				ret = 0;
   3284		}
   3285
   3286		if (ret) {
   3287			skinny = false;
   3288			btrfs_release_path(path);
   3289			goto again;
   3290		}
   3291	}
   3292	if (ret) {
   3293		ASSERT(ret == 1);
   3294		btrfs_print_leaf(path->nodes[0]);
   3295		btrfs_err(fs_info,
   3296	     "tree block extent item (%llu) is not found in extent tree",
   3297		     bytenr);
   3298		WARN_ON(1);
   3299		ret = -EINVAL;
   3300		goto out;
   3301	}
   3302
   3303	ret = add_tree_block(rc, &key, path, blocks);
   3304out:
   3305	btrfs_free_path(path);
   3306	return ret;
   3307}
   3308
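       /*
        * Truncate the v1 free space cache for @block_group, looking up the
        * cache inode by @ino if @inode is NULL.
        */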
   3309static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
   3310				    struct btrfs_block_group *block_group,
   3311				    struct inode *inode,
   3312				    u64 ino)
   3313{
   3314	struct btrfs_root *root = fs_info->tree_root;
   3315	struct btrfs_trans_handle *trans;
   3316	int ret = 0;
   3317
   3318	if (inode)
   3319		goto truncate;
   3320
   3321	inode = btrfs_iget(fs_info->sb, ino, root);
   3322	if (IS_ERR(inode))
   3323		return -ENOENT;
   3324
   3325truncate:
   3326	ret = btrfs_check_trunc_cache_free_space(fs_info,
   3327						 &fs_info->global_block_rsv);
   3328	if (ret)
   3329		goto out;
   3330
   3331	trans = btrfs_join_transaction(root);
   3332	if (IS_ERR(trans)) {
   3333		ret = PTR_ERR(trans);
   3334		goto out;
   3335	}
   3336
   3337	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
   3338
   3339	btrfs_end_transaction(trans);
   3340	btrfs_btree_balance_dirty(fs_info);
   3341out:
   3342	iput(inode);
   3343	return ret;
   3344}
   3345
   3346/*
   3347 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
   3348 * cache inode, to prevent the free space cache data extent from blocking data relocation.
   3349 */
   3350static int delete_v1_space_cache(struct extent_buffer *leaf,
   3351				 struct btrfs_block_group *block_group,
   3352				 u64 data_bytenr)
   3353{
   3354	u64 space_cache_ino;
   3355	struct btrfs_file_extent_item *ei;
   3356	struct btrfs_key key;
   3357	bool found = false;
   3358	int i;
   3359	int ret;
   3360
   3361	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
   3362		return 0;
   3363
   3364	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
   3365		u8 type;
   3366
   3367		btrfs_item_key_to_cpu(leaf, &key, i);
   3368		if (key.type != BTRFS_EXTENT_DATA_KEY)
   3369			continue;
   3370		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
   3371		type = btrfs_file_extent_type(leaf, ei);
   3372
   3373		if ((type == BTRFS_FILE_EXTENT_REG ||
   3374		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
   3375		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
   3376			found = true;
   3377			space_cache_ino = key.objectid;
   3378			break;
   3379		}
   3380	}
   3381	if (!found)
   3382		return -ENOENT;
   3383	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
   3384					space_cache_ino);
   3385	return ret;
   3386}
   3387
   3388/*
   3389 * helper to find all tree blocks that reference a given data extent
   3390 */
   3391static noinline_for_stack
   3392int add_data_references(struct reloc_control *rc,
   3393			struct btrfs_key *extent_key,
   3394			struct btrfs_path *path,
   3395			struct rb_root *blocks)
   3396{
   3397	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   3398	struct ulist *leaves = NULL;
   3399	struct ulist_iterator leaf_uiter;
   3400	struct ulist_node *ref_node = NULL;
   3401	const u32 blocksize = fs_info->nodesize;
   3402	int ret = 0;
   3403
   3404	btrfs_release_path(path);
   3405	ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
   3406				   0, &leaves, NULL, true);
   3407	if (ret < 0)
   3408		return ret;
   3409
   3410	ULIST_ITER_INIT(&leaf_uiter);
   3411	while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
   3412		struct extent_buffer *eb;
   3413
   3414		eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL);
   3415		if (IS_ERR(eb)) {
   3416			ret = PTR_ERR(eb);
   3417			break;
   3418		}
   3419		ret = delete_v1_space_cache(eb, rc->block_group,
   3420					    extent_key->objectid);
   3421		free_extent_buffer(eb);
   3422		if (ret < 0)
   3423			break;
   3424		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
   3425		if (ret < 0)
   3426			break;
   3427	}
   3428	if (ret < 0)
   3429		free_block_list(blocks);
   3430	ulist_free(leaves);
   3431	return ret;
   3432}
   3433
   3434/*
   3435 * helper to find next unprocessed extent
   3436 */
   3437static noinline_for_stack
   3438int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
   3439		     struct btrfs_key *extent_key)
   3440{
   3441	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   3442	struct btrfs_key key;
   3443	struct extent_buffer *leaf;
   3444	u64 start, end, last;
   3445	int ret;
   3446
   3447	last = rc->block_group->start + rc->block_group->length;
   3448	while (1) {
   3449		cond_resched();
   3450		if (rc->search_start >= last) {
   3451			ret = 1;
   3452			break;
   3453		}
   3454
   3455		key.objectid = rc->search_start;
   3456		key.type = BTRFS_EXTENT_ITEM_KEY;
   3457		key.offset = 0;
   3458
   3459		path->search_commit_root = 1;
   3460		path->skip_locking = 1;
   3461		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
   3462					0, 0);
   3463		if (ret < 0)
   3464			break;
   3465next:
   3466		leaf = path->nodes[0];
   3467		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
   3468			ret = btrfs_next_leaf(rc->extent_root, path);
   3469			if (ret != 0)
   3470				break;
   3471			leaf = path->nodes[0];
   3472		}
   3473
   3474		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
   3475		if (key.objectid >= last) {
   3476			ret = 1;
   3477			break;
   3478		}
   3479
   3480		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
   3481		    key.type != BTRFS_METADATA_ITEM_KEY) {
   3482			path->slots[0]++;
   3483			goto next;
   3484		}
   3485
   3486		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
   3487		    key.objectid + key.offset <= rc->search_start) {
   3488			path->slots[0]++;
   3489			goto next;
   3490		}
   3491
   3492		if (key.type == BTRFS_METADATA_ITEM_KEY &&
   3493		    key.objectid + fs_info->nodesize <=
   3494		    rc->search_start) {
   3495			path->slots[0]++;
   3496			goto next;
   3497		}
   3498
   3499		ret = find_first_extent_bit(&rc->processed_blocks,
   3500					    key.objectid, &start, &end,
   3501					    EXTENT_DIRTY, NULL);
   3502
   3503		if (ret == 0 && start <= key.objectid) {
   3504			btrfs_release_path(path);
   3505			rc->search_start = end + 1;
   3506		} else {
   3507			if (key.type == BTRFS_EXTENT_ITEM_KEY)
   3508				rc->search_start = key.objectid + key.offset;
   3509			else
   3510				rc->search_start = key.objectid +
   3511					fs_info->nodesize;
   3512			memcpy(extent_key, &key, sizeof(key));
   3513			return 0;
   3514		}
   3515	}
   3516	btrfs_release_path(path);
   3517	return ret;
   3518}
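
/*
 * Worked example of the search_start advancement above (illustrative,
 * assuming a 16 KiB nodesize): an extent keyed (X EXTENT_ITEM len) covers
 * [X, X + len), so search_start becomes X + len; a tree block keyed
 * (X METADATA_ITEM level) covers exactly one node, so search_start becomes
 * X + 16384.  Ranges already marked EXTENT_DIRTY in rc->processed_blocks
 * are skipped by jumping search_start to end + 1.
 */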
   3519
   3520static void set_reloc_control(struct reloc_control *rc)
   3521{
   3522	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   3523
   3524	mutex_lock(&fs_info->reloc_mutex);
   3525	fs_info->reloc_ctl = rc;
   3526	mutex_unlock(&fs_info->reloc_mutex);
   3527}
   3528
   3529static void unset_reloc_control(struct reloc_control *rc)
   3530{
   3531	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   3532
   3533	mutex_lock(&fs_info->reloc_mutex);
   3534	fs_info->reloc_ctl = NULL;
   3535	mutex_unlock(&fs_info->reloc_mutex);
   3536}
   3537
   3538static noinline_for_stack
   3539int prepare_to_relocate(struct reloc_control *rc)
   3540{
   3541	struct btrfs_trans_handle *trans;
   3542	int ret;
   3543
   3544	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
   3545					      BTRFS_BLOCK_RSV_TEMP);
   3546	if (!rc->block_rsv)
   3547		return -ENOMEM;
   3548
   3549	memset(&rc->cluster, 0, sizeof(rc->cluster));
   3550	rc->search_start = rc->block_group->start;
   3551	rc->extents_found = 0;
   3552	rc->nodes_relocated = 0;
   3553	rc->merging_rsv_size = 0;
   3554	rc->reserved_bytes = 0;
   3555	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
   3556			      RELOCATION_RESERVED_NODES;
   3557	ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
   3558				     rc->block_rsv, rc->block_rsv->size,
   3559				     BTRFS_RESERVE_FLUSH_ALL);
   3560	if (ret)
   3561		return ret;
   3562
   3563	rc->create_reloc_tree = 1;
   3564	set_reloc_control(rc);
   3565
   3566	trans = btrfs_join_transaction(rc->extent_root);
   3567	if (IS_ERR(trans)) {
   3568		unset_reloc_control(rc);
    3569		/*
    3570		 * The extent tree is not a ref_cow tree and has no reloc_root
    3571		 * to clean up.  Callers are responsible for freeing the block
    3572		 * rsv allocated above.
    3573		 */
   3574		return PTR_ERR(trans);
   3575	}
   3576	return btrfs_commit_transaction(trans);
   3577}
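
/*
 * Note on the reservation sizing above (illustrative): block_rsv->size is
 * nodesize * RELOCATION_RESERVED_NODES, i.e. metadata space for a fixed
 * batch of tree nodes per pass.  It is refilled with
 * BTRFS_RESERVE_FLUSH_ALL both here and at the top of every iteration of
 * relocate_block_group()'s main loop below.
 */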
   3578
   3579static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
   3580{
   3581	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
   3582	struct rb_root blocks = RB_ROOT;
   3583	struct btrfs_key key;
   3584	struct btrfs_trans_handle *trans = NULL;
   3585	struct btrfs_path *path;
   3586	struct btrfs_extent_item *ei;
   3587	u64 flags;
   3588	int ret;
   3589	int err = 0;
   3590	int progress = 0;
   3591
   3592	path = btrfs_alloc_path();
   3593	if (!path)
   3594		return -ENOMEM;
   3595	path->reada = READA_FORWARD;
   3596
   3597	ret = prepare_to_relocate(rc);
   3598	if (ret) {
   3599		err = ret;
   3600		goto out_free;
   3601	}
   3602
   3603	while (1) {
   3604		rc->reserved_bytes = 0;
   3605		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
   3606					     rc->block_rsv->size,
   3607					     BTRFS_RESERVE_FLUSH_ALL);
   3608		if (ret) {
   3609			err = ret;
   3610			break;
   3611		}
   3612		progress++;
   3613		trans = btrfs_start_transaction(rc->extent_root, 0);
   3614		if (IS_ERR(trans)) {
   3615			err = PTR_ERR(trans);
   3616			trans = NULL;
   3617			break;
   3618		}
   3619restart:
   3620		if (update_backref_cache(trans, &rc->backref_cache)) {
   3621			btrfs_end_transaction(trans);
   3622			trans = NULL;
   3623			continue;
   3624		}
   3625
   3626		ret = find_next_extent(rc, path, &key);
   3627		if (ret < 0)
   3628			err = ret;
   3629		if (ret != 0)
   3630			break;
   3631
   3632		rc->extents_found++;
   3633
   3634		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
   3635				    struct btrfs_extent_item);
   3636		flags = btrfs_extent_flags(path->nodes[0], ei);
   3637
   3638		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
   3639			ret = add_tree_block(rc, &key, path, &blocks);
   3640		} else if (rc->stage == UPDATE_DATA_PTRS &&
   3641			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
   3642			ret = add_data_references(rc, &key, path, &blocks);
   3643		} else {
   3644			btrfs_release_path(path);
   3645			ret = 0;
   3646		}
   3647		if (ret < 0) {
   3648			err = ret;
   3649			break;
   3650		}
   3651
   3652		if (!RB_EMPTY_ROOT(&blocks)) {
   3653			ret = relocate_tree_blocks(trans, rc, &blocks);
   3654			if (ret < 0) {
   3655				if (ret != -EAGAIN) {
   3656					err = ret;
   3657					break;
   3658				}
   3659				rc->extents_found--;
   3660				rc->search_start = key.objectid;
   3661			}
   3662		}
   3663
   3664		btrfs_end_transaction_throttle(trans);
   3665		btrfs_btree_balance_dirty(fs_info);
   3666		trans = NULL;
   3667
   3668		if (rc->stage == MOVE_DATA_EXTENTS &&
   3669		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
   3670			rc->found_file_extent = 1;
   3671			ret = relocate_data_extent(rc->data_inode,
   3672						   &key, &rc->cluster);
   3673			if (ret < 0) {
   3674				err = ret;
   3675				break;
   3676			}
   3677		}
   3678		if (btrfs_should_cancel_balance(fs_info)) {
   3679			err = -ECANCELED;
   3680			break;
   3681		}
   3682	}
   3683	if (trans && progress && err == -ENOSPC) {
   3684		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
   3685		if (ret == 1) {
   3686			err = 0;
   3687			progress = 0;
   3688			goto restart;
   3689		}
   3690	}
   3691
   3692	btrfs_release_path(path);
   3693	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
   3694
   3695	if (trans) {
   3696		btrfs_end_transaction_throttle(trans);
   3697		btrfs_btree_balance_dirty(fs_info);
   3698	}
   3699
   3700	if (!err) {
   3701		ret = relocate_file_extent_cluster(rc->data_inode,
   3702						   &rc->cluster);
   3703		if (ret < 0)
   3704			err = ret;
   3705	}
   3706
   3707	rc->create_reloc_tree = 0;
   3708	set_reloc_control(rc);
   3709
   3710	btrfs_backref_release_cache(&rc->backref_cache);
   3711	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
   3712
   3713	/*
    3714	 * Even when the relocation is cancelled, we should still go
    3715	 * through prepare_to_merge() and merge_reloc_roots().
    3716	 *
    3717	 * On error (including a cancelled balance), prepare_to_merge() will
    3718	 * mark all reloc trees orphan, then queue them for cleanup in
    3719	 * merge_reloc_roots().
   3720	 */
   3721	err = prepare_to_merge(rc, err);
   3722
   3723	merge_reloc_roots(rc);
   3724
   3725	rc->merge_reloc_tree = 0;
   3726	unset_reloc_control(rc);
   3727	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
   3728
   3729	/* get rid of pinned extents */
   3730	trans = btrfs_join_transaction(rc->extent_root);
   3731	if (IS_ERR(trans)) {
   3732		err = PTR_ERR(trans);
   3733		goto out_free;
   3734	}
   3735	ret = btrfs_commit_transaction(trans);
   3736	if (ret && !err)
   3737		err = ret;
   3738out_free:
   3739	ret = clean_dirty_subvols(rc);
   3740	if (ret < 0 && !err)
   3741		err = ret;
   3742	btrfs_free_block_rsv(fs_info, rc->block_rsv);
   3743	btrfs_free_path(path);
   3744	return err;
   3745}
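
/*
 * Note on the -ENOSPC path above (illustrative reading of the code): if any
 * progress was made, btrfs_force_chunk_alloc() is asked for a new chunk of
 * the same type as the block group being drained; a return value of 1 means
 * a chunk was allocated, so err is cleared and the loop is retried from the
 * restart label.
 */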
   3746
   3747static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
   3748				 struct btrfs_root *root, u64 objectid)
   3749{
   3750	struct btrfs_path *path;
   3751	struct btrfs_inode_item *item;
   3752	struct extent_buffer *leaf;
   3753	int ret;
   3754
   3755	path = btrfs_alloc_path();
   3756	if (!path)
   3757		return -ENOMEM;
   3758
   3759	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
   3760	if (ret)
   3761		goto out;
   3762
   3763	leaf = path->nodes[0];
   3764	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
   3765	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
   3766	btrfs_set_inode_generation(leaf, item, 1);
   3767	btrfs_set_inode_size(leaf, item, 0);
   3768	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
   3769	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
   3770					  BTRFS_INODE_PREALLOC);
   3771	btrfs_mark_buffer_dirty(leaf);
   3772out:
   3773	btrfs_free_path(path);
   3774	return ret;
   3775}
   3776
   3777static void delete_orphan_inode(struct btrfs_trans_handle *trans,
   3778				struct btrfs_root *root, u64 objectid)
   3779{
   3780	struct btrfs_path *path;
   3781	struct btrfs_key key;
   3782	int ret = 0;
   3783
   3784	path = btrfs_alloc_path();
   3785	if (!path) {
   3786		ret = -ENOMEM;
   3787		goto out;
   3788	}
   3789
   3790	key.objectid = objectid;
   3791	key.type = BTRFS_INODE_ITEM_KEY;
   3792	key.offset = 0;
   3793	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
   3794	if (ret) {
   3795		if (ret > 0)
   3796			ret = -ENOENT;
   3797		goto out;
   3798	}
   3799	ret = btrfs_del_item(trans, root, path);
   3800out:
   3801	if (ret)
   3802		btrfs_abort_transaction(trans, ret);
   3803	btrfs_free_path(path);
   3804}
   3805
   3806/*
    3807 * Helper to create an inode for data relocation.
    3808 * The inode lives in the data relocation tree and its link count is 0.
   3809 */
   3810static noinline_for_stack
   3811struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
   3812				 struct btrfs_block_group *group)
   3813{
   3814	struct inode *inode = NULL;
   3815	struct btrfs_trans_handle *trans;
   3816	struct btrfs_root *root;
   3817	u64 objectid;
   3818	int err = 0;
   3819
   3820	root = btrfs_grab_root(fs_info->data_reloc_root);
   3821	trans = btrfs_start_transaction(root, 6);
   3822	if (IS_ERR(trans)) {
   3823		btrfs_put_root(root);
   3824		return ERR_CAST(trans);
   3825	}
   3826
   3827	err = btrfs_get_free_objectid(root, &objectid);
   3828	if (err)
   3829		goto out;
   3830
   3831	err = __insert_orphan_inode(trans, root, objectid);
   3832	if (err)
   3833		goto out;
   3834
   3835	inode = btrfs_iget(fs_info->sb, objectid, root);
   3836	if (IS_ERR(inode)) {
   3837		delete_orphan_inode(trans, root, objectid);
   3838		err = PTR_ERR(inode);
   3839		inode = NULL;
   3840		goto out;
   3841	}
   3842	BTRFS_I(inode)->index_cnt = group->start;
   3843
   3844	err = btrfs_orphan_add(trans, BTRFS_I(inode));
   3845out:
   3846	btrfs_put_root(root);
   3847	btrfs_end_transaction(trans);
   3848	btrfs_btree_balance_dirty(fs_info);
   3849	if (err) {
   3850		iput(inode);
   3851		inode = ERR_PTR(err);
   3852	}
   3853	return inode;
   3854}
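
/*
 * Note (illustrative): BTRFS_I(inode)->index_cnt is set to the block
 * group's start bytenr above, so file offsets in the data reloc inode map
 * 1:1 onto disk bytenrs of the block group being relocated:
 *
 *	disk_bytenr = file_pos + inode->index_cnt;
 *
 * which is exactly how btrfs_reloc_clone_csums() below recovers the old
 * disk location when cloning checksums.
 */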
   3855
   3856/*
    3857 * Mark the start of a cancellable chunk relocation. Check if cancellation has
    3858 * been requested in the meantime and don't start in that case.
   3859 *
   3860 * Return:
   3861 *   0             success
   3862 *   -EINPROGRESS  operation is already in progress, that's probably a bug
   3863 *   -ECANCELED    cancellation request was set before the operation started
   3864 */
   3865static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
   3866{
   3867	if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
   3868		/* This should not happen */
   3869		btrfs_err(fs_info, "reloc already running, cannot start");
   3870		return -EINPROGRESS;
   3871	}
   3872
   3873	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
   3874		btrfs_info(fs_info, "chunk relocation canceled on start");
   3875		/*
   3876		 * On cancel, clear all requests but let the caller mark
   3877		 * the end after cleanup operations.
   3878		 */
   3879		atomic_set(&fs_info->reloc_cancel_req, 0);
   3880		return -ECANCELED;
   3881	}
   3882	return 0;
   3883}
   3884
   3885/*
    3886 * Mark the end of a cancellable chunk relocation and wake any waiters.
   3887 */
   3888static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
   3889{
   3890	/* Requested after start, clear bit first so any waiters can continue */
   3891	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
   3892		btrfs_info(fs_info, "chunk relocation canceled during operation");
   3893	clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
   3894	atomic_set(&fs_info->reloc_cancel_req, 0);
   3895}
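
/*
 * Usage sketch (illustrative, mirroring the callers below): the two helpers
 * bracket a cancellable relocation.
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0)
 *		goto out;
 *	... do the relocation work ...
 *	reloc_chunk_end(fs_info);
 *
 * The BTRFS_FS_RELOC_RUNNING bit makes a concurrent start fail with
 * -EINPROGRESS, and a pending reloc_cancel_req turns into -ECANCELED
 * before any work is done.
 */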
   3896
   3897static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
   3898{
   3899	struct reloc_control *rc;
   3900
   3901	rc = kzalloc(sizeof(*rc), GFP_NOFS);
   3902	if (!rc)
   3903		return NULL;
   3904
   3905	INIT_LIST_HEAD(&rc->reloc_roots);
   3906	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
   3907	btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
   3908	mapping_tree_init(&rc->reloc_root_tree);
   3909	extent_io_tree_init(fs_info, &rc->processed_blocks,
   3910			    IO_TREE_RELOC_BLOCKS, NULL);
   3911	return rc;
   3912}
   3913
   3914static void free_reloc_control(struct reloc_control *rc)
   3915{
   3916	struct mapping_node *node, *tmp;
   3917
   3918	free_reloc_roots(&rc->reloc_roots);
   3919	rbtree_postorder_for_each_entry_safe(node, tmp,
   3920			&rc->reloc_root_tree.rb_root, rb_node)
   3921		kfree(node);
   3922
   3923	kfree(rc);
   3924}
   3925
   3926/*
   3927 * Print the block group being relocated
   3928 */
   3929static void describe_relocation(struct btrfs_fs_info *fs_info,
   3930				struct btrfs_block_group *block_group)
   3931{
   3932	char buf[128] = {'\0'};
   3933
   3934	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
   3935
   3936	btrfs_info(fs_info,
   3937		   "relocating block group %llu flags %s",
   3938		   block_group->start, buf);
   3939}
   3940
   3941static const char *stage_to_string(int stage)
   3942{
   3943	if (stage == MOVE_DATA_EXTENTS)
   3944		return "move data extents";
   3945	if (stage == UPDATE_DATA_PTRS)
   3946		return "update data pointers";
   3947	return "unknown";
   3948}
   3949
   3950/*
   3951 * function to relocate all extents in a block group.
   3952 */
   3953int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
   3954{
   3955	struct btrfs_block_group *bg;
   3956	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
   3957	struct reloc_control *rc;
   3958	struct inode *inode;
   3959	struct btrfs_path *path;
   3960	int ret;
   3961	int rw = 0;
   3962	int err = 0;
   3963
   3964	/*
   3965	 * This only gets set if we had a half-deleted snapshot on mount.  We
   3966	 * cannot allow relocation to start while we're still trying to clean up
   3967	 * these pending deletions.
   3968	 */
   3969	ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
   3970	if (ret)
   3971		return ret;
   3972
   3973	/* We may have been woken up by close_ctree, so bail if we're closing. */
   3974	if (btrfs_fs_closing(fs_info))
   3975		return -EINTR;
   3976
   3977	bg = btrfs_lookup_block_group(fs_info, group_start);
   3978	if (!bg)
   3979		return -ENOENT;
   3980
   3981	/*
   3982	 * Relocation of a data block group creates ordered extents.  Without
   3983	 * sb_start_write(), we can freeze the filesystem while unfinished
   3984	 * ordered extents are left. Such ordered extents can cause a deadlock
   3985	 * e.g. when syncfs() is waiting for their completion but they can't
   3986	 * finish because they block when joining a transaction, due to the
   3987	 * fact that the freeze locks are being held in write mode.
   3988	 */
   3989	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
   3990		ASSERT(sb_write_started(fs_info->sb));
   3991
   3992	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
   3993		btrfs_put_block_group(bg);
   3994		return -ETXTBSY;
   3995	}
   3996
   3997	rc = alloc_reloc_control(fs_info);
   3998	if (!rc) {
   3999		btrfs_put_block_group(bg);
   4000		return -ENOMEM;
   4001	}
   4002
   4003	ret = reloc_chunk_start(fs_info);
   4004	if (ret < 0) {
   4005		err = ret;
   4006		goto out_put_bg;
   4007	}
   4008
   4009	rc->extent_root = extent_root;
   4010	rc->block_group = bg;
   4011
   4012	ret = btrfs_inc_block_group_ro(rc->block_group, true);
   4013	if (ret) {
   4014		err = ret;
   4015		goto out;
   4016	}
   4017	rw = 1;
   4018
   4019	path = btrfs_alloc_path();
   4020	if (!path) {
   4021		err = -ENOMEM;
   4022		goto out;
   4023	}
   4024
   4025	inode = lookup_free_space_inode(rc->block_group, path);
   4026	btrfs_free_path(path);
   4027
   4028	if (!IS_ERR(inode))
   4029		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
   4030	else
   4031		ret = PTR_ERR(inode);
   4032
   4033	if (ret && ret != -ENOENT) {
   4034		err = ret;
   4035		goto out;
   4036	}
   4037
   4038	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
   4039	if (IS_ERR(rc->data_inode)) {
   4040		err = PTR_ERR(rc->data_inode);
   4041		rc->data_inode = NULL;
   4042		goto out;
   4043	}
   4044
   4045	describe_relocation(fs_info, rc->block_group);
   4046
   4047	btrfs_wait_block_group_reservations(rc->block_group);
   4048	btrfs_wait_nocow_writers(rc->block_group);
   4049	btrfs_wait_ordered_roots(fs_info, U64_MAX,
   4050				 rc->block_group->start,
   4051				 rc->block_group->length);
   4052
   4053	ret = btrfs_zone_finish(rc->block_group);
   4054	WARN_ON(ret && ret != -EAGAIN);
   4055
   4056	while (1) {
   4057		int finishes_stage;
   4058
   4059		mutex_lock(&fs_info->cleaner_mutex);
   4060		ret = relocate_block_group(rc);
   4061		mutex_unlock(&fs_info->cleaner_mutex);
   4062		if (ret < 0)
   4063			err = ret;
   4064
   4065		finishes_stage = rc->stage;
   4066		/*
   4067		 * We may have gotten ENOSPC after we already dirtied some
   4068		 * extents.  If writeout happens while we're relocating a
   4069		 * different block group we could end up hitting the
   4070		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
   4071		 * btrfs_reloc_cow_block.  Make sure we write everything out
   4072		 * properly so we don't trip over this problem, and then break
   4073		 * out of the loop if we hit an error.
   4074		 */
   4075		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
   4076			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
   4077						       (u64)-1);
   4078			if (ret)
   4079				err = ret;
   4080			invalidate_mapping_pages(rc->data_inode->i_mapping,
   4081						 0, -1);
   4082			rc->stage = UPDATE_DATA_PTRS;
   4083		}
   4084
   4085		if (err < 0)
   4086			goto out;
   4087
   4088		if (rc->extents_found == 0)
   4089			break;
   4090
   4091		btrfs_info(fs_info, "found %llu extents, stage: %s",
   4092			   rc->extents_found, stage_to_string(finishes_stage));
   4093	}
   4094
   4095	WARN_ON(rc->block_group->pinned > 0);
   4096	WARN_ON(rc->block_group->reserved > 0);
   4097	WARN_ON(rc->block_group->used > 0);
   4098out:
   4099	if (err && rw)
   4100		btrfs_dec_block_group_ro(rc->block_group);
   4101	iput(rc->data_inode);
   4102out_put_bg:
   4103	btrfs_put_block_group(bg);
   4104	reloc_chunk_end(fs_info);
   4105	free_reloc_control(rc);
   4106	return err;
   4107}
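
/*
 * Summary of the driver loop above (illustrative): relocating a data block
 * group runs in two stages, repeated until rc->extents_found is 0:
 *
 *	MOVE_DATA_EXTENTS	copy the data to new locations
 *	UPDATE_DATA_PTRS	rewrite tree blocks pointing at the old ones
 *
 * After MOVE_DATA_EXTENTS, ordered extents are flushed and the data reloc
 * inode's page cache is invalidated before switching stages, so stale pages
 * cannot be written back while the pointers are being updated.
 */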
   4108
   4109static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
   4110{
   4111	struct btrfs_fs_info *fs_info = root->fs_info;
   4112	struct btrfs_trans_handle *trans;
   4113	int ret, err;
   4114
   4115	trans = btrfs_start_transaction(fs_info->tree_root, 0);
   4116	if (IS_ERR(trans))
   4117		return PTR_ERR(trans);
   4118
   4119	memset(&root->root_item.drop_progress, 0,
   4120		sizeof(root->root_item.drop_progress));
   4121	btrfs_set_root_drop_level(&root->root_item, 0);
   4122	btrfs_set_root_refs(&root->root_item, 0);
   4123	ret = btrfs_update_root(trans, fs_info->tree_root,
   4124				&root->root_key, &root->root_item);
   4125
   4126	err = btrfs_end_transaction(trans);
   4127	if (err)
   4128		return err;
   4129	return ret;
   4130}
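
/*
 * Note (illustrative): setting root_refs to 0 above is what marks the reloc
 * root as garbage; in btrfs_recover_relocation() below, reloc roots with
 * zero refs are queued on rc->reloc_roots and cleaned up by
 * merge_reloc_roots() instead of being merged into an fs tree.
 */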
   4131
   4132/*
    4133 * Recover relocation interrupted by a system crash.
    4134 *
    4135 * This function resumes merging reloc trees with their corresponding fs trees;
    4136 * this is important for preserving the sharing of tree blocks.
   4137 */
   4138int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
   4139{
   4140	LIST_HEAD(reloc_roots);
   4141	struct btrfs_key key;
   4142	struct btrfs_root *fs_root;
   4143	struct btrfs_root *reloc_root;
   4144	struct btrfs_path *path;
   4145	struct extent_buffer *leaf;
   4146	struct reloc_control *rc = NULL;
   4147	struct btrfs_trans_handle *trans;
   4148	int ret;
   4149	int err = 0;
   4150
   4151	path = btrfs_alloc_path();
   4152	if (!path)
   4153		return -ENOMEM;
   4154	path->reada = READA_BACK;
   4155
   4156	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
   4157	key.type = BTRFS_ROOT_ITEM_KEY;
   4158	key.offset = (u64)-1;
   4159
   4160	while (1) {
   4161		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
   4162					path, 0, 0);
   4163		if (ret < 0) {
   4164			err = ret;
   4165			goto out;
   4166		}
   4167		if (ret > 0) {
   4168			if (path->slots[0] == 0)
   4169				break;
   4170			path->slots[0]--;
   4171		}
   4172		leaf = path->nodes[0];
   4173		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
   4174		btrfs_release_path(path);
   4175
   4176		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
   4177		    key.type != BTRFS_ROOT_ITEM_KEY)
   4178			break;
   4179
   4180		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
   4181		if (IS_ERR(reloc_root)) {
   4182			err = PTR_ERR(reloc_root);
   4183			goto out;
   4184		}
   4185
   4186		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
   4187		list_add(&reloc_root->root_list, &reloc_roots);
   4188
   4189		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
   4190			fs_root = btrfs_get_fs_root(fs_info,
   4191					reloc_root->root_key.offset, false);
   4192			if (IS_ERR(fs_root)) {
   4193				ret = PTR_ERR(fs_root);
   4194				if (ret != -ENOENT) {
   4195					err = ret;
   4196					goto out;
   4197				}
   4198				ret = mark_garbage_root(reloc_root);
   4199				if (ret < 0) {
   4200					err = ret;
   4201					goto out;
   4202				}
   4203			} else {
   4204				btrfs_put_root(fs_root);
   4205			}
   4206		}
   4207
   4208		if (key.offset == 0)
   4209			break;
   4210
   4211		key.offset--;
   4212	}
   4213	btrfs_release_path(path);
   4214
   4215	if (list_empty(&reloc_roots))
   4216		goto out;
   4217
   4218	rc = alloc_reloc_control(fs_info);
   4219	if (!rc) {
   4220		err = -ENOMEM;
   4221		goto out;
   4222	}
   4223
   4224	ret = reloc_chunk_start(fs_info);
   4225	if (ret < 0) {
   4226		err = ret;
   4227		goto out_end;
   4228	}
   4229
   4230	rc->extent_root = btrfs_extent_root(fs_info, 0);
   4231
   4232	set_reloc_control(rc);
   4233
   4234	trans = btrfs_join_transaction(rc->extent_root);
   4235	if (IS_ERR(trans)) {
   4236		err = PTR_ERR(trans);
   4237		goto out_unset;
   4238	}
   4239
   4240	rc->merge_reloc_tree = 1;
   4241
   4242	while (!list_empty(&reloc_roots)) {
   4243		reloc_root = list_entry(reloc_roots.next,
   4244					struct btrfs_root, root_list);
   4245		list_del(&reloc_root->root_list);
   4246
   4247		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
   4248			list_add_tail(&reloc_root->root_list,
   4249				      &rc->reloc_roots);
   4250			continue;
   4251		}
   4252
   4253		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
   4254					    false);
   4255		if (IS_ERR(fs_root)) {
   4256			err = PTR_ERR(fs_root);
   4257			list_add_tail(&reloc_root->root_list, &reloc_roots);
   4258			btrfs_end_transaction(trans);
   4259			goto out_unset;
   4260		}
   4261
   4262		err = __add_reloc_root(reloc_root);
   4263		ASSERT(err != -EEXIST);
   4264		if (err) {
   4265			list_add_tail(&reloc_root->root_list, &reloc_roots);
   4266			btrfs_put_root(fs_root);
   4267			btrfs_end_transaction(trans);
   4268			goto out_unset;
   4269		}
   4270		fs_root->reloc_root = btrfs_grab_root(reloc_root);
   4271		btrfs_put_root(fs_root);
   4272	}
   4273
   4274	err = btrfs_commit_transaction(trans);
   4275	if (err)
   4276		goto out_unset;
   4277
   4278	merge_reloc_roots(rc);
   4279
   4280	unset_reloc_control(rc);
   4281
   4282	trans = btrfs_join_transaction(rc->extent_root);
   4283	if (IS_ERR(trans)) {
   4284		err = PTR_ERR(trans);
   4285		goto out_clean;
   4286	}
   4287	err = btrfs_commit_transaction(trans);
   4288out_clean:
   4289	ret = clean_dirty_subvols(rc);
   4290	if (ret < 0 && !err)
   4291		err = ret;
   4292out_unset:
   4293	unset_reloc_control(rc);
   4294out_end:
   4295	reloc_chunk_end(fs_info);
   4296	free_reloc_control(rc);
   4297out:
   4298	free_reloc_roots(&reloc_roots);
   4299
   4300	btrfs_free_path(path);
   4301
   4302	if (err == 0) {
   4303		/* cleanup orphan inode in data relocation tree */
   4304		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
   4305		ASSERT(fs_root);
   4306		err = btrfs_orphan_cleanup(fs_root);
   4307		btrfs_put_root(fs_root);
   4308	}
   4309	return err;
   4310}
   4311
   4312/*
    4313 * Helper to add ordered checksums for data relocation.
    4314 *
    4315 * Cloning checksums properly handles the nodatasum extents; it also saves
    4316 * the CPU time of re-calculating the checksums.
   4317 */
   4318int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
   4319{
   4320	struct btrfs_fs_info *fs_info = inode->root->fs_info;
   4321	struct btrfs_root *csum_root;
   4322	struct btrfs_ordered_sum *sums;
   4323	struct btrfs_ordered_extent *ordered;
   4324	int ret;
   4325	u64 disk_bytenr;
   4326	u64 new_bytenr;
   4327	LIST_HEAD(list);
   4328
   4329	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
   4330	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
   4331
   4332	disk_bytenr = file_pos + inode->index_cnt;
   4333	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
   4334	ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
   4335				       disk_bytenr + len - 1, &list, 0);
   4336	if (ret)
   4337		goto out;
   4338
   4339	while (!list_empty(&list)) {
   4340		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
   4341		list_del_init(&sums->list);
   4342
   4343		/*
   4344		 * We need to offset the new_bytenr based on where the csum is.
   4345		 * We need to do this because we will read in entire prealloc
   4346		 * extents but we may have written to say the middle of the
   4347		 * prealloc extent, so we need to make sure the csum goes with
   4348		 * the right disk offset.
   4349		 *
   4350		 * We can do this because the data reloc inode refers strictly
   4351		 * to the on disk bytes, so we don't have to worry about
   4352		 * disk_len vs real len like with real inodes since it's all
   4353		 * disk length.
   4354		 */
   4355		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
   4356		sums->bytenr = new_bytenr;
   4357
   4358		btrfs_add_ordered_sum(ordered, sums);
   4359	}
   4360out:
   4361	btrfs_put_ordered_extent(ordered);
   4362	return ret;
   4363}
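
/*
 * Worked example of the csum rebase above (illustrative numbers): say the
 * old prealloc extent starts at disk_bytenr D and the relocated copy at
 * ordered->disk_bytenr = N.  A checksum found at sums->bytenr = D + 64K is
 * moved to
 *
 *	new_bytenr = N + (D + 64K) - D = N + 64K
 *
 * i.e. the same offset within the extent, rebased onto the new disk
 * location.
 */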
   4364
   4365int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
   4366			  struct btrfs_root *root, struct extent_buffer *buf,
   4367			  struct extent_buffer *cow)
   4368{
   4369	struct btrfs_fs_info *fs_info = root->fs_info;
   4370	struct reloc_control *rc;
   4371	struct btrfs_backref_node *node;
   4372	int first_cow = 0;
   4373	int level;
   4374	int ret = 0;
   4375
   4376	rc = fs_info->reloc_ctl;
   4377	if (!rc)
   4378		return 0;
   4379
   4380	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
   4381
   4382	level = btrfs_header_level(buf);
   4383	if (btrfs_header_generation(buf) <=
   4384	    btrfs_root_last_snapshot(&root->root_item))
   4385		first_cow = 1;
   4386
   4387	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
   4388	    rc->create_reloc_tree) {
   4389		WARN_ON(!first_cow && level == 0);
   4390
   4391		node = rc->backref_cache.path[level];
   4392		BUG_ON(node->bytenr != buf->start &&
   4393		       node->new_bytenr != buf->start);
   4394
   4395		btrfs_backref_drop_node_buffer(node);
   4396		atomic_inc(&cow->refs);
   4397		node->eb = cow;
   4398		node->new_bytenr = cow->start;
   4399
   4400		if (!node->pending) {
   4401			list_move_tail(&node->list,
   4402				       &rc->backref_cache.pending[level]);
   4403			node->pending = 1;
   4404		}
   4405
   4406		if (first_cow)
   4407			mark_block_processed(rc, node);
   4408
   4409		if (first_cow && level > 0)
   4410			rc->nodes_relocated += buf->len;
   4411	}
   4412
   4413	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
   4414		ret = replace_file_extents(trans, rc, root, cow);
   4415	return ret;
   4416}
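
/*
 * Note (illustrative): first_cow above is set when the buffer's generation
 * is not newer than the root's last snapshot, i.e. this is the first COW of
 * the block since that snapshot.  Only then is the block marked processed
 * and counted in rc->nodes_relocated, and only then (for leaves during
 * UPDATE_DATA_PTRS) does replace_file_extents() rewrite the data pointers.
 */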
   4417
   4418/*
    4419 * Called before creating a snapshot. It calculates the metadata reservation
    4420 * required for relocating tree blocks in the snapshot.
   4421 */
   4422void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
   4423			      u64 *bytes_to_reserve)
   4424{
   4425	struct btrfs_root *root = pending->root;
   4426	struct reloc_control *rc = root->fs_info->reloc_ctl;
   4427
   4428	if (!rc || !have_reloc_root(root))
   4429		return;
   4430
   4431	if (!rc->merge_reloc_tree)
   4432		return;
   4433
   4434	root = root->reloc_root;
   4435	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
   4436	/*
    4437	 * Relocation is in the stage of merging trees. In the worst
    4438	 * case, the space used by merging a reloc tree is twice the
    4439	 * size of the relocated tree nodes: half for COWing the
    4440	 * reloc tree, half for COWing the fs tree. The space used
    4441	 * by COWing the reloc tree will be freed after the tree is
    4442	 * dropped. If we create a snapshot, COWing the fs tree may
    4443	 * use more space than it frees, so we need to reserve extra
    4444	 * space.
   4445	 */
   4446	*bytes_to_reserve += rc->nodes_relocated;
   4447}
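
/*
 * Worked example (illustrative numbers): if rc->nodes_relocated is 8 MiB,
 * merging may COW up to 8 MiB in the reloc tree (freed once the tree is
 * dropped) plus up to 8 MiB in the fs tree (kept if a snapshot pins the old
 * blocks), hence the extra 8 MiB added to *bytes_to_reserve above.
 */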
   4448
   4449/*
    4450 * Called after a snapshot is created. Migrate the block reservation
    4451 * and create a reloc root for the newly created snapshot.
   4452 *
   4453 * This is similar to btrfs_init_reloc_root(), we come out of here with two
   4454 * references held on the reloc_root, one for root->reloc_root and one for
   4455 * rc->reloc_roots.
   4456 */
   4457int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
   4458			       struct btrfs_pending_snapshot *pending)
   4459{
   4460	struct btrfs_root *root = pending->root;
   4461	struct btrfs_root *reloc_root;
   4462	struct btrfs_root *new_root;
   4463	struct reloc_control *rc = root->fs_info->reloc_ctl;
   4464	int ret;
   4465
   4466	if (!rc || !have_reloc_root(root))
   4467		return 0;
   4468
   4469	rc = root->fs_info->reloc_ctl;
   4470	rc->merging_rsv_size += rc->nodes_relocated;
   4471
   4472	if (rc->merge_reloc_tree) {
   4473		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
   4474					      rc->block_rsv,
   4475					      rc->nodes_relocated, true);
   4476		if (ret)
   4477			return ret;
   4478	}
   4479
   4480	new_root = pending->snap;
   4481	reloc_root = create_reloc_root(trans, root->reloc_root,
   4482				       new_root->root_key.objectid);
   4483	if (IS_ERR(reloc_root))
   4484		return PTR_ERR(reloc_root);
   4485
   4486	ret = __add_reloc_root(reloc_root);
   4487	ASSERT(ret != -EEXIST);
   4488	if (ret) {
   4489		/* Pairs with create_reloc_root */
   4490		btrfs_put_root(reloc_root);
   4491		return ret;
   4492	}
   4493	new_root->reloc_root = btrfs_grab_root(reloc_root);
   4494
   4495	if (rc->create_reloc_tree)
   4496		ret = clone_backref_node(trans, rc, root, reloc_root);
   4497	return ret;
   4498}