cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

delayed-inode.c (51107B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2011 Fujitsu.  All rights reserved.
      4 * Written by Miao Xie <miaox@cn.fujitsu.com>
      5 */
      6
      7#include <linux/slab.h>
      8#include <linux/iversion.h>
      9#include "misc.h"
     10#include "delayed-inode.h"
     11#include "disk-io.h"
     12#include "transaction.h"
     13#include "ctree.h"
     14#include "qgroup.h"
     15#include "locking.h"
     16#include "inode-item.h"
     17
     18#define BTRFS_DELAYED_WRITEBACK		512
     19#define BTRFS_DELAYED_BACKGROUND	128
     20#define BTRFS_DELAYED_BATCH		16
     21
     22static struct kmem_cache *delayed_node_cache;
     23
     24int __init btrfs_delayed_inode_init(void)
     25{
     26	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
     27					sizeof(struct btrfs_delayed_node),
     28					0,
     29					SLAB_MEM_SPREAD,
     30					NULL);
     31	if (!delayed_node_cache)
     32		return -ENOMEM;
     33	return 0;
     34}
     35
     36void __cold btrfs_delayed_inode_exit(void)
     37{
     38	kmem_cache_destroy(delayed_node_cache);
     39}
     40
     41static inline void btrfs_init_delayed_node(
     42				struct btrfs_delayed_node *delayed_node,
     43				struct btrfs_root *root, u64 inode_id)
     44{
     45	delayed_node->root = root;
     46	delayed_node->inode_id = inode_id;
     47	refcount_set(&delayed_node->refs, 0);
     48	delayed_node->ins_root = RB_ROOT_CACHED;
     49	delayed_node->del_root = RB_ROOT_CACHED;
     50	mutex_init(&delayed_node->mutex);
     51	INIT_LIST_HEAD(&delayed_node->n_list);
     52	INIT_LIST_HEAD(&delayed_node->p_list);
     53}
     54
     55static inline int btrfs_is_continuous_delayed_item(
     56					struct btrfs_delayed_item *item1,
     57					struct btrfs_delayed_item *item2)
     58{
     59	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
     60	    item1->key.objectid == item2->key.objectid &&
     61	    item1->key.type == item2->key.type &&
     62	    item1->key.offset + 1 == item2->key.offset)
     63		return 1;
     64	return 0;
     65}
     66
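        /*
         * Look up the delayed node for this inode, first in the inode's own
         * cache and then in the root's xarray, taking a reference for the
         * caller.  Returns NULL if the inode has no delayed node or the node
         * is already being destroyed.
         */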
     67static struct btrfs_delayed_node *btrfs_get_delayed_node(
     68		struct btrfs_inode *btrfs_inode)
     69{
     70	struct btrfs_root *root = btrfs_inode->root;
     71	u64 ino = btrfs_ino(btrfs_inode);
     72	struct btrfs_delayed_node *node;
     73
     74	node = READ_ONCE(btrfs_inode->delayed_node);
     75	if (node) {
     76		refcount_inc(&node->refs);
     77		return node;
     78	}
     79
     80	spin_lock(&root->inode_lock);
     81	node = xa_load(&root->delayed_nodes, ino);
     82
     83	if (node) {
     84		if (btrfs_inode->delayed_node) {
     85			refcount_inc(&node->refs);	/* can be accessed */
     86			BUG_ON(btrfs_inode->delayed_node != node);
     87			spin_unlock(&root->inode_lock);
     88			return node;
     89		}
     90
     91		/*
     92		 * It's possible that we're racing into the middle of removing
     93		 * this node from the xarray.  In this case, the refcount
     94		 * was zero and it should never go back to one.  Just return
     95		 * NULL like it was never in the xarray at all; our release
     96		 * function is in the process of removing it.
     97		 *
     98		 * Some implementations of refcount_inc refuse to bump the
     99		 * refcount once it has hit zero.  If we don't do this dance
    100		 * here, refcount_inc() may decide to just WARN_ONCE() instead
    101		 * of actually bumping the refcount.
    102		 *
    103		 * If this node is properly in the xarray, we want to bump the
    104		 * refcount twice, once for the inode and once for this get
    105		 * operation.
    106		 */
    107		if (refcount_inc_not_zero(&node->refs)) {
    108			refcount_inc(&node->refs);
    109			btrfs_inode->delayed_node = node;
    110		} else {
    111			node = NULL;
    112		}
    113
    114		spin_unlock(&root->inode_lock);
    115		return node;
    116	}
    117	spin_unlock(&root->inode_lock);
    118
    119	return NULL;
    120}
    121
    122/* Will return either the node or PTR_ERR(-ENOMEM) */
    123static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
    124		struct btrfs_inode *btrfs_inode)
    125{
    126	struct btrfs_delayed_node *node;
    127	struct btrfs_root *root = btrfs_inode->root;
    128	u64 ino = btrfs_ino(btrfs_inode);
    129	int ret;
    130
    131	do {
    132		node = btrfs_get_delayed_node(btrfs_inode);
    133		if (node)
    134			return node;
    135
    136		node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
    137		if (!node)
    138			return ERR_PTR(-ENOMEM);
    139		btrfs_init_delayed_node(node, root, ino);
    140
    141		/* Cached in the inode and can be accessed */
    142		refcount_set(&node->refs, 2);
    143
    144		spin_lock(&root->inode_lock);
    145		ret = xa_insert(&root->delayed_nodes, ino, node, GFP_NOFS);
    146		if (ret) {
    147			spin_unlock(&root->inode_lock);
    148			kmem_cache_free(delayed_node_cache, node);
    149			if (ret != -EBUSY)
    150				return ERR_PTR(ret);
    151		}
    152	} while (ret);
    153	btrfs_inode->delayed_node = node;
    154	spin_unlock(&root->inode_lock);
    155
    156	return node;
    157}
    158
    159/*
    160 * Call it when holding delayed_node->mutex
    161 *
    162 * If mod = 1, add this node into the prepared list.
    163 */
    164static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
    165				     struct btrfs_delayed_node *node,
    166				     int mod)
    167{
    168	spin_lock(&root->lock);
    169	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
    170		if (!list_empty(&node->p_list))
    171			list_move_tail(&node->p_list, &root->prepare_list);
    172		else if (mod)
    173			list_add_tail(&node->p_list, &root->prepare_list);
    174	} else {
    175		list_add_tail(&node->n_list, &root->node_list);
    176		list_add_tail(&node->p_list, &root->prepare_list);
    177		refcount_inc(&node->refs);	/* inserted into list */
    178		root->nodes++;
    179		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
    180	}
    181	spin_unlock(&root->lock);
    182}
    183
    184/* Call it when holding delayed_node->mutex */
    185static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
    186				       struct btrfs_delayed_node *node)
    187{
    188	spin_lock(&root->lock);
    189	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
    190		root->nodes--;
    191		refcount_dec(&node->refs);	/* not in the list */
    192		list_del_init(&node->n_list);
    193		if (!list_empty(&node->p_list))
    194			list_del_init(&node->p_list);
    195		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
    196	}
    197	spin_unlock(&root->lock);
    198}
    199
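        /*
         * Grab the first delayed node on the root's node_list, taking a
         * reference on it.  Returns NULL if the list is empty.
         */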
    200static struct btrfs_delayed_node *btrfs_first_delayed_node(
    201			struct btrfs_delayed_root *delayed_root)
    202{
    203	struct list_head *p;
    204	struct btrfs_delayed_node *node = NULL;
    205
    206	spin_lock(&delayed_root->lock);
    207	if (list_empty(&delayed_root->node_list))
    208		goto out;
    209
    210	p = delayed_root->node_list.next;
    211	node = list_entry(p, struct btrfs_delayed_node, n_list);
    212	refcount_inc(&node->refs);
    213out:
    214	spin_unlock(&delayed_root->lock);
    215
    216	return node;
    217}
    218
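        /*
         * Return the delayed node following @node on the node_list (with a
         * reference taken), or the first node if @node is no longer on the
         * list.  Returns NULL if there is no next node.
         */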
    219static struct btrfs_delayed_node *btrfs_next_delayed_node(
    220						struct btrfs_delayed_node *node)
    221{
    222	struct btrfs_delayed_root *delayed_root;
    223	struct list_head *p;
    224	struct btrfs_delayed_node *next = NULL;
    225
    226	delayed_root = node->root->fs_info->delayed_root;
    227	spin_lock(&delayed_root->lock);
    228	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
    229		/* not in the list */
    230		if (list_empty(&delayed_root->node_list))
    231			goto out;
    232		p = delayed_root->node_list.next;
    233	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
    234		goto out;
    235	else
    236		p = node->n_list.next;
    237
    238	next = list_entry(p, struct btrfs_delayed_node, n_list);
    239	refcount_inc(&next->refs);
    240out:
    241	spin_unlock(&delayed_root->lock);
    242
    243	return next;
    244}
    245
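        /*
         * Drop a reference on the delayed node, requeueing it (or dequeueing
         * it if it has no more items) first.  When the last reference is
         * dropped, the node is removed from the root's xarray and freed.
         */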
    246static void __btrfs_release_delayed_node(
    247				struct btrfs_delayed_node *delayed_node,
    248				int mod)
    249{
    250	struct btrfs_delayed_root *delayed_root;
    251
    252	if (!delayed_node)
    253		return;
    254
    255	delayed_root = delayed_node->root->fs_info->delayed_root;
    256
    257	mutex_lock(&delayed_node->mutex);
    258	if (delayed_node->count)
    259		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
    260	else
    261		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
    262	mutex_unlock(&delayed_node->mutex);
    263
    264	if (refcount_dec_and_test(&delayed_node->refs)) {
    265		struct btrfs_root *root = delayed_node->root;
    266
    267		spin_lock(&root->inode_lock);
    268		/*
    269		 * Once our refcount goes to zero, nobody is allowed to bump it
    270		 * back up.  We can delete it now.
    271		 */
    272		ASSERT(refcount_read(&delayed_node->refs) == 0);
    273		xa_erase(&root->delayed_nodes, delayed_node->inode_id);
    274		spin_unlock(&root->inode_lock);
    275		kmem_cache_free(delayed_node_cache, delayed_node);
    276	}
    277}
    278
    279static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
    280{
    281	__btrfs_release_delayed_node(node, 0);
    282}
    283
    284static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
    285					struct btrfs_delayed_root *delayed_root)
    286{
    287	struct list_head *p;
    288	struct btrfs_delayed_node *node = NULL;
    289
    290	spin_lock(&delayed_root->lock);
    291	if (list_empty(&delayed_root->prepare_list))
    292		goto out;
    293
    294	p = delayed_root->prepare_list.next;
    295	list_del_init(p);
    296	node = list_entry(p, struct btrfs_delayed_node, p_list);
    297	refcount_inc(&node->refs);
    298out:
    299	spin_unlock(&delayed_root->lock);
    300
    301	return node;
    302}
    303
    304static inline void btrfs_release_prepared_delayed_node(
    305					struct btrfs_delayed_node *node)
    306{
    307	__btrfs_release_delayed_node(node, 1);
    308}
    309
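        /*
         * Allocate a delayed item with room for @data_len bytes of item data
         * appended after the structure.
         */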
    310static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
    311{
    312	struct btrfs_delayed_item *item;
    313	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
    314	if (item) {
    315		item->data_len = data_len;
    316		item->ins_or_del = 0;
    317		item->bytes_reserved = 0;
    318		item->delayed_node = NULL;
    319		refcount_set(&item->refs, 1);
    320	}
    321	return item;
    322}
    323
    324/*
    325 * __btrfs_lookup_delayed_item - look up the delayed item by key
     326 * @root:	  the root of the rb-tree to search in
    327 * @key:	  the key to look up
    328 * @prev:	  used to store the prev item if the right item isn't found
    329 * @next:	  used to store the next item if the right item isn't found
    330 *
     331 * Note: if we don't find the exact item, we return NULL and store the
     332 * previous and next items in @prev and @next.
    333 */
    334static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
    335				struct rb_root *root,
    336				struct btrfs_key *key,
    337				struct btrfs_delayed_item **prev,
    338				struct btrfs_delayed_item **next)
    339{
    340	struct rb_node *node, *prev_node = NULL;
    341	struct btrfs_delayed_item *delayed_item = NULL;
    342	int ret = 0;
    343
    344	node = root->rb_node;
    345
    346	while (node) {
    347		delayed_item = rb_entry(node, struct btrfs_delayed_item,
    348					rb_node);
    349		prev_node = node;
    350		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
    351		if (ret < 0)
    352			node = node->rb_right;
    353		else if (ret > 0)
    354			node = node->rb_left;
    355		else
    356			return delayed_item;
    357	}
    358
    359	if (prev) {
    360		if (!prev_node)
    361			*prev = NULL;
    362		else if (ret < 0)
    363			*prev = delayed_item;
    364		else if ((node = rb_prev(prev_node)) != NULL) {
    365			*prev = rb_entry(node, struct btrfs_delayed_item,
    366					 rb_node);
    367		} else
    368			*prev = NULL;
    369	}
    370
    371	if (next) {
    372		if (!prev_node)
    373			*next = NULL;
    374		else if (ret > 0)
    375			*next = delayed_item;
    376		else if ((node = rb_next(prev_node)) != NULL) {
    377			*next = rb_entry(node, struct btrfs_delayed_item,
    378					 rb_node);
    379		} else
    380			*next = NULL;
    381	}
    382	return NULL;
    383}
    384
    385static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
    386					struct btrfs_delayed_node *delayed_node,
    387					struct btrfs_key *key)
    388{
    389	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
    390					   NULL, NULL);
    391}
    392
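        /*
         * Insert the delayed item into the insertion or deletion rb-tree of
         * the delayed node, depending on @action, and account it.  Returns
         * -EEXIST if an item with the same key is already present.
         */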
    393static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
    394				    struct btrfs_delayed_item *ins,
    395				    int action)
    396{
    397	struct rb_node **p, *node;
    398	struct rb_node *parent_node = NULL;
    399	struct rb_root_cached *root;
    400	struct btrfs_delayed_item *item;
    401	int cmp;
    402	bool leftmost = true;
    403
    404	if (action == BTRFS_DELAYED_INSERTION_ITEM)
    405		root = &delayed_node->ins_root;
    406	else if (action == BTRFS_DELAYED_DELETION_ITEM)
    407		root = &delayed_node->del_root;
    408	else
    409		BUG();
    410	p = &root->rb_root.rb_node;
    411	node = &ins->rb_node;
    412
    413	while (*p) {
    414		parent_node = *p;
    415		item = rb_entry(parent_node, struct btrfs_delayed_item,
    416				 rb_node);
    417
    418		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
    419		if (cmp < 0) {
    420			p = &(*p)->rb_right;
    421			leftmost = false;
    422		} else if (cmp > 0) {
    423			p = &(*p)->rb_left;
    424		} else {
    425			return -EEXIST;
    426		}
    427	}
    428
    429	rb_link_node(node, parent_node, p);
    430	rb_insert_color_cached(node, root, leftmost);
    431	ins->delayed_node = delayed_node;
    432	ins->ins_or_del = action;
    433
    434	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
    435	    action == BTRFS_DELAYED_INSERTION_ITEM &&
    436	    ins->key.offset >= delayed_node->index_cnt)
    437			delayed_node->index_cnt = ins->key.offset + 1;
    438
    439	delayed_node->count++;
    440	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
    441	return 0;
    442}
    443
    444static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
    445					      struct btrfs_delayed_item *item)
    446{
    447	return __btrfs_add_delayed_item(node, item,
    448					BTRFS_DELAYED_INSERTION_ITEM);
    449}
    450
    451static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
    452					     struct btrfs_delayed_item *item)
    453{
    454	return __btrfs_add_delayed_item(node, item,
    455					BTRFS_DELAYED_DELETION_ITEM);
    456}
    457
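        /*
         * Account one completed delayed item and wake up waiters in
         * btrfs_balance_delayed_items() when enough items have been processed.
         */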
    458static void finish_one_item(struct btrfs_delayed_root *delayed_root)
    459{
    460	int seq = atomic_inc_return(&delayed_root->items_seq);
    461
    462	/* atomic_dec_return implies a barrier */
    463	if ((atomic_dec_return(&delayed_root->items) <
    464	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
    465		cond_wake_up_nomb(&delayed_root->wait);
    466}
    467
    468static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
    469{
    470	struct rb_root_cached *root;
    471	struct btrfs_delayed_root *delayed_root;
    472
    473	/* Not associated with any delayed_node */
    474	if (!delayed_item->delayed_node)
    475		return;
    476	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
    477
    478	BUG_ON(!delayed_root);
    479	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
    480	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
    481
    482	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
    483		root = &delayed_item->delayed_node->ins_root;
    484	else
    485		root = &delayed_item->delayed_node->del_root;
    486
    487	rb_erase_cached(&delayed_item->rb_node, root);
    488	delayed_item->delayed_node->count--;
    489
    490	finish_one_item(delayed_root);
    491}
    492
    493static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
    494{
    495	if (item) {
    496		__btrfs_remove_delayed_item(item);
    497		if (refcount_dec_and_test(&item->refs))
    498			kfree(item);
    499	}
    500}
    501
    502static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
    503					struct btrfs_delayed_node *delayed_node)
    504{
    505	struct rb_node *p;
    506	struct btrfs_delayed_item *item = NULL;
    507
    508	p = rb_first_cached(&delayed_node->ins_root);
    509	if (p)
    510		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
    511
    512	return item;
    513}
    514
    515static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
    516					struct btrfs_delayed_node *delayed_node)
    517{
    518	struct rb_node *p;
    519	struct btrfs_delayed_item *item = NULL;
    520
    521	p = rb_first_cached(&delayed_node->del_root);
    522	if (p)
    523		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
    524
    525	return item;
    526}
    527
    528static struct btrfs_delayed_item *__btrfs_next_delayed_item(
    529						struct btrfs_delayed_item *item)
    530{
    531	struct rb_node *p;
    532	struct btrfs_delayed_item *next = NULL;
    533
    534	p = rb_next(&item->rb_node);
    535	if (p)
    536		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
    537
    538	return next;
    539}
    540
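        /*
         * Reserve metadata space for a delayed item by migrating it from the
         * transaction's block reserve to the delayed_block_rsv.  A no-op if
         * the transaction reserved no bytes.
         */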
    541static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
    542					       struct btrfs_root *root,
    543					       struct btrfs_delayed_item *item)
    544{
    545	struct btrfs_block_rsv *src_rsv;
    546	struct btrfs_block_rsv *dst_rsv;
    547	struct btrfs_fs_info *fs_info = root->fs_info;
    548	u64 num_bytes;
    549	int ret;
    550
    551	if (!trans->bytes_reserved)
    552		return 0;
    553
    554	src_rsv = trans->block_rsv;
    555	dst_rsv = &fs_info->delayed_block_rsv;
    556
    557	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
    558
    559	/*
     560	 * Here we migrate space rsv from the transaction rsv, since we have
     561	 * already reserved space when starting the transaction.  So there is
     562	 * no need to reserve qgroup space here.
    563	 */
    564	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
    565	if (!ret) {
    566		trace_btrfs_space_reservation(fs_info, "delayed_item",
    567					      item->key.objectid,
    568					      num_bytes, 1);
    569		item->bytes_reserved = num_bytes;
    570	}
    571
    572	return ret;
    573}
    574
    575static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
    576						struct btrfs_delayed_item *item)
    577{
    578	struct btrfs_block_rsv *rsv;
    579	struct btrfs_fs_info *fs_info = root->fs_info;
    580
    581	if (!item->bytes_reserved)
    582		return;
    583
    584	rsv = &fs_info->delayed_block_rsv;
    585	/*
    586	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
    587	 * to release/reserve qgroup space.
    588	 */
    589	trace_btrfs_space_reservation(fs_info, "delayed_item",
    590				      item->key.objectid, item->bytes_reserved,
    591				      0);
    592	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
    593}
    594
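        /*
         * Reserve metadata space for a delayed inode update, either by
         * migrating space from the transaction's reserve or, if none was
         * reserved, by making a fresh NO_FLUSH reservation (plus qgroup
         * prealloc).
         */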
    595static int btrfs_delayed_inode_reserve_metadata(
    596					struct btrfs_trans_handle *trans,
    597					struct btrfs_root *root,
    598					struct btrfs_delayed_node *node)
    599{
    600	struct btrfs_fs_info *fs_info = root->fs_info;
    601	struct btrfs_block_rsv *src_rsv;
    602	struct btrfs_block_rsv *dst_rsv;
    603	u64 num_bytes;
    604	int ret;
    605
    606	src_rsv = trans->block_rsv;
    607	dst_rsv = &fs_info->delayed_block_rsv;
    608
    609	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
    610
    611	/*
    612	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
    613	 * which doesn't reserve space for speed.  This is a problem since we
    614	 * still need to reserve space for this update, so try to reserve the
    615	 * space.
    616	 *
    617	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
    618	 * we always reserve enough to update the inode item.
    619	 */
    620	if (!src_rsv || (!trans->bytes_reserved &&
    621			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
    622		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
    623					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
    624		if (ret < 0)
    625			return ret;
    626		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
    627					  BTRFS_RESERVE_NO_FLUSH);
    628		/* NO_FLUSH could only fail with -ENOSPC */
    629		ASSERT(ret == 0 || ret == -ENOSPC);
    630		if (ret)
    631			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
    632	} else {
    633		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
    634	}
    635
    636	if (!ret) {
    637		trace_btrfs_space_reservation(fs_info, "delayed_inode",
    638					      node->inode_id, num_bytes, 1);
    639		node->bytes_reserved = num_bytes;
    640	}
    641
    642	return ret;
    643}
    644
    645static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
    646						struct btrfs_delayed_node *node,
    647						bool qgroup_free)
    648{
    649	struct btrfs_block_rsv *rsv;
    650
    651	if (!node->bytes_reserved)
    652		return;
    653
    654	rsv = &fs_info->delayed_block_rsv;
    655	trace_btrfs_space_reservation(fs_info, "delayed_inode",
    656				      node->inode_id, node->bytes_reserved, 0);
    657	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
    658	if (qgroup_free)
    659		btrfs_qgroup_free_meta_prealloc(node->root,
    660				node->bytes_reserved);
    661	else
    662		btrfs_qgroup_convert_reserved_meta(node->root,
    663				node->bytes_reserved);
    664	node->bytes_reserved = 0;
    665}
    666
    667/*
    668 * Insert a single delayed item or a batch of delayed items that have consecutive
    669 * keys if they exist.
    670 */
    671static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
    672				     struct btrfs_root *root,
    673				     struct btrfs_path *path,
    674				     struct btrfs_delayed_item *first_item)
    675{
    676	LIST_HEAD(item_list);
    677	struct btrfs_delayed_item *curr;
    678	struct btrfs_delayed_item *next;
    679	const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
    680	struct btrfs_item_batch batch;
    681	int total_size;
    682	char *ins_data = NULL;
    683	int ret;
    684
    685	list_add_tail(&first_item->tree_list, &item_list);
    686	batch.total_data_size = first_item->data_len;
    687	batch.nr = 1;
    688	total_size = first_item->data_len + sizeof(struct btrfs_item);
    689	curr = first_item;
    690
    691	while (true) {
    692		int next_size;
    693
    694		next = __btrfs_next_delayed_item(curr);
    695		if (!next || !btrfs_is_continuous_delayed_item(curr, next))
    696			break;
    697
    698		next_size = next->data_len + sizeof(struct btrfs_item);
    699		if (total_size + next_size > max_size)
    700			break;
    701
    702		list_add_tail(&next->tree_list, &item_list);
    703		batch.nr++;
    704		total_size += next_size;
    705		batch.total_data_size += next->data_len;
    706		curr = next;
    707	}
    708
    709	if (batch.nr == 1) {
    710		batch.keys = &first_item->key;
    711		batch.data_sizes = &first_item->data_len;
    712	} else {
    713		struct btrfs_key *ins_keys;
    714		u32 *ins_sizes;
    715		int i = 0;
    716
    717		ins_data = kmalloc(batch.nr * sizeof(u32) +
    718				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
    719		if (!ins_data) {
    720			ret = -ENOMEM;
    721			goto out;
    722		}
    723		ins_sizes = (u32 *)ins_data;
    724		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
    725		batch.keys = ins_keys;
    726		batch.data_sizes = ins_sizes;
    727		list_for_each_entry(curr, &item_list, tree_list) {
    728			ins_keys[i] = curr->key;
    729			ins_sizes[i] = curr->data_len;
    730			i++;
    731		}
    732	}
    733
    734	ret = btrfs_insert_empty_items(trans, root, path, &batch);
    735	if (ret)
    736		goto out;
    737
    738	list_for_each_entry(curr, &item_list, tree_list) {
    739		char *data_ptr;
    740
    741		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
    742		write_extent_buffer(path->nodes[0], &curr->data,
    743				    (unsigned long)data_ptr, curr->data_len);
    744		path->slots[0]++;
    745	}
    746
    747	/*
    748	 * Now release our path before releasing the delayed items and their
    749	 * metadata reservations, so that we don't block other tasks for more
    750	 * time than needed.
    751	 */
    752	btrfs_release_path(path);
    753
    754	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
    755		list_del(&curr->tree_list);
    756		btrfs_delayed_item_release_metadata(root, curr);
    757		btrfs_release_delayed_item(curr);
    758	}
    759out:
    760	kfree(ins_data);
    761	return ret;
    762}
    763
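        /*
         * Flush all pending insertion items of the delayed node into the
         * subvolume tree, batching items with consecutive keys where possible.
         */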
    764static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
    765				      struct btrfs_path *path,
    766				      struct btrfs_root *root,
    767				      struct btrfs_delayed_node *node)
    768{
    769	int ret = 0;
    770
    771	while (ret == 0) {
    772		struct btrfs_delayed_item *curr;
    773
    774		mutex_lock(&node->mutex);
    775		curr = __btrfs_first_delayed_insertion_item(node);
    776		if (!curr) {
    777			mutex_unlock(&node->mutex);
    778			break;
    779		}
    780		ret = btrfs_insert_delayed_item(trans, root, path, curr);
    781		mutex_unlock(&node->mutex);
    782	}
    783
    784	return ret;
    785}
    786
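        /*
         * Delete from the leaf at path->nodes[0] as many consecutive dir
         * index items as match the pending deletion items starting at @item.
         */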
    787static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
    788				    struct btrfs_root *root,
    789				    struct btrfs_path *path,
    790				    struct btrfs_delayed_item *item)
    791{
    792	struct btrfs_delayed_item *curr, *next;
    793	struct extent_buffer *leaf;
    794	struct btrfs_key key;
    795	struct list_head head;
    796	int nitems, i, last_item;
    797	int ret = 0;
    798
    799	BUG_ON(!path->nodes[0]);
    800
    801	leaf = path->nodes[0];
    802
    803	i = path->slots[0];
    804	last_item = btrfs_header_nritems(leaf) - 1;
    805	if (i > last_item)
    806		return -ENOENT;	/* FIXME: Is errno suitable? */
    807
    808	next = item;
    809	INIT_LIST_HEAD(&head);
    810	btrfs_item_key_to_cpu(leaf, &key, i);
    811	nitems = 0;
    812	/*
     813	 * Count the number of dir index items that we can delete in a batch.
    814	 */
    815	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
    816		list_add_tail(&next->tree_list, &head);
    817		nitems++;
    818
    819		curr = next;
    820		next = __btrfs_next_delayed_item(curr);
    821		if (!next)
    822			break;
    823
    824		if (!btrfs_is_continuous_delayed_item(curr, next))
    825			break;
    826
    827		i++;
    828		if (i > last_item)
    829			break;
    830		btrfs_item_key_to_cpu(leaf, &key, i);
    831	}
    832
    833	if (!nitems)
    834		return 0;
    835
    836	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
    837	if (ret)
    838		goto out;
    839
    840	list_for_each_entry_safe(curr, next, &head, tree_list) {
    841		btrfs_delayed_item_release_metadata(root, curr);
    842		list_del(&curr->tree_list);
    843		btrfs_release_delayed_item(curr);
    844	}
    845
    846out:
    847	return ret;
    848}
    849
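        /*
         * Process all pending deletion items of the delayed node, removing
         * the corresponding items from the subvolume tree.
         */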
    850static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
    851				      struct btrfs_path *path,
    852				      struct btrfs_root *root,
    853				      struct btrfs_delayed_node *node)
    854{
    855	struct btrfs_delayed_item *curr, *prev;
    856	int ret = 0;
    857
    858do_again:
    859	mutex_lock(&node->mutex);
    860	curr = __btrfs_first_delayed_deletion_item(node);
    861	if (!curr)
    862		goto delete_fail;
    863
    864	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
    865	if (ret < 0)
    866		goto delete_fail;
    867	else if (ret > 0) {
    868		/*
     869		 * can't find the item which this delayed item points to, so the
     870		 * delayed item is invalid, just drop it.
    871		 */
    872		prev = curr;
    873		curr = __btrfs_next_delayed_item(prev);
    874		btrfs_release_delayed_item(prev);
    875		ret = 0;
    876		btrfs_release_path(path);
    877		if (curr) {
    878			mutex_unlock(&node->mutex);
    879			goto do_again;
    880		} else
    881			goto delete_fail;
    882	}
    883
    884	btrfs_batch_delete_items(trans, root, path, curr);
    885	btrfs_release_path(path);
    886	mutex_unlock(&node->mutex);
    887	goto do_again;
    888
    889delete_fail:
    890	btrfs_release_path(path);
    891	mutex_unlock(&node->mutex);
    892	return ret;
    893}
    894
    895static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
    896{
    897	struct btrfs_delayed_root *delayed_root;
    898
    899	if (delayed_node &&
    900	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
    901		BUG_ON(!delayed_node->root);
    902		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
    903		delayed_node->count--;
    904
    905		delayed_root = delayed_node->root->fs_info->delayed_root;
    906		finish_one_item(delayed_root);
    907	}
    908}
    909
    910static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
    911{
    912
    913	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
    914		struct btrfs_delayed_root *delayed_root;
    915
    916		ASSERT(delayed_node->root);
    917		delayed_node->count--;
    918
    919		delayed_root = delayed_node->root->fs_info->delayed_root;
    920		finish_one_item(delayed_root);
    921	}
    922}
    923
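        /*
         * Write the cached inode item back into the subvolume tree and, if
         * requested, delete the inode's single INODE_REF/EXTREF item.  Called
         * with the delayed node's mutex held.
         */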
    924static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
    925					struct btrfs_root *root,
    926					struct btrfs_path *path,
    927					struct btrfs_delayed_node *node)
    928{
    929	struct btrfs_fs_info *fs_info = root->fs_info;
    930	struct btrfs_key key;
    931	struct btrfs_inode_item *inode_item;
    932	struct extent_buffer *leaf;
    933	int mod;
    934	int ret;
    935
    936	key.objectid = node->inode_id;
    937	key.type = BTRFS_INODE_ITEM_KEY;
    938	key.offset = 0;
    939
    940	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
    941		mod = -1;
    942	else
    943		mod = 1;
    944
    945	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
    946	if (ret > 0)
    947		ret = -ENOENT;
    948	if (ret < 0)
    949		goto out;
    950
    951	leaf = path->nodes[0];
    952	inode_item = btrfs_item_ptr(leaf, path->slots[0],
    953				    struct btrfs_inode_item);
    954	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
    955			    sizeof(struct btrfs_inode_item));
    956	btrfs_mark_buffer_dirty(leaf);
    957
    958	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
    959		goto out;
    960
    961	path->slots[0]++;
    962	if (path->slots[0] >= btrfs_header_nritems(leaf))
    963		goto search;
    964again:
    965	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    966	if (key.objectid != node->inode_id)
    967		goto out;
    968
    969	if (key.type != BTRFS_INODE_REF_KEY &&
    970	    key.type != BTRFS_INODE_EXTREF_KEY)
    971		goto out;
    972
    973	/*
     974	 * Delayed iref deletion is only done for an inode with a single
     975	 * link, so there is only one iref. The case of several irefs in
     976	 * the same item does not exist.
    977	 */
    978	btrfs_del_item(trans, root, path);
    979out:
    980	btrfs_release_delayed_iref(node);
    981	btrfs_release_path(path);
    982err_out:
    983	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
    984	btrfs_release_delayed_inode(node);
    985
    986	/*
    987	 * If we fail to update the delayed inode we need to abort the
    988	 * transaction, because we could leave the inode with the improper
    989	 * counts behind.
    990	 */
    991	if (ret && ret != -ENOENT)
    992		btrfs_abort_transaction(trans, ret);
    993
    994	return ret;
    995
    996search:
    997	btrfs_release_path(path);
    998
    999	key.type = BTRFS_INODE_EXTREF_KEY;
   1000	key.offset = -1;
   1001
   1002	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
   1003	if (ret < 0)
   1004		goto err_out;
   1005	ASSERT(ret);
   1006
   1007	ret = 0;
   1008	leaf = path->nodes[0];
   1009	path->slots[0]--;
   1010	goto again;
   1011}
   1012
   1013static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
   1014					     struct btrfs_root *root,
   1015					     struct btrfs_path *path,
   1016					     struct btrfs_delayed_node *node)
   1017{
   1018	int ret;
   1019
   1020	mutex_lock(&node->mutex);
   1021	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
   1022		mutex_unlock(&node->mutex);
   1023		return 0;
   1024	}
   1025
   1026	ret = __btrfs_update_delayed_inode(trans, root, path, node);
   1027	mutex_unlock(&node->mutex);
   1028	return ret;
   1029}
   1030
   1031static inline int
   1032__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
   1033				   struct btrfs_path *path,
   1034				   struct btrfs_delayed_node *node)
   1035{
   1036	int ret;
   1037
   1038	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
   1039	if (ret)
   1040		return ret;
   1041
   1042	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
   1043	if (ret)
   1044		return ret;
   1045
   1046	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
   1047	return ret;
   1048}
   1049
   1050/*
   1051 * Called when committing the transaction.
   1052 * Returns 0 on success.
   1053 * Returns < 0 on error and returns with an aborted transaction with any
   1054 * outstanding delayed items cleaned up.
   1055 */
   1056static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
   1057{
   1058	struct btrfs_fs_info *fs_info = trans->fs_info;
   1059	struct btrfs_delayed_root *delayed_root;
   1060	struct btrfs_delayed_node *curr_node, *prev_node;
   1061	struct btrfs_path *path;
   1062	struct btrfs_block_rsv *block_rsv;
   1063	int ret = 0;
   1064	bool count = (nr > 0);
   1065
   1066	if (TRANS_ABORTED(trans))
   1067		return -EIO;
   1068
   1069	path = btrfs_alloc_path();
   1070	if (!path)
   1071		return -ENOMEM;
   1072
   1073	block_rsv = trans->block_rsv;
   1074	trans->block_rsv = &fs_info->delayed_block_rsv;
   1075
   1076	delayed_root = fs_info->delayed_root;
   1077
   1078	curr_node = btrfs_first_delayed_node(delayed_root);
   1079	while (curr_node && (!count || nr--)) {
   1080		ret = __btrfs_commit_inode_delayed_items(trans, path,
   1081							 curr_node);
   1082		if (ret) {
   1083			btrfs_release_delayed_node(curr_node);
   1084			curr_node = NULL;
   1085			btrfs_abort_transaction(trans, ret);
   1086			break;
   1087		}
   1088
   1089		prev_node = curr_node;
   1090		curr_node = btrfs_next_delayed_node(curr_node);
   1091		btrfs_release_delayed_node(prev_node);
   1092	}
   1093
   1094	if (curr_node)
   1095		btrfs_release_delayed_node(curr_node);
   1096	btrfs_free_path(path);
   1097	trans->block_rsv = block_rsv;
   1098
   1099	return ret;
   1100}
   1101
   1102int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
   1103{
   1104	return __btrfs_run_delayed_items(trans, -1);
   1105}
   1106
   1107int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
   1108{
   1109	return __btrfs_run_delayed_items(trans, nr);
   1110}
   1111
   1112int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
   1113				     struct btrfs_inode *inode)
   1114{
   1115	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
   1116	struct btrfs_path *path;
   1117	struct btrfs_block_rsv *block_rsv;
   1118	int ret;
   1119
   1120	if (!delayed_node)
   1121		return 0;
   1122
   1123	mutex_lock(&delayed_node->mutex);
   1124	if (!delayed_node->count) {
   1125		mutex_unlock(&delayed_node->mutex);
   1126		btrfs_release_delayed_node(delayed_node);
   1127		return 0;
   1128	}
   1129	mutex_unlock(&delayed_node->mutex);
   1130
   1131	path = btrfs_alloc_path();
   1132	if (!path) {
   1133		btrfs_release_delayed_node(delayed_node);
   1134		return -ENOMEM;
   1135	}
   1136
   1137	block_rsv = trans->block_rsv;
   1138	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
   1139
   1140	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
   1141
   1142	btrfs_release_delayed_node(delayed_node);
   1143	btrfs_free_path(path);
   1144	trans->block_rsv = block_rsv;
   1145
   1146	return ret;
   1147}
   1148
   1149int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
   1150{
   1151	struct btrfs_fs_info *fs_info = inode->root->fs_info;
   1152	struct btrfs_trans_handle *trans;
   1153	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
   1154	struct btrfs_path *path;
   1155	struct btrfs_block_rsv *block_rsv;
   1156	int ret;
   1157
   1158	if (!delayed_node)
   1159		return 0;
   1160
   1161	mutex_lock(&delayed_node->mutex);
   1162	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
   1163		mutex_unlock(&delayed_node->mutex);
   1164		btrfs_release_delayed_node(delayed_node);
   1165		return 0;
   1166	}
   1167	mutex_unlock(&delayed_node->mutex);
   1168
   1169	trans = btrfs_join_transaction(delayed_node->root);
   1170	if (IS_ERR(trans)) {
   1171		ret = PTR_ERR(trans);
   1172		goto out;
   1173	}
   1174
   1175	path = btrfs_alloc_path();
   1176	if (!path) {
   1177		ret = -ENOMEM;
   1178		goto trans_out;
   1179	}
   1180
   1181	block_rsv = trans->block_rsv;
   1182	trans->block_rsv = &fs_info->delayed_block_rsv;
   1183
   1184	mutex_lock(&delayed_node->mutex);
   1185	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
   1186		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
   1187						   path, delayed_node);
   1188	else
   1189		ret = 0;
   1190	mutex_unlock(&delayed_node->mutex);
   1191
   1192	btrfs_free_path(path);
   1193	trans->block_rsv = block_rsv;
   1194trans_out:
   1195	btrfs_end_transaction(trans);
   1196	btrfs_btree_balance_dirty(fs_info);
   1197out:
   1198	btrfs_release_delayed_node(delayed_node);
   1199
   1200	return ret;
   1201}
   1202
   1203void btrfs_remove_delayed_node(struct btrfs_inode *inode)
   1204{
   1205	struct btrfs_delayed_node *delayed_node;
   1206
   1207	delayed_node = READ_ONCE(inode->delayed_node);
   1208	if (!delayed_node)
   1209		return;
   1210
   1211	inode->delayed_node = NULL;
   1212	btrfs_release_delayed_node(delayed_node);
   1213}
   1214
   1215struct btrfs_async_delayed_work {
   1216	struct btrfs_delayed_root *delayed_root;
   1217	int nr;
   1218	struct btrfs_work work;
   1219};
   1220
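        /*
         * Worker callback: keep committing prepared delayed nodes until the
         * number of pending items drops below half of BTRFS_DELAYED_BACKGROUND
         * or the requested amount of work has been done.
         */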
   1221static void btrfs_async_run_delayed_root(struct btrfs_work *work)
   1222{
   1223	struct btrfs_async_delayed_work *async_work;
   1224	struct btrfs_delayed_root *delayed_root;
   1225	struct btrfs_trans_handle *trans;
   1226	struct btrfs_path *path;
   1227	struct btrfs_delayed_node *delayed_node = NULL;
   1228	struct btrfs_root *root;
   1229	struct btrfs_block_rsv *block_rsv;
   1230	int total_done = 0;
   1231
   1232	async_work = container_of(work, struct btrfs_async_delayed_work, work);
   1233	delayed_root = async_work->delayed_root;
   1234
   1235	path = btrfs_alloc_path();
   1236	if (!path)
   1237		goto out;
   1238
   1239	do {
   1240		if (atomic_read(&delayed_root->items) <
   1241		    BTRFS_DELAYED_BACKGROUND / 2)
   1242			break;
   1243
   1244		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
   1245		if (!delayed_node)
   1246			break;
   1247
   1248		root = delayed_node->root;
   1249
   1250		trans = btrfs_join_transaction(root);
   1251		if (IS_ERR(trans)) {
   1252			btrfs_release_path(path);
   1253			btrfs_release_prepared_delayed_node(delayed_node);
   1254			total_done++;
   1255			continue;
   1256		}
   1257
   1258		block_rsv = trans->block_rsv;
   1259		trans->block_rsv = &root->fs_info->delayed_block_rsv;
   1260
   1261		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
   1262
   1263		trans->block_rsv = block_rsv;
   1264		btrfs_end_transaction(trans);
   1265		btrfs_btree_balance_dirty_nodelay(root->fs_info);
   1266
   1267		btrfs_release_path(path);
   1268		btrfs_release_prepared_delayed_node(delayed_node);
   1269		total_done++;
   1270
   1271	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
   1272		 || total_done < async_work->nr);
   1273
   1274	btrfs_free_path(path);
   1275out:
   1276	wake_up(&delayed_root->wait);
   1277	kfree(async_work);
   1278}
   1279
   1280
   1281static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
   1282				     struct btrfs_fs_info *fs_info, int nr)
   1283{
   1284	struct btrfs_async_delayed_work *async_work;
   1285
   1286	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
   1287	if (!async_work)
   1288		return -ENOMEM;
   1289
   1290	async_work->delayed_root = delayed_root;
   1291	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
   1292			NULL);
   1293	async_work->nr = nr;
   1294
   1295	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
   1296	return 0;
   1297}
   1298
   1299void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
   1300{
   1301	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
   1302}
   1303
   1304static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
   1305{
   1306	int val = atomic_read(&delayed_root->items_seq);
   1307
   1308	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
   1309		return 1;
   1310
   1311	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
   1312		return 1;
   1313
   1314	return 0;
   1315}
   1316
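        /*
         * Throttle delayed item producers: once too many items are pending,
         * kick the async worker, and if the backlog is very large wait until
         * a batch has been processed or the backlog shrinks.
         */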
   1317void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
   1318{
   1319	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
   1320
   1321	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
   1322		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
   1323		return;
   1324
   1325	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
   1326		int seq;
   1327		int ret;
   1328
   1329		seq = atomic_read(&delayed_root->items_seq);
   1330
   1331		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
   1332		if (ret)
   1333			return;
   1334
   1335		wait_event_interruptible(delayed_root->wait,
   1336					 could_end_wait(delayed_root, seq));
   1337		return;
   1338	}
   1339
   1340	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
   1341}
   1342
   1343/* Will return 0 or -ENOMEM */
   1344int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
   1345				   const char *name, int name_len,
   1346				   struct btrfs_inode *dir,
   1347				   struct btrfs_disk_key *disk_key, u8 type,
   1348				   u64 index)
   1349{
   1350	struct btrfs_delayed_node *delayed_node;
   1351	struct btrfs_delayed_item *delayed_item;
   1352	struct btrfs_dir_item *dir_item;
   1353	int ret;
   1354
   1355	delayed_node = btrfs_get_or_create_delayed_node(dir);
   1356	if (IS_ERR(delayed_node))
   1357		return PTR_ERR(delayed_node);
   1358
   1359	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
   1360	if (!delayed_item) {
   1361		ret = -ENOMEM;
   1362		goto release_node;
   1363	}
   1364
   1365	delayed_item->key.objectid = btrfs_ino(dir);
   1366	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
   1367	delayed_item->key.offset = index;
   1368
   1369	dir_item = (struct btrfs_dir_item *)delayed_item->data;
   1370	dir_item->location = *disk_key;
   1371	btrfs_set_stack_dir_transid(dir_item, trans->transid);
   1372	btrfs_set_stack_dir_data_len(dir_item, 0);
   1373	btrfs_set_stack_dir_name_len(dir_item, name_len);
   1374	btrfs_set_stack_dir_type(dir_item, type);
   1375	memcpy((char *)(dir_item + 1), name, name_len);
   1376
   1377	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
   1378	/*
    1379	 * We reserved enough space when we started the transaction, so
    1380	 * failing to reserve metadata here should be impossible.
   1381	 */
   1382	BUG_ON(ret);
   1383
   1384	mutex_lock(&delayed_node->mutex);
   1385	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
   1386	if (unlikely(ret)) {
   1387		btrfs_err(trans->fs_info,
   1388			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
   1389			  name_len, name, delayed_node->root->root_key.objectid,
   1390			  delayed_node->inode_id, ret);
   1391		BUG();
   1392	}
   1393	mutex_unlock(&delayed_node->mutex);
   1394
   1395release_node:
   1396	btrfs_release_delayed_node(delayed_node);
   1397	return ret;
   1398}
   1399
   1400static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
   1401					       struct btrfs_delayed_node *node,
   1402					       struct btrfs_key *key)
   1403{
   1404	struct btrfs_delayed_item *item;
   1405
   1406	mutex_lock(&node->mutex);
   1407	item = __btrfs_lookup_delayed_insertion_item(node, key);
   1408	if (!item) {
   1409		mutex_unlock(&node->mutex);
   1410		return 1;
   1411	}
   1412
   1413	btrfs_delayed_item_release_metadata(node->root, item);
   1414	btrfs_release_delayed_item(item);
   1415	mutex_unlock(&node->mutex);
   1416	return 0;
   1417}
   1418
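        /*
         * Record the deletion of the dir index item @index of directory @dir.
         * If a matching delayed insertion item exists it is simply dropped,
         * otherwise a delayed deletion item is queued.
         */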
   1419int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
   1420				   struct btrfs_inode *dir, u64 index)
   1421{
   1422	struct btrfs_delayed_node *node;
   1423	struct btrfs_delayed_item *item;
   1424	struct btrfs_key item_key;
   1425	int ret;
   1426
   1427	node = btrfs_get_or_create_delayed_node(dir);
   1428	if (IS_ERR(node))
   1429		return PTR_ERR(node);
   1430
   1431	item_key.objectid = btrfs_ino(dir);
   1432	item_key.type = BTRFS_DIR_INDEX_KEY;
   1433	item_key.offset = index;
   1434
   1435	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
   1436						  &item_key);
   1437	if (!ret)
   1438		goto end;
   1439
   1440	item = btrfs_alloc_delayed_item(0);
   1441	if (!item) {
   1442		ret = -ENOMEM;
   1443		goto end;
   1444	}
   1445
   1446	item->key = item_key;
   1447
   1448	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
   1449	/*
    1450	 * We reserved enough space when we started the transaction, so
    1451	 * failing to reserve metadata here should be impossible.
   1452	 */
   1453	if (ret < 0) {
   1454		btrfs_err(trans->fs_info,
    1455"metadata reservation failed for delayed dir item deletion, should have been reserved");
   1456		btrfs_release_delayed_item(item);
   1457		goto end;
   1458	}
   1459
   1460	mutex_lock(&node->mutex);
   1461	ret = __btrfs_add_delayed_deletion_item(node, item);
   1462	if (unlikely(ret)) {
   1463		btrfs_err(trans->fs_info,
   1464			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
   1465			  index, node->root->root_key.objectid,
   1466			  node->inode_id, ret);
   1467		btrfs_delayed_item_release_metadata(dir->root, item);
   1468		btrfs_release_delayed_item(item);
   1469	}
   1470	mutex_unlock(&node->mutex);
   1471end:
   1472	btrfs_release_delayed_node(node);
   1473	return ret;
   1474}
   1475
   1476int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
   1477{
   1478	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
   1479
   1480	if (!delayed_node)
   1481		return -ENOENT;
   1482
   1483	/*
    1484	 * Since we hold the i_mutex of this directory, no new directory index
    1485	 * can be added to the delayed node and index_cnt cannot be updated
    1486	 * concurrently, so we don't need to lock the delayed node.
   1487	 */
   1488	if (!delayed_node->index_cnt) {
   1489		btrfs_release_delayed_node(delayed_node);
   1490		return -EINVAL;
   1491	}
   1492
   1493	inode->index_cnt = delayed_node->index_cnt;
   1494	btrfs_release_delayed_node(delayed_node);
   1495	return 0;
   1496}
   1497
   1498bool btrfs_readdir_get_delayed_items(struct inode *inode,
   1499				     struct list_head *ins_list,
   1500				     struct list_head *del_list)
   1501{
   1502	struct btrfs_delayed_node *delayed_node;
   1503	struct btrfs_delayed_item *item;
   1504
   1505	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
   1506	if (!delayed_node)
   1507		return false;
   1508
   1509	/*
   1510	 * We can only do one readdir with delayed items at a time because of
   1511	 * item->readdir_list.
   1512	 */
   1513	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
   1514	btrfs_inode_lock(inode, 0);
   1515
   1516	mutex_lock(&delayed_node->mutex);
   1517	item = __btrfs_first_delayed_insertion_item(delayed_node);
   1518	while (item) {
   1519		refcount_inc(&item->refs);
   1520		list_add_tail(&item->readdir_list, ins_list);
   1521		item = __btrfs_next_delayed_item(item);
   1522	}
   1523
   1524	item = __btrfs_first_delayed_deletion_item(delayed_node);
   1525	while (item) {
   1526		refcount_inc(&item->refs);
   1527		list_add_tail(&item->readdir_list, del_list);
   1528		item = __btrfs_next_delayed_item(item);
   1529	}
   1530	mutex_unlock(&delayed_node->mutex);
   1531	/*
    1532	 * This delayed node is still cached in the btrfs inode, so refs
    1533	 * must be > 1 now, and we don't need to check whether it is about
    1534	 * to be freed or not.
    1535	 *
    1536	 * Besides that, this function is only used for readdir, and we do
    1537	 * not insert/delete delayed items during that period, so we also
    1538	 * don't need to requeue or dequeue this delayed node.
   1539	 */
   1540	refcount_dec(&delayed_node->refs);
   1541
   1542	return true;
   1543}
   1544
   1545void btrfs_readdir_put_delayed_items(struct inode *inode,
   1546				     struct list_head *ins_list,
   1547				     struct list_head *del_list)
   1548{
   1549	struct btrfs_delayed_item *curr, *next;
   1550
   1551	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
   1552		list_del(&curr->readdir_list);
   1553		if (refcount_dec_and_test(&curr->refs))
   1554			kfree(curr);
   1555	}
   1556
   1557	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
   1558		list_del(&curr->readdir_list);
   1559		if (refcount_dec_and_test(&curr->refs))
   1560			kfree(curr);
   1561	}
   1562
   1563	/*
   1564	 * The VFS is going to do up_read(), so we need to downgrade back to a
   1565	 * read lock.
   1566	 */
   1567	downgrade_write(&inode->i_rwsem);
   1568}
   1569
   1570int btrfs_should_delete_dir_index(struct list_head *del_list,
   1571				  u64 index)
   1572{
   1573	struct btrfs_delayed_item *curr;
   1574	int ret = 0;
   1575
   1576	list_for_each_entry(curr, del_list, readdir_list) {
   1577		if (curr->key.offset > index)
   1578			break;
   1579		if (curr->key.offset == index) {
   1580			ret = 1;
   1581			break;
   1582		}
   1583	}
   1584	return ret;
   1585}
   1586
    1587/*
    1588 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
    1589 * and emit it into the readdir context @ctx.
    1590 */
   1591int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
   1592				    struct list_head *ins_list)
   1593{
   1594	struct btrfs_dir_item *di;
   1595	struct btrfs_delayed_item *curr, *next;
   1596	struct btrfs_key location;
   1597	char *name;
   1598	int name_len;
   1599	int over = 0;
   1600	unsigned char d_type;
   1601
   1602	if (list_empty(ins_list))
   1603		return 0;
   1604
   1605	/*
    1606	 * The data of the delayed items can not change, so we don't need
    1607	 * to lock them. And since we hold the i_mutex of the directory,
    1608	 * nobody can delete any directory index now.
   1609	 */
   1610	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
   1611		list_del(&curr->readdir_list);
   1612
   1613		if (curr->key.offset < ctx->pos) {
   1614			if (refcount_dec_and_test(&curr->refs))
   1615				kfree(curr);
   1616			continue;
   1617		}
   1618
   1619		ctx->pos = curr->key.offset;
   1620
   1621		di = (struct btrfs_dir_item *)curr->data;
   1622		name = (char *)(di + 1);
   1623		name_len = btrfs_stack_dir_name_len(di);
   1624
   1625		d_type = fs_ftype_to_dtype(di->type);
   1626		btrfs_disk_key_to_cpu(&location, &di->location);
   1627
   1628		over = !dir_emit(ctx, name, name_len,
   1629			       location.objectid, d_type);
   1630
   1631		if (refcount_dec_and_test(&curr->refs))
   1632			kfree(curr);
   1633
   1634		if (over)
   1635			return 1;
   1636		ctx->pos++;
   1637	}
   1638	return 0;
   1639}
   1640
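        /*
         * Copy the in-memory VFS/btrfs inode fields into the stack (delayed)
         * inode item.
         */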
   1641static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
   1642				  struct btrfs_inode_item *inode_item,
   1643				  struct inode *inode)
   1644{
   1645	u64 flags;
   1646
   1647	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
   1648	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
   1649	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
   1650	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
   1651	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
   1652	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
   1653	btrfs_set_stack_inode_generation(inode_item,
   1654					 BTRFS_I(inode)->generation);
   1655	btrfs_set_stack_inode_sequence(inode_item,
   1656				       inode_peek_iversion(inode));
   1657	btrfs_set_stack_inode_transid(inode_item, trans->transid);
   1658	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
   1659	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
   1660					  BTRFS_I(inode)->ro_flags);
   1661	btrfs_set_stack_inode_flags(inode_item, flags);
   1662	btrfs_set_stack_inode_block_group(inode_item, 0);
   1663
   1664	btrfs_set_stack_timespec_sec(&inode_item->atime,
   1665				     inode->i_atime.tv_sec);
   1666	btrfs_set_stack_timespec_nsec(&inode_item->atime,
   1667				      inode->i_atime.tv_nsec);
   1668
   1669	btrfs_set_stack_timespec_sec(&inode_item->mtime,
   1670				     inode->i_mtime.tv_sec);
   1671	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
   1672				      inode->i_mtime.tv_nsec);
   1673
   1674	btrfs_set_stack_timespec_sec(&inode_item->ctime,
   1675				     inode->i_ctime.tv_sec);
   1676	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
   1677				      inode->i_ctime.tv_nsec);
   1678
   1679	btrfs_set_stack_timespec_sec(&inode_item->otime,
   1680				     BTRFS_I(inode)->i_otime.tv_sec);
   1681	btrfs_set_stack_timespec_nsec(&inode_item->otime,
   1682				     BTRFS_I(inode)->i_otime.tv_nsec);
   1683}
   1684
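        /*
         * Fill the VFS inode (and *rdev) from the inode item cached in its
         * delayed node, if the delayed node has a dirty inode item.  Returns
         * -ENOENT when no cached inode item exists.
         */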
   1685int btrfs_fill_inode(struct inode *inode, u32 *rdev)
   1686{
   1687	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
   1688	struct btrfs_delayed_node *delayed_node;
   1689	struct btrfs_inode_item *inode_item;
   1690
   1691	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
   1692	if (!delayed_node)
   1693		return -ENOENT;
   1694
   1695	mutex_lock(&delayed_node->mutex);
   1696	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
   1697		mutex_unlock(&delayed_node->mutex);
   1698		btrfs_release_delayed_node(delayed_node);
   1699		return -ENOENT;
   1700	}
   1701
   1702	inode_item = &delayed_node->inode_item;
   1703
   1704	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
   1705	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
   1706	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
   1707	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
   1708			round_up(i_size_read(inode), fs_info->sectorsize));
   1709	inode->i_mode = btrfs_stack_inode_mode(inode_item);
   1710	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
   1711	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
   1712	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
    1713	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
   1714
   1715	inode_set_iversion_queried(inode,
   1716				   btrfs_stack_inode_sequence(inode_item));
   1717	inode->i_rdev = 0;
   1718	*rdev = btrfs_stack_inode_rdev(inode_item);
   1719	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
   1720				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
   1721
   1722	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
   1723	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
   1724
   1725	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
   1726	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
   1727
   1728	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
   1729	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
   1730
   1731	BTRFS_I(inode)->i_otime.tv_sec =
   1732		btrfs_stack_timespec_sec(&inode_item->otime);
   1733	BTRFS_I(inode)->i_otime.tv_nsec =
   1734		btrfs_stack_timespec_nsec(&inode_item->otime);
   1735
   1736	inode->i_generation = BTRFS_I(inode)->generation;
   1737	BTRFS_I(inode)->index_cnt = (u64)-1;
   1738
   1739	mutex_unlock(&delayed_node->mutex);
   1740	btrfs_release_delayed_node(delayed_node);
   1741	return 0;
   1742}
   1743
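        /*
         * Record an inode update in the inode's delayed node instead of
         * updating the fs tree directly.  If the delayed inode item is
         * already dirty, just refresh its contents; otherwise reserve
         * metadata space first and then mark it dirty.
         */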
   1744int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
   1745			       struct btrfs_root *root,
   1746			       struct btrfs_inode *inode)
   1747{
   1748	struct btrfs_delayed_node *delayed_node;
   1749	int ret = 0;
   1750
   1751	delayed_node = btrfs_get_or_create_delayed_node(inode);
   1752	if (IS_ERR(delayed_node))
   1753		return PTR_ERR(delayed_node);
   1754
   1755	mutex_lock(&delayed_node->mutex);
   1756	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
   1757		fill_stack_inode_item(trans, &delayed_node->inode_item,
   1758				      &inode->vfs_inode);
   1759		goto release_node;
   1760	}
   1761
   1762	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
   1763	if (ret)
   1764		goto release_node;
   1765
   1766	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
   1767	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
   1768	delayed_node->count++;
   1769	atomic_inc(&root->fs_info->delayed_root->items);
   1770release_node:
   1771	mutex_unlock(&delayed_node->mutex);
   1772	btrfs_release_delayed_node(delayed_node);
   1773	return ret;
   1774}
   1775
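        /*
         * Delay the deletion of the inode's inode ref by flagging its
         * delayed node with BTRFS_DELAYED_NODE_DEL_IREF.  Returns -EAGAIN
         * during log recovery, when delayed inode updates are not allowed.
         */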
   1776int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
   1777{
   1778	struct btrfs_fs_info *fs_info = inode->root->fs_info;
   1779	struct btrfs_delayed_node *delayed_node;
   1780
    1781	/*
    1782	 * We don't do delayed inode updates during log recovery because it
    1783	 * leads to enospc problems.  This means we also can't do delayed
    1784	 * inode refs.
    1785	 */
   1786	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
   1787		return -EAGAIN;
   1788
   1789	delayed_node = btrfs_get_or_create_delayed_node(inode);
   1790	if (IS_ERR(delayed_node))
   1791		return PTR_ERR(delayed_node);
   1792
    1793	/*
    1794	 * We don't reserve space for inode ref deletion because:
    1795	 * - We ONLY do async inode ref deletion for inodes that have a single
    1796	 *   link (i_nlink == 1), which means there is only one inode ref.
    1797	 *   In most cases the inode ref and the inode item are in the same
    1798	 *   leaf, and we will deal with them at the same time.
    1799	 *   Since we are sure we will reserve space for the inode item,
    1800	 *   it is unnecessary to reserve space for inode ref deletion.
    1801	 * - If the inode ref and the inode item are not in the same leaf,
    1802	 *   we still needn't worry about enospc, because we reserve much
    1803	 *   more space for the inode update than it needs.
    1804	 * - In the worst case, we can steal some space from the global
    1805	 *   reservation, but that is very rare.
    1806	 */
   1807	mutex_lock(&delayed_node->mutex);
   1808	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
   1809		goto release_node;
   1810
   1811	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
   1812	delayed_node->count++;
   1813	atomic_inc(&fs_info->delayed_root->items);
   1814release_node:
   1815	mutex_unlock(&delayed_node->mutex);
   1816	btrfs_release_delayed_node(delayed_node);
   1817	return 0;
   1818}
   1819
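        /*
         * Drop all delayed items of a node without committing them: release
         * the pending insertion and deletion items, the delayed iref and the
         * dirty inode item, along with the metadata reserved for them.
         */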
   1820static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
   1821{
   1822	struct btrfs_root *root = delayed_node->root;
   1823	struct btrfs_fs_info *fs_info = root->fs_info;
   1824	struct btrfs_delayed_item *curr_item, *prev_item;
   1825
   1826	mutex_lock(&delayed_node->mutex);
   1827	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
   1828	while (curr_item) {
   1829		btrfs_delayed_item_release_metadata(root, curr_item);
   1830		prev_item = curr_item;
   1831		curr_item = __btrfs_next_delayed_item(prev_item);
   1832		btrfs_release_delayed_item(prev_item);
   1833	}
   1834
   1835	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
   1836	while (curr_item) {
   1837		btrfs_delayed_item_release_metadata(root, curr_item);
   1838		prev_item = curr_item;
   1839		curr_item = __btrfs_next_delayed_item(prev_item);
   1840		btrfs_release_delayed_item(prev_item);
   1841	}
   1842
   1843	btrfs_release_delayed_iref(delayed_node);
   1844
   1845	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
   1846		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
   1847		btrfs_release_delayed_inode(delayed_node);
   1848	}
   1849	mutex_unlock(&delayed_node->mutex);
   1850}
   1851
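        /*
         * Drop all delayed items attached to a single inode, if it has a
         * delayed node, without writing them back.
         */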
   1852void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
   1853{
   1854	struct btrfs_delayed_node *delayed_node;
   1855
   1856	delayed_node = btrfs_get_delayed_node(inode);
   1857	if (!delayed_node)
   1858		return;
   1859
   1860	__btrfs_kill_delayed_node(delayed_node);
   1861	btrfs_release_delayed_node(delayed_node);
   1862}
   1863
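        /*
         * Drop the delayed items of every delayed node belonging to @root.
         * The xarray is scanned in batches of up to 8 nodes so that
         * inode_lock is not held while the nodes are being killed.
         */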
   1864void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
   1865{
   1866	unsigned long index = 0;
   1867	struct btrfs_delayed_node *delayed_node;
   1868	struct btrfs_delayed_node *delayed_nodes[8];
   1869
   1870	while (1) {
   1871		int n = 0;
   1872
   1873		spin_lock(&root->inode_lock);
   1874		if (xa_empty(&root->delayed_nodes)) {
   1875			spin_unlock(&root->inode_lock);
   1876			return;
   1877		}
   1878
   1879		xa_for_each_start(&root->delayed_nodes, index, delayed_node, index) {
   1880			/*
   1881			 * Don't increase refs in case the node is dead and
   1882			 * about to be removed from the tree in the loop below
   1883			 */
   1884			if (refcount_inc_not_zero(&delayed_node->refs)) {
   1885				delayed_nodes[n] = delayed_node;
   1886				n++;
   1887			}
   1888			if (n >= ARRAY_SIZE(delayed_nodes))
   1889				break;
   1890		}
   1891		index++;
   1892		spin_unlock(&root->inode_lock);
   1893
   1894		for (int i = 0; i < n; i++) {
   1895			__btrfs_kill_delayed_node(delayed_nodes[i]);
   1896			btrfs_release_delayed_node(delayed_nodes[i]);
   1897		}
   1898	}
   1899}
   1900
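        /*
         * Walk the filesystem's list of delayed nodes and drop the pending
         * delayed items of each one, abandoning all delayed inode work.
         */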
   1901void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
   1902{
   1903	struct btrfs_delayed_node *curr_node, *prev_node;
   1904
   1905	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
   1906	while (curr_node) {
   1907		__btrfs_kill_delayed_node(curr_node);
   1908
   1909		prev_node = curr_node;
   1910		curr_node = btrfs_next_delayed_node(curr_node);
   1911		btrfs_release_delayed_node(prev_node);
   1912	}
   1913}
   1914