cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

extent_cache.c (22193B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * f2fs extent cache support
      4 *
      5 * Copyright (c) 2015 Motorola Mobility
      6 * Copyright (c) 2015 Samsung Electronics
      7 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
      8 *          Chao Yu <chao2.yu@samsung.com>
      9 */
     10
     11#include <linux/fs.h>
     12#include <linux/f2fs_fs.h>
     13
     14#include "f2fs.h"
     15#include "node.h"
     16#include <trace/events/f2fs.h>
     17
     18static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
     19							unsigned int ofs)
     20{
     21	if (cached_re) {
     22		if (cached_re->ofs <= ofs &&
     23				cached_re->ofs + cached_re->len > ofs) {
     24			return cached_re;
     25		}
     26	}
     27	return NULL;
     28}
     29
     30static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
     31							unsigned int ofs)
     32{
     33	struct rb_node *node = root->rb_root.rb_node;
     34	struct rb_entry *re;
     35
     36	while (node) {
     37		re = rb_entry(node, struct rb_entry, rb_node);
     38
     39		if (ofs < re->ofs)
     40			node = node->rb_left;
     41		else if (ofs >= re->ofs + re->len)
     42			node = node->rb_right;
     43		else
     44			return re;
     45	}
     46	return NULL;
     47}
     48
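       /*
        * Look up the rb entry covering @ofs: try @cached_re first and fall
        * back to a full tree walk when the cached entry does not cover it.
        */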
     49struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
     50				struct rb_entry *cached_re, unsigned int ofs)
     51{
     52	struct rb_entry *re;
     53
     54	re = __lookup_rb_tree_fast(cached_re, ofs);
     55	if (!re)
     56		return __lookup_rb_tree_slow(root, ofs);
     57
     58	return re;
     59}
     60
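       /*
        * Find the link pointer and parent for inserting a node ordered by the
        * 64-bit @key; equal keys descend to the right, clearing *leftmost.
        */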
     61struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
     62					struct rb_root_cached *root,
     63					struct rb_node **parent,
     64					unsigned long long key, bool *leftmost)
     65{
     66	struct rb_node **p = &root->rb_root.rb_node;
     67	struct rb_entry *re;
     68
     69	while (*p) {
     70		*parent = *p;
     71		re = rb_entry(*parent, struct rb_entry, rb_node);
     72
     73		if (key < re->key) {
     74			p = &(*p)->rb_left;
     75		} else {
     76			p = &(*p)->rb_right;
     77			*leftmost = false;
     78		}
     79	}
     80
     81	return p;
     82}
     83
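       /*
        * Find the link pointer and parent for inserting a new extent at @ofs;
        * overlapping an existing entry is a bug (f2fs_bug_on).
        */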
     84struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
     85				struct rb_root_cached *root,
     86				struct rb_node **parent,
     87				unsigned int ofs, bool *leftmost)
     88{
     89	struct rb_node **p = &root->rb_root.rb_node;
     90	struct rb_entry *re;
     91
     92	while (*p) {
     93		*parent = *p;
     94		re = rb_entry(*parent, struct rb_entry, rb_node);
     95
     96		if (ofs < re->ofs) {
     97			p = &(*p)->rb_left;
     98		} else if (ofs >= re->ofs + re->len) {
     99			p = &(*p)->rb_right;
    100			*leftmost = false;
    101		} else {
    102			f2fs_bug_on(sbi, 1);
    103		}
    104	}
    105
    106	return p;
    107}
    108
    109/*
    110 * Look up the rb entry covering @ofs in the rb-tree;
    111 * if hit, return the entry, otherwise return NULL.
    112 * @prev_entry: extent before ofs
    113 * @next_entry: extent after ofs
    114 * @insert_p: insert point for a new extent at ofs,
    115 * recorded in order to simplify the insertion afterwards.
    116 * The tree must stay unchanged between lookup and insertion.
    117 */
    118struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
    119				struct rb_entry *cached_re,
    120				unsigned int ofs,
    121				struct rb_entry **prev_entry,
    122				struct rb_entry **next_entry,
    123				struct rb_node ***insert_p,
    124				struct rb_node **insert_parent,
    125				bool force, bool *leftmost)
    126{
    127	struct rb_node **pnode = &root->rb_root.rb_node;
    128	struct rb_node *parent = NULL, *tmp_node;
    129	struct rb_entry *re = cached_re;
    130
    131	*insert_p = NULL;
    132	*insert_parent = NULL;
    133	*prev_entry = NULL;
    134	*next_entry = NULL;
    135
    136	if (RB_EMPTY_ROOT(&root->rb_root))
    137		return NULL;
    138
    139	if (re) {
    140		if (re->ofs <= ofs && re->ofs + re->len > ofs)
    141			goto lookup_neighbors;
    142	}
    143
    144	if (leftmost)
    145		*leftmost = true;
    146
    147	while (*pnode) {
    148		parent = *pnode;
    149		re = rb_entry(*pnode, struct rb_entry, rb_node);
    150
    151		if (ofs < re->ofs) {
    152			pnode = &(*pnode)->rb_left;
    153		} else if (ofs >= re->ofs + re->len) {
    154			pnode = &(*pnode)->rb_right;
    155			if (leftmost)
    156				*leftmost = false;
    157		} else {
    158			goto lookup_neighbors;
    159		}
    160	}
    161
    162	*insert_p = pnode;
    163	*insert_parent = parent;
    164
    165	re = rb_entry(parent, struct rb_entry, rb_node);
    166	tmp_node = parent;
    167	if (parent && ofs > re->ofs)
    168		tmp_node = rb_next(parent);
    169	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
    170
    171	tmp_node = parent;
    172	if (parent && ofs < re->ofs)
    173		tmp_node = rb_prev(parent);
    174	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
    175	return NULL;
    176
    177lookup_neighbors:
    178	if (ofs == re->ofs || force) {
    179		/* lookup prev node for merging backward later */
    180		tmp_node = rb_prev(&re->rb_node);
    181		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
    182	}
    183	if (ofs == re->ofs + re->len - 1 || force) {
    184		/* lookup next node for merging frontward later */
    185		tmp_node = rb_next(&re->rb_node);
    186		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
    187	}
    188	return re;
    189}
    190
    191bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
    192				struct rb_root_cached *root, bool check_key)
    193{
    194#ifdef CONFIG_F2FS_CHECK_FS
    195	struct rb_node *cur = rb_first_cached(root), *next;
    196	struct rb_entry *cur_re, *next_re;
    197
    198	if (!cur)
    199		return true;
    200
    201	while (cur) {
    202		next = rb_next(cur);
    203		if (!next)
    204			return true;
    205
    206		cur_re = rb_entry(cur, struct rb_entry, rb_node);
    207		next_re = rb_entry(next, struct rb_entry, rb_node);
    208
    209		if (check_key) {
    210			if (cur_re->key > next_re->key) {
    211				f2fs_info(sbi, "inconsistent rbtree, "
    212					"cur(%llu) next(%llu)",
    213					cur_re->key, next_re->key);
    214				return false;
    215			}
    216			goto next;
    217		}
    218
    219		if (cur_re->ofs + cur_re->len > next_re->ofs) {
    220			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
    221				  cur_re->ofs, cur_re->len,
    222				  next_re->ofs, next_re->len);
    223			return false;
    224		}
    225next:
    226		cur = next;
    227	}
    228#endif
    229	return true;
    230}
    231
    232static struct kmem_cache *extent_tree_slab;
    233static struct kmem_cache *extent_node_slab;
    234
    235static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
    236				struct extent_tree *et, struct extent_info *ei,
    237				struct rb_node *parent, struct rb_node **p,
    238				bool leftmost)
    239{
    240	struct extent_node *en;
    241
    242	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
    243	if (!en)
    244		return NULL;
    245
    246	en->ei = *ei;
    247	INIT_LIST_HEAD(&en->list);
    248	en->et = et;
    249
    250	rb_link_node(&en->rb_node, parent, p);
    251	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
    252	atomic_inc(&et->node_cnt);
    253	atomic_inc(&sbi->total_ext_node);
    254	return en;
    255}
    256
    257static void __detach_extent_node(struct f2fs_sb_info *sbi,
    258				struct extent_tree *et, struct extent_node *en)
    259{
    260	rb_erase_cached(&en->rb_node, &et->root);
    261	atomic_dec(&et->node_cnt);
    262	atomic_dec(&sbi->total_ext_node);
    263
    264	if (et->cached_en == en)
    265		et->cached_en = NULL;
    266	kmem_cache_free(extent_node_slab, en);
    267}
    268
    269/*
    270 * Flow to release an extent_node:
    271 * 1. list_del_init
    272 * 2. __detach_extent_node
    273 * 3. kmem_cache_free.
    274 */
    275static void __release_extent_node(struct f2fs_sb_info *sbi,
    276			struct extent_tree *et, struct extent_node *en)
    277{
    278	spin_lock(&sbi->extent_lock);
    279	f2fs_bug_on(sbi, list_empty(&en->list));
    280	list_del_init(&en->list);
    281	spin_unlock(&sbi->extent_lock);
    282
    283	__detach_extent_node(sbi, et, en);
    284}
    285
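       /*
        * Return the inode's extent tree, allocating and initializing a new
        * one on first use; an existing tree is taken back off the zombie list.
        */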
    286static struct extent_tree *__grab_extent_tree(struct inode *inode)
    287{
    288	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    289	struct extent_tree *et;
    290	nid_t ino = inode->i_ino;
    291
    292	mutex_lock(&sbi->extent_tree_lock);
    293	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
    294	if (!et) {
    295		et = f2fs_kmem_cache_alloc(extent_tree_slab,
    296					GFP_NOFS, true, NULL);
    297		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
    298		memset(et, 0, sizeof(struct extent_tree));
    299		et->ino = ino;
    300		et->root = RB_ROOT_CACHED;
    301		et->cached_en = NULL;
    302		rwlock_init(&et->lock);
    303		INIT_LIST_HEAD(&et->list);
    304		atomic_set(&et->node_cnt, 0);
    305		atomic_inc(&sbi->total_ext_tree);
    306	} else {
    307		atomic_dec(&sbi->total_zombie_tree);
    308		list_del_init(&et->list);
    309	}
    310	mutex_unlock(&sbi->extent_tree_lock);
    311
    312	/* never dies until evict_inode */
    313	F2FS_I(inode)->extent_tree = et;
    314
    315	return et;
    316}
    317
    318static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
    319				struct extent_tree *et, struct extent_info *ei)
    320{
    321	struct rb_node **p = &et->root.rb_root.rb_node;
    322	struct extent_node *en;
    323
    324	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
    325	if (!en)
    326		return NULL;
    327
    328	et->largest = en->ei;
    329	et->cached_en = en;
    330	return en;
    331}
    332
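       /*
        * Release every extent node attached to @et and return the number of
        * nodes that were freed.
        */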
    333static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
    334					struct extent_tree *et)
    335{
    336	struct rb_node *node, *next;
    337	struct extent_node *en;
    338	unsigned int count = atomic_read(&et->node_cnt);
    339
    340	node = rb_first_cached(&et->root);
    341	while (node) {
    342		next = rb_next(node);
    343		en = rb_entry(node, struct extent_node, rb_node);
    344		__release_extent_node(sbi, et, en);
    345		node = next;
    346	}
    347
    348	return count - atomic_read(&et->node_cnt);
    349}
    350
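       /*
        * Invalidate the cached largest extent if it overlaps the range
        * [fofs, fofs + len).
        */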
    351static void __drop_largest_extent(struct extent_tree *et,
    352					pgoff_t fofs, unsigned int len)
    353{
    354	if (fofs < et->largest.fofs + et->largest.len &&
    355			fofs + len > et->largest.fofs) {
    356		et->largest.len = 0;
    357		et->largest_updated = true;
    358	}
    359}
    360
    361/* initialize the extent tree from the on-disk extent in the inode page */
    362static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
    363{
    364	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    365	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
    366	struct extent_tree *et;
    367	struct extent_node *en;
    368	struct extent_info ei;
    369
    370	if (!f2fs_may_extent_tree(inode)) {
    371		/* drop largest extent */
    372		if (i_ext && i_ext->len) {
    373			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
    374			i_ext->len = 0;
    375			set_page_dirty(ipage);
    376			return;
    377		}
    378		return;
    379	}
    380
    381	et = __grab_extent_tree(inode);
    382
    383	if (!i_ext || !i_ext->len)
    384		return;
    385
    386	get_extent_info(&ei, i_ext);
    387
    388	write_lock(&et->lock);
    389	if (atomic_read(&et->node_cnt))
    390		goto out;
    391
    392	en = __init_extent_tree(sbi, et, &ei);
    393	if (en) {
    394		spin_lock(&sbi->extent_lock);
    395		list_add_tail(&en->list, &sbi->extent_list);
    396		spin_unlock(&sbi->extent_lock);
    397	}
    398out:
    399	write_unlock(&et->lock);
    400}
    401
    402void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
    403{
    404	__f2fs_init_extent_tree(inode, ipage);
    405
    406	if (!F2FS_I(inode)->extent_tree)
    407		set_inode_flag(inode, FI_NO_EXTENT);
    408}
    409
    410static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
    411							struct extent_info *ei)
    412{
    413	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    414	struct extent_tree *et = F2FS_I(inode)->extent_tree;
    415	struct extent_node *en;
    416	bool ret = false;
    417
    418	f2fs_bug_on(sbi, !et);
    419
    420	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
    421
    422	read_lock(&et->lock);
    423
    424	if (et->largest.fofs <= pgofs &&
    425			et->largest.fofs + et->largest.len > pgofs) {
    426		*ei = et->largest;
    427		ret = true;
    428		stat_inc_largest_node_hit(sbi);
    429		goto out;
    430	}
    431
    432	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
    433				(struct rb_entry *)et->cached_en, pgofs);
    434	if (!en)
    435		goto out;
    436
    437	if (en == et->cached_en)
    438		stat_inc_cached_node_hit(sbi);
    439	else
    440		stat_inc_rbtree_node_hit(sbi);
    441
    442	*ei = en->ei;
    443	spin_lock(&sbi->extent_lock);
    444	if (!list_empty(&en->list)) {
    445		list_move_tail(&en->list, &sbi->extent_list);
    446		et->cached_en = en;
    447	}
    448	spin_unlock(&sbi->extent_lock);
    449	ret = true;
    450out:
    451	stat_inc_total_hit(sbi);
    452	read_unlock(&et->lock);
    453
    454	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
    455	return ret;
    456}
    457
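       /*
        * Try to merge @ei with its neighbours: append it to @prev_ex and/or
        * prepend it to @next_ex; if both apply, @next_ex absorbs everything
        * and @prev_ex is released.  Returns the node that absorbed @ei, or
        * NULL when no merge was possible.
        */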
    458static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
    459				struct extent_tree *et, struct extent_info *ei,
    460				struct extent_node *prev_ex,
    461				struct extent_node *next_ex)
    462{
    463	struct extent_node *en = NULL;
    464
    465	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
    466		prev_ex->ei.len += ei->len;
    467		ei = &prev_ex->ei;
    468		en = prev_ex;
    469	}
    470
    471	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
    472		next_ex->ei.fofs = ei->fofs;
    473		next_ex->ei.blk = ei->blk;
    474		next_ex->ei.len += ei->len;
    475		if (en)
    476			__release_extent_node(sbi, et, prev_ex);
    477
    478		en = next_ex;
    479	}
    480
    481	if (!en)
    482		return NULL;
    483
    484	__try_update_largest_extent(et, en);
    485
    486	spin_lock(&sbi->extent_lock);
    487	if (!list_empty(&en->list)) {
    488		list_move_tail(&en->list, &sbi->extent_list);
    489		et->cached_en = en;
    490	}
    491	spin_unlock(&sbi->extent_lock);
    492	return en;
    493}
    494
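       /*
        * Insert @ei as a new extent node, reusing the insert position from an
        * earlier lookup when available; the new node joins the global LRU
        * list and becomes the cached entry of the tree.
        */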
    495static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
    496				struct extent_tree *et, struct extent_info *ei,
    497				struct rb_node **insert_p,
    498				struct rb_node *insert_parent,
    499				bool leftmost)
    500{
    501	struct rb_node **p;
    502	struct rb_node *parent = NULL;
    503	struct extent_node *en = NULL;
    504
    505	if (insert_p && insert_parent) {
    506		parent = insert_parent;
    507		p = insert_p;
    508		goto do_insert;
    509	}
    510
    511	leftmost = true;
    512
    513	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
    514						ei->fofs, &leftmost);
    515do_insert:
    516	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
    517	if (!en)
    518		return NULL;
    519
    520	__try_update_largest_extent(et, en);
    521
    522	/* update in global extent list */
    523	spin_lock(&sbi->extent_lock);
    524	list_add_tail(&en->list, &sbi->extent_list);
    525	et->cached_en = en;
    526	spin_unlock(&sbi->extent_lock);
    527	return en;
    528}
    529
    530static void f2fs_update_extent_tree_range(struct inode *inode,
    531				pgoff_t fofs, block_t blkaddr, unsigned int len)
    532{
    533	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    534	struct extent_tree *et = F2FS_I(inode)->extent_tree;
    535	struct extent_node *en = NULL, *en1 = NULL;
    536	struct extent_node *prev_en = NULL, *next_en = NULL;
    537	struct extent_info ei, dei, prev;
    538	struct rb_node **insert_p = NULL, *insert_parent = NULL;
    539	unsigned int end = fofs + len;
    540	unsigned int pos = (unsigned int)fofs;
    541	bool updated = false;
    542	bool leftmost = false;
    543
    544	if (!et)
    545		return;
    546
    547	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
    548
    549	write_lock(&et->lock);
    550
    551	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
    552		write_unlock(&et->lock);
    553		return;
    554	}
    555
    556	prev = et->largest;
    557	dei.len = 0;
    558
    559	/*
    560	 * drop largest extent before lookup, in case it's already
    561	 * been shrunk from extent tree
    562	 */
    563	__drop_largest_extent(et, fofs, len);
    564
    565	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
    566	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
    567					(struct rb_entry *)et->cached_en, fofs,
    568					(struct rb_entry **)&prev_en,
    569					(struct rb_entry **)&next_en,
    570					&insert_p, &insert_parent, false,
    571					&leftmost);
    572	if (!en)
    573		en = next_en;
    574
    575	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
    576	while (en && en->ei.fofs < end) {
    577		unsigned int org_end;
    578		int parts = 0;	/* # of parts current extent split into */
    579
    580		next_en = en1 = NULL;
    581
    582		dei = en->ei;
    583		org_end = dei.fofs + dei.len;
    584		f2fs_bug_on(sbi, pos >= org_end);
    585
    586		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
    587			en->ei.len = pos - en->ei.fofs;
    588			prev_en = en;
    589			parts = 1;
    590		}
    591
    592		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
    593			if (parts) {
    594				set_extent_info(&ei, end,
    595						end - dei.fofs + dei.blk,
    596						org_end - end);
    597				en1 = __insert_extent_tree(sbi, et, &ei,
    598							NULL, NULL, true);
    599				next_en = en1;
    600			} else {
    601				en->ei.fofs = end;
    602				en->ei.blk += end - dei.fofs;
    603				en->ei.len -= end - dei.fofs;
    604				next_en = en;
    605			}
    606			parts++;
    607		}
    608
    609		if (!next_en) {
    610			struct rb_node *node = rb_next(&en->rb_node);
    611
    612			next_en = rb_entry_safe(node, struct extent_node,
    613						rb_node);
    614		}
    615
    616		if (parts)
    617			__try_update_largest_extent(et, en);
    618		else
    619			__release_extent_node(sbi, et, en);
    620
    621		/*
    622		 * If the original extent was split into zero or two parts, the
    623		 * extent tree has been altered by deletion or insertion, so
    624		 * invalidate the pointers related to the tree.
    625		 */
    626		if (parts != 1) {
    627			insert_p = NULL;
    628			insert_parent = NULL;
    629		}
    630		en = next_en;
    631	}
    632
    633	/* 3. update extent in extent cache */
    634	if (blkaddr) {
    635
    636		set_extent_info(&ei, fofs, blkaddr, len);
    637		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
    638			__insert_extent_tree(sbi, et, &ei,
    639					insert_p, insert_parent, leftmost);
    640
    641		/* give up the extent cache if a split occurred and extents stay small */
    642		if (dei.len >= 1 &&
    643				prev.len < F2FS_MIN_EXTENT_LEN &&
    644				et->largest.len < F2FS_MIN_EXTENT_LEN) {
    645			et->largest.len = 0;
    646			et->largest_updated = true;
    647			set_inode_flag(inode, FI_NO_EXTENT);
    648		}
    649	}
    650
    651	if (is_inode_flag_set(inode, FI_NO_EXTENT))
    652		__free_extent_tree(sbi, et);
    653
    654	if (et->largest_updated) {
    655		et->largest_updated = false;
    656		updated = true;
    657	}
    658
    659	write_unlock(&et->lock);
    660
    661	if (updated)
    662		f2fs_mark_inode_dirty_sync(inode, true);
    663}
    664
    665#ifdef CONFIG_F2FS_FS_COMPRESSION
    666void f2fs_update_extent_tree_range_compressed(struct inode *inode,
    667				pgoff_t fofs, block_t blkaddr, unsigned int llen,
    668				unsigned int c_len)
    669{
    670	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    671	struct extent_tree *et = F2FS_I(inode)->extent_tree;
    672	struct extent_node *en = NULL;
    673	struct extent_node *prev_en = NULL, *next_en = NULL;
    674	struct extent_info ei;
    675	struct rb_node **insert_p = NULL, *insert_parent = NULL;
    676	bool leftmost = false;
    677
    678	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
    679
    680	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
    681	if (is_inode_flag_set(inode, FI_NO_EXTENT))
    682		return;
    683
    684	write_lock(&et->lock);
    685
    686	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
    687				(struct rb_entry *)et->cached_en, fofs,
    688				(struct rb_entry **)&prev_en,
    689				(struct rb_entry **)&next_en,
    690				&insert_p, &insert_parent, false,
    691				&leftmost);
    692	if (en)
    693		goto unlock_out;
    694
    695	set_extent_info(&ei, fofs, blkaddr, llen);
    696	ei.c_len = c_len;
    697
    698	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
    699		__insert_extent_tree(sbi, et, &ei,
    700				insert_p, insert_parent, leftmost);
    701unlock_out:
    702	write_unlock(&et->lock);
    703}
    704#endif
    705
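       /*
        * Reclaim up to @nr_shrink objects from the extent cache: drop
        * unreferenced (zombie) trees first, then evict the coldest nodes from
        * the global LRU list.  Returns the number of objects freed.
        */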
    706unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
    707{
    708	struct extent_tree *et, *next;
    709	struct extent_node *en;
    710	unsigned int node_cnt = 0, tree_cnt = 0;
    711	int remained;
    712
    713	if (!test_opt(sbi, EXTENT_CACHE))
    714		return 0;
    715
    716	if (!atomic_read(&sbi->total_zombie_tree))
    717		goto free_node;
    718
    719	if (!mutex_trylock(&sbi->extent_tree_lock))
    720		goto out;
    721
    722	/* 1. remove unreferenced extent tree */
    723	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
    724		if (atomic_read(&et->node_cnt)) {
    725			write_lock(&et->lock);
    726			node_cnt += __free_extent_tree(sbi, et);
    727			write_unlock(&et->lock);
    728		}
    729		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
    730		list_del_init(&et->list);
    731		radix_tree_delete(&sbi->extent_tree_root, et->ino);
    732		kmem_cache_free(extent_tree_slab, et);
    733		atomic_dec(&sbi->total_ext_tree);
    734		atomic_dec(&sbi->total_zombie_tree);
    735		tree_cnt++;
    736
    737		if (node_cnt + tree_cnt >= nr_shrink)
    738			goto unlock_out;
    739		cond_resched();
    740	}
    741	mutex_unlock(&sbi->extent_tree_lock);
    742
    743free_node:
    744	/* 2. remove LRU extent entries */
    745	if (!mutex_trylock(&sbi->extent_tree_lock))
    746		goto out;
    747
    748	remained = nr_shrink - (node_cnt + tree_cnt);
    749
    750	spin_lock(&sbi->extent_lock);
    751	for (; remained > 0; remained--) {
    752		if (list_empty(&sbi->extent_list))
    753			break;
    754		en = list_first_entry(&sbi->extent_list,
    755					struct extent_node, list);
    756		et = en->et;
    757		if (!write_trylock(&et->lock)) {
    758			/* refresh this extent node's position in extent list */
    759			list_move_tail(&en->list, &sbi->extent_list);
    760			continue;
    761		}
    762
    763		list_del_init(&en->list);
    764		spin_unlock(&sbi->extent_lock);
    765
    766		__detach_extent_node(sbi, et, en);
    767
    768		write_unlock(&et->lock);
    769		node_cnt++;
    770		spin_lock(&sbi->extent_lock);
    771	}
    772	spin_unlock(&sbi->extent_lock);
    773
    774unlock_out:
    775	mutex_unlock(&sbi->extent_tree_lock);
    776out:
    777	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
    778
    779	return node_cnt + tree_cnt;
    780}
    781
    782unsigned int f2fs_destroy_extent_node(struct inode *inode)
    783{
    784	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    785	struct extent_tree *et = F2FS_I(inode)->extent_tree;
    786	unsigned int node_cnt = 0;
    787
    788	if (!et || !atomic_read(&et->node_cnt))
    789		return 0;
    790
    791	write_lock(&et->lock);
    792	node_cnt = __free_extent_tree(sbi, et);
    793	write_unlock(&et->lock);
    794
    795	return node_cnt;
    796}
    797
    798void f2fs_drop_extent_tree(struct inode *inode)
    799{
    800	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    801	struct extent_tree *et = F2FS_I(inode)->extent_tree;
    802	bool updated = false;
    803
    804	if (!f2fs_may_extent_tree(inode))
    805		return;
    806
    807	set_inode_flag(inode, FI_NO_EXTENT);
    808
    809	write_lock(&et->lock);
    810	__free_extent_tree(sbi, et);
    811	if (et->largest.len) {
    812		et->largest.len = 0;
    813		updated = true;
    814	}
    815	write_unlock(&et->lock);
    816	if (updated)
    817		f2fs_mark_inode_dirty_sync(inode, true);
    818}
    819
    820void f2fs_destroy_extent_tree(struct inode *inode)
    821{
    822	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    823	struct extent_tree *et = F2FS_I(inode)->extent_tree;
    824	unsigned int node_cnt = 0;
    825
    826	if (!et)
    827		return;
    828
    829	if (inode->i_nlink && !is_bad_inode(inode) &&
    830					atomic_read(&et->node_cnt)) {
    831		mutex_lock(&sbi->extent_tree_lock);
    832		list_add_tail(&et->list, &sbi->zombie_list);
    833		atomic_inc(&sbi->total_zombie_tree);
    834		mutex_unlock(&sbi->extent_tree_lock);
    835		return;
    836	}
    837
    838	/* free all extent info belonging to this extent tree */
    839	node_cnt = f2fs_destroy_extent_node(inode);
    840
    841	/* delete extent tree entry in radix tree */
    842	mutex_lock(&sbi->extent_tree_lock);
    843	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
    844	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
    845	kmem_cache_free(extent_tree_slab, et);
    846	atomic_dec(&sbi->total_ext_tree);
    847	mutex_unlock(&sbi->extent_tree_lock);
    848
    849	F2FS_I(inode)->extent_tree = NULL;
    850
    851	trace_f2fs_destroy_extent_tree(inode, node_cnt);
    852}
    853
    854bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
    855					struct extent_info *ei)
    856{
    857	if (!f2fs_may_extent_tree(inode))
    858		return false;
    859
    860	return f2fs_lookup_extent_tree(inode, pgofs, ei);
    861}
    862
    863void f2fs_update_extent_cache(struct dnode_of_data *dn)
    864{
    865	pgoff_t fofs;
    866	block_t blkaddr;
    867
    868	if (!f2fs_may_extent_tree(dn->inode))
    869		return;
    870
    871	if (dn->data_blkaddr == NEW_ADDR)
    872		blkaddr = NULL_ADDR;
    873	else
    874		blkaddr = dn->data_blkaddr;
    875
    876	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
    877								dn->ofs_in_node;
    878	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
    879}
    880
    881void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
    882				pgoff_t fofs, block_t blkaddr, unsigned int len)
    883
    884{
    885	if (!f2fs_may_extent_tree(dn->inode))
    886		return;
    887
    888	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
    889}
    890
    891void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
    892{
    893	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
    894	mutex_init(&sbi->extent_tree_lock);
    895	INIT_LIST_HEAD(&sbi->extent_list);
    896	spin_lock_init(&sbi->extent_lock);
    897	atomic_set(&sbi->total_ext_tree, 0);
    898	INIT_LIST_HEAD(&sbi->zombie_list);
    899	atomic_set(&sbi->total_zombie_tree, 0);
    900	atomic_set(&sbi->total_ext_node, 0);
    901}
    902
    903int __init f2fs_create_extent_cache(void)
    904{
    905	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
    906			sizeof(struct extent_tree));
    907	if (!extent_tree_slab)
    908		return -ENOMEM;
    909	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
    910			sizeof(struct extent_node));
    911	if (!extent_node_slab) {
    912		kmem_cache_destroy(extent_tree_slab);
    913		return -ENOMEM;
    914	}
    915	return 0;
    916}
    917
    918void f2fs_destroy_extent_cache(void)
    919{
    920	kmem_cache_destroy(extent_node_slab);
    921	kmem_cache_destroy(extent_tree_slab);
    922}