cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

extent_io.c (202982 bytes)


      1// SPDX-License-Identifier: GPL-2.0
      2
      3#include <linux/bitops.h>
      4#include <linux/slab.h>
      5#include <linux/bio.h>
      6#include <linux/mm.h>
      7#include <linux/pagemap.h>
      8#include <linux/page-flags.h>
      9#include <linux/sched/mm.h>
     10#include <linux/spinlock.h>
     11#include <linux/blkdev.h>
     12#include <linux/swap.h>
     13#include <linux/writeback.h>
     14#include <linux/pagevec.h>
     15#include <linux/prefetch.h>
     16#include <linux/fsverity.h>
     17#include "misc.h"
     18#include "extent_io.h"
     19#include "extent-io-tree.h"
     20#include "extent_map.h"
     21#include "ctree.h"
     22#include "btrfs_inode.h"
     23#include "volumes.h"
     24#include "check-integrity.h"
     25#include "locking.h"
     26#include "rcu-string.h"
     27#include "backref.h"
     28#include "disk-io.h"
     29#include "subpage.h"
     30#include "zoned.h"
     31#include "block-group.h"
     32#include "compression.h"
     33
     34static struct kmem_cache *extent_state_cache;
     35static struct kmem_cache *extent_buffer_cache;
     36static struct bio_set btrfs_bioset;
     37
     38static inline bool extent_state_in_tree(const struct extent_state *state)
     39{
     40	return !RB_EMPTY_NODE(&state->rb_node);
     41}
     42
     43#ifdef CONFIG_BTRFS_DEBUG
     44static LIST_HEAD(states);
     45static DEFINE_SPINLOCK(leak_lock);
     46
     47static inline void btrfs_leak_debug_add(spinlock_t *lock,
     48					struct list_head *new,
     49					struct list_head *head)
     50{
     51	unsigned long flags;
     52
     53	spin_lock_irqsave(lock, flags);
     54	list_add(new, head);
     55	spin_unlock_irqrestore(lock, flags);
     56}
     57
     58static inline void btrfs_leak_debug_del(spinlock_t *lock,
     59					struct list_head *entry)
     60{
     61	unsigned long flags;
     62
     63	spin_lock_irqsave(lock, flags);
     64	list_del(entry);
     65	spin_unlock_irqrestore(lock, flags);
     66}
     67
     68void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
     69{
     70	struct extent_buffer *eb;
     71	unsigned long flags;
     72
     73	/*
     74	 * If we didn't get into open_ctree our allocated_ebs will not be
     75	 * initialized, so just skip this.
     76	 */
     77	if (!fs_info->allocated_ebs.next)
     78		return;
     79
     80	WARN_ON(!list_empty(&fs_info->allocated_ebs));
     81	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
     82	while (!list_empty(&fs_info->allocated_ebs)) {
     83		eb = list_first_entry(&fs_info->allocated_ebs,
     84				      struct extent_buffer, leak_list);
     85		pr_err(
     86	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
     87		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
     88		       btrfs_header_owner(eb));
     89		list_del(&eb->leak_list);
     90		kmem_cache_free(extent_buffer_cache, eb);
     91	}
     92	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
     93}
     94
     95static inline void btrfs_extent_state_leak_debug_check(void)
     96{
     97	struct extent_state *state;
     98
     99	while (!list_empty(&states)) {
    100		state = list_entry(states.next, struct extent_state, leak_list);
    101		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
    102		       state->start, state->end, state->state,
    103		       extent_state_in_tree(state),
    104		       refcount_read(&state->refs));
    105		list_del(&state->leak_list);
    106		kmem_cache_free(extent_state_cache, state);
    107	}
    108}
    109
    110#define btrfs_debug_check_extent_io_range(tree, start, end)		\
    111	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
    112static inline void __btrfs_debug_check_extent_io_range(const char *caller,
    113		struct extent_io_tree *tree, u64 start, u64 end)
    114{
    115	struct inode *inode = tree->private_data;
    116	u64 isize;
    117
    118	if (!inode || !is_data_inode(inode))
    119		return;
    120
    121	isize = i_size_read(inode);
    122	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
    123		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
    124		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
    125			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
    126	}
    127}
    128#else
    129#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
    130#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
    131#define btrfs_extent_state_leak_debug_check()	do {} while (0)
    132#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
    133#endif
    134
    135struct tree_entry {
    136	u64 start;
    137	u64 end;
    138	struct rb_node rb_node;
    139};
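
/*
 * Note: the rb-tree helpers below cast tree nodes to struct tree_entry even
 * though the nodes actually live inside struct extent_state.  This relies on
 * extent_state starting with the same start/end/rb_node layout as tree_entry
 * (see extent-io-tree.h).
 */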
    140
    141/*
    142 * Structure to record info about the bio being assembled, and other info like
     143 * how many bytes remain before the stripe/ordered extent boundary.
    144 */
    145struct btrfs_bio_ctrl {
    146	struct bio *bio;
    147	enum btrfs_compression_type compress_type;
    148	u32 len_to_stripe_boundary;
    149	u32 len_to_oe_boundary;
    150};
    151
    152struct extent_page_data {
    153	struct btrfs_bio_ctrl bio_ctrl;
     154	/* Tells writepage not to lock the state bits for this range;
     155	 * it still does the unlocking.
    156	 */
    157	unsigned int extent_locked:1;
    158
    159	/* tells the submit_bio code to use REQ_SYNC */
    160	unsigned int sync_io:1;
    161};
    162
    163static int add_extent_changeset(struct extent_state *state, u32 bits,
    164				 struct extent_changeset *changeset,
    165				 int set)
    166{
    167	int ret;
    168
    169	if (!changeset)
    170		return 0;
    171	if (set && (state->state & bits) == bits)
    172		return 0;
    173	if (!set && (state->state & bits) == 0)
    174		return 0;
    175	changeset->bytes_changed += state->end - state->start + 1;
    176	ret = ulist_add(&changeset->range_changed, state->start, state->end,
    177			GFP_ATOMIC);
    178	return ret;
    179}
    180
    181static void submit_one_bio(struct bio *bio, int mirror_num,
    182			   enum btrfs_compression_type compress_type)
    183{
    184	struct extent_io_tree *tree = bio->bi_private;
    185
    186	bio->bi_private = NULL;
    187
    188	/* Caller should ensure the bio has at least some range added */
    189	ASSERT(bio->bi_iter.bi_size);
    190
    191	if (is_data_inode(tree->private_data))
    192		btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
    193					    compress_type);
    194	else
    195		btrfs_submit_metadata_bio(tree->private_data, bio, mirror_num);
    196	/*
    197	 * Above submission hooks will handle the error by ending the bio,
    198	 * which will do the cleanup properly.  So here we should not return
    199	 * any error, or the caller of submit_extent_page() will do cleanup
    200	 * again, causing problems.
    201	 */
    202}
    203
    204/* Cleanup unsubmitted bios */
    205static void end_write_bio(struct extent_page_data *epd, int ret)
    206{
    207	struct bio *bio = epd->bio_ctrl.bio;
    208
    209	if (bio) {
    210		bio->bi_status = errno_to_blk_status(ret);
    211		bio_endio(bio);
    212		epd->bio_ctrl.bio = NULL;
    213	}
    214}
    215
    216/*
    217 * Submit bio from extent page data via submit_one_bio
    218 *
     219 * The bio, if any, is handed to the submission hooks; errors are reported
     220 * through the bio's endio, so this function has no return value.
    221 */
    222static void flush_write_bio(struct extent_page_data *epd)
    223{
    224	struct bio *bio = epd->bio_ctrl.bio;
    225
    226	if (bio) {
    227		submit_one_bio(bio, 0, 0);
    228		/*
    229		 * Clean up of epd->bio is handled by its endio function.
    230		 * And endio is either triggered by successful bio execution
    231		 * or the error handler of submit bio hook.
    232		 * So at this point, no matter what happened, we don't need
    233		 * to clean up epd->bio.
    234		 */
    235		epd->bio_ctrl.bio = NULL;
    236	}
    237}
    238
    239int __init extent_state_cache_init(void)
    240{
    241	extent_state_cache = kmem_cache_create("btrfs_extent_state",
    242			sizeof(struct extent_state), 0,
    243			SLAB_MEM_SPREAD, NULL);
    244	if (!extent_state_cache)
    245		return -ENOMEM;
    246	return 0;
    247}
    248
    249int __init extent_io_init(void)
    250{
    251	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
    252			sizeof(struct extent_buffer), 0,
    253			SLAB_MEM_SPREAD, NULL);
    254	if (!extent_buffer_cache)
    255		return -ENOMEM;
    256
    257	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
    258			offsetof(struct btrfs_bio, bio),
    259			BIOSET_NEED_BVECS))
    260		goto free_buffer_cache;
    261
    262	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
    263		goto free_bioset;
    264
    265	return 0;
    266
    267free_bioset:
    268	bioset_exit(&btrfs_bioset);
    269
    270free_buffer_cache:
    271	kmem_cache_destroy(extent_buffer_cache);
    272	extent_buffer_cache = NULL;
    273	return -ENOMEM;
    274}
    275
    276void __cold extent_state_cache_exit(void)
    277{
    278	btrfs_extent_state_leak_debug_check();
    279	kmem_cache_destroy(extent_state_cache);
    280}
    281
    282void __cold extent_io_exit(void)
    283{
    284	/*
    285	 * Make sure all delayed rcu free are flushed before we
    286	 * destroy caches.
    287	 */
    288	rcu_barrier();
    289	kmem_cache_destroy(extent_buffer_cache);
    290	bioset_exit(&btrfs_bioset);
    291}
    292
    293/*
    294 * For the file_extent_tree, we want to hold the inode lock when we lookup and
     295 * update the disk_i_size, but lockdep will complain because for our io_tree we
     296 * hold the tree lock and then take the inode lock when setting delalloc.  These two things
    297 * are unrelated, so make a class for the file_extent_tree so we don't get the
    298 * two locking patterns mixed up.
    299 */
    300static struct lock_class_key file_extent_tree_class;
    301
    302void extent_io_tree_init(struct btrfs_fs_info *fs_info,
    303			 struct extent_io_tree *tree, unsigned int owner,
    304			 void *private_data)
    305{
    306	tree->fs_info = fs_info;
    307	tree->state = RB_ROOT;
    308	tree->dirty_bytes = 0;
    309	spin_lock_init(&tree->lock);
    310	tree->private_data = private_data;
    311	tree->owner = owner;
    312	if (owner == IO_TREE_INODE_FILE_EXTENT)
    313		lockdep_set_class(&tree->lock, &file_extent_tree_class);
    314}
    315
    316void extent_io_tree_release(struct extent_io_tree *tree)
    317{
    318	spin_lock(&tree->lock);
    319	/*
    320	 * Do a single barrier for the waitqueue_active check here, the state
    321	 * of the waitqueue should not change once extent_io_tree_release is
    322	 * called.
    323	 */
    324	smp_mb();
    325	while (!RB_EMPTY_ROOT(&tree->state)) {
    326		struct rb_node *node;
    327		struct extent_state *state;
    328
    329		node = rb_first(&tree->state);
    330		state = rb_entry(node, struct extent_state, rb_node);
    331		rb_erase(&state->rb_node, &tree->state);
    332		RB_CLEAR_NODE(&state->rb_node);
    333		/*
    334		 * btree io trees aren't supposed to have tasks waiting for
    335		 * changes in the flags of extent states ever.
    336		 */
    337		ASSERT(!waitqueue_active(&state->wq));
    338		free_extent_state(state);
    339
    340		cond_resched_lock(&tree->lock);
    341	}
    342	spin_unlock(&tree->lock);
    343}
    344
    345static struct extent_state *alloc_extent_state(gfp_t mask)
    346{
    347	struct extent_state *state;
    348
    349	/*
     350	 * The given mask might not be appropriate for the slab allocator,
     351	 * so drop the unsupported bits.
    352	 */
    353	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
    354	state = kmem_cache_alloc(extent_state_cache, mask);
    355	if (!state)
    356		return state;
    357	state->state = 0;
    358	state->failrec = NULL;
    359	RB_CLEAR_NODE(&state->rb_node);
    360	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
    361	refcount_set(&state->refs, 1);
    362	init_waitqueue_head(&state->wq);
    363	trace_alloc_extent_state(state, mask, _RET_IP_);
    364	return state;
    365}
    366
    367void free_extent_state(struct extent_state *state)
    368{
    369	if (!state)
    370		return;
    371	if (refcount_dec_and_test(&state->refs)) {
    372		WARN_ON(extent_state_in_tree(state));
    373		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
    374		trace_free_extent_state(state, _RET_IP_);
    375		kmem_cache_free(extent_state_cache, state);
    376	}
    377}
    378
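/*
 * Insert 'node' keyed by 'offset' into 'root'.  If an existing entry already
 * covers 'offset', that entry's node is returned and nothing is inserted;
 * otherwise the node is linked, the tree rebalanced and NULL is returned.
 * When 'p_in'/'parent_in' are supplied (from tree_search_for_insert()), the
 * search step is skipped.
 */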
    379static struct rb_node *tree_insert(struct rb_root *root,
    380				   struct rb_node *search_start,
    381				   u64 offset,
    382				   struct rb_node *node,
    383				   struct rb_node ***p_in,
    384				   struct rb_node **parent_in)
    385{
    386	struct rb_node **p;
    387	struct rb_node *parent = NULL;
    388	struct tree_entry *entry;
    389
    390	if (p_in && parent_in) {
    391		p = *p_in;
    392		parent = *parent_in;
    393		goto do_insert;
    394	}
    395
    396	p = search_start ? &search_start : &root->rb_node;
    397	while (*p) {
    398		parent = *p;
    399		entry = rb_entry(parent, struct tree_entry, rb_node);
    400
    401		if (offset < entry->start)
    402			p = &(*p)->rb_left;
    403		else if (offset > entry->end)
    404			p = &(*p)->rb_right;
    405		else
    406			return parent;
    407	}
    408
    409do_insert:
    410	rb_link_node(node, parent, p);
    411	rb_insert_color(node, root);
    412	return NULL;
    413}
    414
    415/**
    416 * Search @tree for an entry that contains @offset. Such entry would have
    417 * entry->start <= offset && entry->end >= offset.
    418 *
    419 * @tree:       the tree to search
    420 * @offset:     offset that should fall within an entry in @tree
    421 * @next_ret:   pointer to the first entry whose range ends after @offset
    422 * @prev_ret:   pointer to the first entry whose range begins before @offset
    423 * @p_ret:      pointer where new node should be anchored (used when inserting an
    424 *	        entry in the tree)
     425 * @parent_ret: points to the entry which would have been the parent of an
     426 *              entry containing @offset
    427 *
    428 * This function returns a pointer to the entry that contains @offset byte
    429 * address. If no such entry exists, then NULL is returned and the other
    430 * pointer arguments to the function are filled, otherwise the found entry is
    431 * returned and other pointers are left untouched.
    432 */
    433static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
    434				      struct rb_node **next_ret,
    435				      struct rb_node **prev_ret,
    436				      struct rb_node ***p_ret,
    437				      struct rb_node **parent_ret)
    438{
    439	struct rb_root *root = &tree->state;
    440	struct rb_node **n = &root->rb_node;
    441	struct rb_node *prev = NULL;
    442	struct rb_node *orig_prev = NULL;
    443	struct tree_entry *entry;
    444	struct tree_entry *prev_entry = NULL;
    445
    446	while (*n) {
    447		prev = *n;
    448		entry = rb_entry(prev, struct tree_entry, rb_node);
    449		prev_entry = entry;
    450
    451		if (offset < entry->start)
    452			n = &(*n)->rb_left;
    453		else if (offset > entry->end)
    454			n = &(*n)->rb_right;
    455		else
    456			return *n;
    457	}
    458
    459	if (p_ret)
    460		*p_ret = n;
    461	if (parent_ret)
    462		*parent_ret = prev;
    463
    464	if (next_ret) {
    465		orig_prev = prev;
    466		while (prev && offset > prev_entry->end) {
    467			prev = rb_next(prev);
    468			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
    469		}
    470		*next_ret = prev;
    471		prev = orig_prev;
    472	}
    473
    474	if (prev_ret) {
    475		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
    476		while (prev && offset < prev_entry->start) {
    477			prev = rb_prev(prev);
    478			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
    479		}
    480		*prev_ret = prev;
    481	}
    482	return NULL;
    483}
    484
    485static inline struct rb_node *
    486tree_search_for_insert(struct extent_io_tree *tree,
    487		       u64 offset,
    488		       struct rb_node ***p_ret,
    489		       struct rb_node **parent_ret)
    490{
     491	struct rb_node *next = NULL;
    492	struct rb_node *ret;
    493
    494	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
    495	if (!ret)
    496		return next;
    497	return ret;
    498}
    499
    500static inline struct rb_node *tree_search(struct extent_io_tree *tree,
    501					  u64 offset)
    502{
    503	return tree_search_for_insert(tree, offset, NULL, NULL);
    504}
    505
    506/*
    507 * utility function to look for merge candidates inside a given range.
    508 * Any extents with matching state are merged together into a single
     509 * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY set
     510 * are not merged because the end_io handlers need to be able to do
    511 * operations on them without sleeping (or doing allocations/splits).
    512 *
    513 * This should be called with the tree lock held.
    514 */
    515static void merge_state(struct extent_io_tree *tree,
    516		        struct extent_state *state)
    517{
    518	struct extent_state *other;
    519	struct rb_node *other_node;
    520
    521	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
    522		return;
    523
    524	other_node = rb_prev(&state->rb_node);
    525	if (other_node) {
    526		other = rb_entry(other_node, struct extent_state, rb_node);
    527		if (other->end == state->start - 1 &&
    528		    other->state == state->state) {
    529			if (tree->private_data &&
    530			    is_data_inode(tree->private_data))
    531				btrfs_merge_delalloc_extent(tree->private_data,
    532							    state, other);
    533			state->start = other->start;
    534			rb_erase(&other->rb_node, &tree->state);
    535			RB_CLEAR_NODE(&other->rb_node);
    536			free_extent_state(other);
    537		}
    538	}
    539	other_node = rb_next(&state->rb_node);
    540	if (other_node) {
    541		other = rb_entry(other_node, struct extent_state, rb_node);
    542		if (other->start == state->end + 1 &&
    543		    other->state == state->state) {
    544			if (tree->private_data &&
    545			    is_data_inode(tree->private_data))
    546				btrfs_merge_delalloc_extent(tree->private_data,
    547							    state, other);
    548			state->end = other->end;
    549			rb_erase(&other->rb_node, &tree->state);
    550			RB_CLEAR_NODE(&other->rb_node);
    551			free_extent_state(other);
    552		}
    553	}
    554}
    555
    556static void set_state_bits(struct extent_io_tree *tree,
    557			   struct extent_state *state, u32 *bits,
    558			   struct extent_changeset *changeset);
    559
    560/*
    561 * insert an extent_state struct into the tree.  'bits' are set on the
    562 * struct before it is inserted.
    563 *
    564 * This may return -EEXIST if the extent is already there, in which case the
    565 * state struct is freed.
    566 *
    567 * The tree lock is not taken internally.  This is a utility function and
    568 * probably isn't what you want to call (see set/clear_extent_bit).
    569 */
    570static int insert_state(struct extent_io_tree *tree,
    571			struct extent_state *state, u64 start, u64 end,
    572			struct rb_node ***p,
    573			struct rb_node **parent,
    574			u32 *bits, struct extent_changeset *changeset)
    575{
    576	struct rb_node *node;
    577
    578	if (end < start) {
    579		btrfs_err(tree->fs_info,
    580			"insert state: end < start %llu %llu", end, start);
    581		WARN_ON(1);
    582	}
    583	state->start = start;
    584	state->end = end;
    585
    586	set_state_bits(tree, state, bits, changeset);
    587
    588	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
    589	if (node) {
    590		struct extent_state *found;
    591		found = rb_entry(node, struct extent_state, rb_node);
    592		btrfs_err(tree->fs_info,
    593		       "found node %llu %llu on insert of %llu %llu",
    594		       found->start, found->end, start, end);
    595		return -EEXIST;
    596	}
    597	merge_state(tree, state);
    598	return 0;
    599}
    600
    601/*
     602 * split a given extent state struct in two, inserting the preallocated
     603 * struct 'prealloc' as the newly created lower half.  'split' indicates an
     604 * offset inside 'orig' where it should be split.
     605 *
     606 * Before calling, the tree has 'orig' at [orig->start, orig->end].
     607 * After calling, there are two extent state structs in the tree:
     608 *
     609 * prealloc: [orig->start, split - 1]
     610 * orig: [split, orig->end]
    611 *
    612 * The tree locks are not taken by this function. They need to be held
    613 * by the caller.
    614 */
    615static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
    616		       struct extent_state *prealloc, u64 split)
    617{
    618	struct rb_node *node;
    619
    620	if (tree->private_data && is_data_inode(tree->private_data))
    621		btrfs_split_delalloc_extent(tree->private_data, orig, split);
    622
    623	prealloc->start = orig->start;
    624	prealloc->end = split - 1;
    625	prealloc->state = orig->state;
    626	orig->start = split;
    627
    628	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
    629			   &prealloc->rb_node, NULL, NULL);
    630	if (node) {
    631		free_extent_state(prealloc);
    632		return -EEXIST;
    633	}
    634	return 0;
    635}
    636
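/*
 * Return the extent_state that immediately follows 'state' in the tree, or
 * NULL if 'state' is the last entry.
 */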
    637static struct extent_state *next_state(struct extent_state *state)
    638{
    639	struct rb_node *next = rb_next(&state->rb_node);
    640	if (next)
    641		return rb_entry(next, struct extent_state, rb_node);
    642	else
    643		return NULL;
    644}
    645
    646/*
    647 * utility function to clear some bits in an extent state struct.
    648 * it will optionally wake up anyone waiting on this state (wake == 1).
    649 *
    650 * If no bits are set on the state struct after clearing things, the
    651 * struct is freed and removed from the tree
    652 */
    653static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
    654					    struct extent_state *state,
    655					    u32 *bits, int wake,
    656					    struct extent_changeset *changeset)
    657{
    658	struct extent_state *next;
    659	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
    660	int ret;
    661
    662	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
    663		u64 range = state->end - state->start + 1;
    664		WARN_ON(range > tree->dirty_bytes);
    665		tree->dirty_bytes -= range;
    666	}
    667
    668	if (tree->private_data && is_data_inode(tree->private_data))
    669		btrfs_clear_delalloc_extent(tree->private_data, state, bits);
    670
    671	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
    672	BUG_ON(ret < 0);
    673	state->state &= ~bits_to_clear;
    674	if (wake)
    675		wake_up(&state->wq);
    676	if (state->state == 0) {
    677		next = next_state(state);
    678		if (extent_state_in_tree(state)) {
    679			rb_erase(&state->rb_node, &tree->state);
    680			RB_CLEAR_NODE(&state->rb_node);
    681			free_extent_state(state);
    682		} else {
    683			WARN_ON(1);
    684		}
    685	} else {
    686		merge_state(tree, state);
    687		next = next_state(state);
    688	}
    689	return next;
    690}
    691
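/*
 * Reuse the caller's preallocated state if one was passed in, otherwise fall
 * back to a GFP_ATOMIC allocation; may return NULL under memory pressure.
 */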
    692static struct extent_state *
    693alloc_extent_state_atomic(struct extent_state *prealloc)
    694{
    695	if (!prealloc)
    696		prealloc = alloc_extent_state(GFP_ATOMIC);
    697
    698	return prealloc;
    699}
    700
    701static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
    702{
    703	btrfs_panic(tree->fs_info, err,
    704	"locking error: extent tree was modified by another thread while locked");
    705}
    706
    707/*
    708 * clear some bits on a range in the tree.  This may require splitting
    709 * or inserting elements in the tree, so the gfp mask is used to
    710 * indicate which allocations or sleeping are allowed.
    711 *
    712 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
    713 * the given range from the tree regardless of state (ie for truncate).
    714 *
    715 * the range [start, end] is inclusive.
    716 *
     717 * This takes the tree lock.  It always returns 0; split failures trigger extent_io_tree_panic().
    718 */
    719int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
    720		       u32 bits, int wake, int delete,
    721		       struct extent_state **cached_state,
    722		       gfp_t mask, struct extent_changeset *changeset)
    723{
    724	struct extent_state *state;
    725	struct extent_state *cached;
    726	struct extent_state *prealloc = NULL;
    727	struct rb_node *node;
    728	u64 last_end;
    729	int err;
    730	int clear = 0;
    731
    732	btrfs_debug_check_extent_io_range(tree, start, end);
    733	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
    734
    735	if (bits & EXTENT_DELALLOC)
    736		bits |= EXTENT_NORESERVE;
    737
    738	if (delete)
    739		bits |= ~EXTENT_CTLBITS;
    740
    741	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
    742		clear = 1;
    743again:
    744	if (!prealloc && gfpflags_allow_blocking(mask)) {
    745		/*
    746		 * Don't care for allocation failure here because we might end
    747		 * up not needing the pre-allocated extent state at all, which
    748		 * is the case if we only have in the tree extent states that
     749		 * cover our input range and don't cover any other range.
    750		 * If we end up needing a new extent state we allocate it later.
    751		 */
    752		prealloc = alloc_extent_state(mask);
    753	}
    754
    755	spin_lock(&tree->lock);
    756	if (cached_state) {
    757		cached = *cached_state;
    758
    759		if (clear) {
    760			*cached_state = NULL;
    761			cached_state = NULL;
    762		}
    763
    764		if (cached && extent_state_in_tree(cached) &&
    765		    cached->start <= start && cached->end > start) {
    766			if (clear)
    767				refcount_dec(&cached->refs);
    768			state = cached;
    769			goto hit_next;
    770		}
    771		if (clear)
    772			free_extent_state(cached);
    773	}
    774	/*
    775	 * this search will find the extents that end after
    776	 * our range starts
    777	 */
    778	node = tree_search(tree, start);
    779	if (!node)
    780		goto out;
    781	state = rb_entry(node, struct extent_state, rb_node);
    782hit_next:
    783	if (state->start > end)
    784		goto out;
    785	WARN_ON(state->end < start);
    786	last_end = state->end;
    787
    788	/* the state doesn't have the wanted bits, go ahead */
    789	if (!(state->state & bits)) {
    790		state = next_state(state);
    791		goto next;
    792	}
    793
    794	/*
    795	 *     | ---- desired range ---- |
    796	 *  | state | or
    797	 *  | ------------- state -------------- |
    798	 *
    799	 * We need to split the extent we found, and may flip
    800	 * bits on second half.
    801	 *
    802	 * If the extent we found extends past our range, we
    803	 * just split and search again.  It'll get split again
    804	 * the next time though.
    805	 *
    806	 * If the extent we found is inside our range, we clear
    807	 * the desired bit on it.
    808	 */
    809
    810	if (state->start < start) {
    811		prealloc = alloc_extent_state_atomic(prealloc);
    812		BUG_ON(!prealloc);
    813		err = split_state(tree, state, prealloc, start);
    814		if (err)
    815			extent_io_tree_panic(tree, err);
    816
    817		prealloc = NULL;
    818		if (err)
    819			goto out;
    820		if (state->end <= end) {
    821			state = clear_state_bit(tree, state, &bits, wake,
    822						changeset);
    823			goto next;
    824		}
    825		goto search_again;
    826	}
    827	/*
    828	 * | ---- desired range ---- |
    829	 *                        | state |
    830	 * We need to split the extent, and clear the bit
    831	 * on the first half
    832	 */
    833	if (state->start <= end && state->end > end) {
    834		prealloc = alloc_extent_state_atomic(prealloc);
    835		BUG_ON(!prealloc);
    836		err = split_state(tree, state, prealloc, end + 1);
    837		if (err)
    838			extent_io_tree_panic(tree, err);
    839
    840		if (wake)
    841			wake_up(&state->wq);
    842
    843		clear_state_bit(tree, prealloc, &bits, wake, changeset);
    844
    845		prealloc = NULL;
    846		goto out;
    847	}
    848
    849	state = clear_state_bit(tree, state, &bits, wake, changeset);
    850next:
    851	if (last_end == (u64)-1)
    852		goto out;
    853	start = last_end + 1;
    854	if (start <= end && state && !need_resched())
    855		goto hit_next;
    856
    857search_again:
    858	if (start > end)
    859		goto out;
    860	spin_unlock(&tree->lock);
    861	if (gfpflags_allow_blocking(mask))
    862		cond_resched();
    863	goto again;
    864
    865out:
    866	spin_unlock(&tree->lock);
    867	if (prealloc)
    868		free_extent_state(prealloc);
    869
    870	return 0;
    871
    872}
    873
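/*
 * Drop the tree lock and sleep until 'state' is woken (i.e. bits were cleared
 * on it), re-taking the tree lock before returning.
 */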
    874static void wait_on_state(struct extent_io_tree *tree,
    875			  struct extent_state *state)
    876		__releases(tree->lock)
    877		__acquires(tree->lock)
    878{
    879	DEFINE_WAIT(wait);
    880	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
    881	spin_unlock(&tree->lock);
    882	schedule();
    883	spin_lock(&tree->lock);
    884	finish_wait(&state->wq, &wait);
    885}
    886
    887/*
    888 * waits for one or more bits to clear on a range in the state tree.
    889 * The range [start, end] is inclusive.
    890 * The tree lock is taken by this function
    891 */
    892static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
    893			    u32 bits)
    894{
    895	struct extent_state *state;
    896	struct rb_node *node;
    897
    898	btrfs_debug_check_extent_io_range(tree, start, end);
    899
    900	spin_lock(&tree->lock);
    901again:
    902	while (1) {
    903		/*
    904		 * this search will find all the extents that end after
    905		 * our range starts
    906		 */
    907		node = tree_search(tree, start);
    908process_node:
    909		if (!node)
    910			break;
    911
    912		state = rb_entry(node, struct extent_state, rb_node);
    913
    914		if (state->start > end)
    915			goto out;
    916
    917		if (state->state & bits) {
    918			start = state->start;
    919			refcount_inc(&state->refs);
    920			wait_on_state(tree, state);
    921			free_extent_state(state);
    922			goto again;
    923		}
    924		start = state->end + 1;
    925
    926		if (start > end)
    927			break;
    928
    929		if (!cond_resched_lock(&tree->lock)) {
    930			node = rb_next(node);
    931			goto process_node;
    932		}
    933	}
    934out:
    935	spin_unlock(&tree->lock);
    936}
    937
    938static void set_state_bits(struct extent_io_tree *tree,
    939			   struct extent_state *state,
    940			   u32 *bits, struct extent_changeset *changeset)
    941{
    942	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
    943	int ret;
    944
    945	if (tree->private_data && is_data_inode(tree->private_data))
    946		btrfs_set_delalloc_extent(tree->private_data, state, bits);
    947
    948	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
    949		u64 range = state->end - state->start + 1;
    950		tree->dirty_bytes += range;
    951	}
    952	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
    953	BUG_ON(ret < 0);
    954	state->state |= bits_to_set;
    955}
    956
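/*
 * Cache 'state' in '*cached_ptr' (taking an extra reference) if nothing is
 * cached yet and the state carries any of 'flags'; a zero 'flags' caches
 * unconditionally.
 */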
    957static void cache_state_if_flags(struct extent_state *state,
    958				 struct extent_state **cached_ptr,
    959				 unsigned flags)
    960{
    961	if (cached_ptr && !(*cached_ptr)) {
    962		if (!flags || (state->state & flags)) {
    963			*cached_ptr = state;
    964			refcount_inc(&state->refs);
    965		}
    966	}
    967}
    968
    969static void cache_state(struct extent_state *state,
    970			struct extent_state **cached_ptr)
    971{
    972	return cache_state_if_flags(state, cached_ptr,
    973				    EXTENT_LOCKED | EXTENT_BOUNDARY);
    974}
    975
    976/*
    977 * set some bits on a range in the tree.  This may require allocations or
    978 * sleeping, so the gfp mask is used to indicate what is allowed.
    979 *
    980 * If any of the exclusive bits are set, this will fail with -EEXIST if some
    981 * part of the range already has the desired bits set.  The start of the
    982 * existing range is returned in failed_start in this case.
    983 *
     984 * [start, end] is inclusive.  This takes the tree lock.
    985 */
    986int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
    987		   u32 exclusive_bits, u64 *failed_start,
    988		   struct extent_state **cached_state, gfp_t mask,
    989		   struct extent_changeset *changeset)
    990{
    991	struct extent_state *state;
    992	struct extent_state *prealloc = NULL;
    993	struct rb_node *node;
    994	struct rb_node **p;
    995	struct rb_node *parent;
    996	int err = 0;
    997	u64 last_start;
    998	u64 last_end;
    999
   1000	btrfs_debug_check_extent_io_range(tree, start, end);
   1001	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
   1002
   1003	if (exclusive_bits)
   1004		ASSERT(failed_start);
   1005	else
   1006		ASSERT(failed_start == NULL);
   1007again:
   1008	if (!prealloc && gfpflags_allow_blocking(mask)) {
   1009		/*
   1010		 * Don't care for allocation failure here because we might end
   1011		 * up not needing the pre-allocated extent state at all, which
   1012		 * is the case if we only have in the tree extent states that
    1013		 * cover our input range and don't cover any other range.
   1014		 * If we end up needing a new extent state we allocate it later.
   1015		 */
   1016		prealloc = alloc_extent_state(mask);
   1017	}
   1018
   1019	spin_lock(&tree->lock);
   1020	if (cached_state && *cached_state) {
   1021		state = *cached_state;
   1022		if (state->start <= start && state->end > start &&
   1023		    extent_state_in_tree(state)) {
   1024			node = &state->rb_node;
   1025			goto hit_next;
   1026		}
   1027	}
   1028	/*
   1029	 * this search will find all the extents that end after
   1030	 * our range starts.
   1031	 */
   1032	node = tree_search_for_insert(tree, start, &p, &parent);
   1033	if (!node) {
   1034		prealloc = alloc_extent_state_atomic(prealloc);
   1035		BUG_ON(!prealloc);
   1036		err = insert_state(tree, prealloc, start, end,
   1037				   &p, &parent, &bits, changeset);
   1038		if (err)
   1039			extent_io_tree_panic(tree, err);
   1040
   1041		cache_state(prealloc, cached_state);
   1042		prealloc = NULL;
   1043		goto out;
   1044	}
   1045	state = rb_entry(node, struct extent_state, rb_node);
   1046hit_next:
   1047	last_start = state->start;
   1048	last_end = state->end;
   1049
   1050	/*
   1051	 * | ---- desired range ---- |
   1052	 * | state |
   1053	 *
   1054	 * Just lock what we found and keep going
   1055	 */
   1056	if (state->start == start && state->end <= end) {
   1057		if (state->state & exclusive_bits) {
   1058			*failed_start = state->start;
   1059			err = -EEXIST;
   1060			goto out;
   1061		}
   1062
   1063		set_state_bits(tree, state, &bits, changeset);
   1064		cache_state(state, cached_state);
   1065		merge_state(tree, state);
   1066		if (last_end == (u64)-1)
   1067			goto out;
   1068		start = last_end + 1;
   1069		state = next_state(state);
   1070		if (start < end && state && state->start == start &&
   1071		    !need_resched())
   1072			goto hit_next;
   1073		goto search_again;
   1074	}
   1075
   1076	/*
   1077	 *     | ---- desired range ---- |
   1078	 * | state |
   1079	 *   or
   1080	 * | ------------- state -------------- |
   1081	 *
   1082	 * We need to split the extent we found, and may flip bits on
   1083	 * second half.
   1084	 *
   1085	 * If the extent we found extends past our
   1086	 * range, we just split and search again.  It'll get split
   1087	 * again the next time though.
   1088	 *
   1089	 * If the extent we found is inside our range, we set the
   1090	 * desired bit on it.
   1091	 */
   1092	if (state->start < start) {
   1093		if (state->state & exclusive_bits) {
   1094			*failed_start = start;
   1095			err = -EEXIST;
   1096			goto out;
   1097		}
   1098
   1099		/*
   1100		 * If this extent already has all the bits we want set, then
   1101		 * skip it, not necessary to split it or do anything with it.
   1102		 */
   1103		if ((state->state & bits) == bits) {
   1104			start = state->end + 1;
   1105			cache_state(state, cached_state);
   1106			goto search_again;
   1107		}
   1108
   1109		prealloc = alloc_extent_state_atomic(prealloc);
   1110		BUG_ON(!prealloc);
   1111		err = split_state(tree, state, prealloc, start);
   1112		if (err)
   1113			extent_io_tree_panic(tree, err);
   1114
   1115		prealloc = NULL;
   1116		if (err)
   1117			goto out;
   1118		if (state->end <= end) {
   1119			set_state_bits(tree, state, &bits, changeset);
   1120			cache_state(state, cached_state);
   1121			merge_state(tree, state);
   1122			if (last_end == (u64)-1)
   1123				goto out;
   1124			start = last_end + 1;
   1125			state = next_state(state);
   1126			if (start < end && state && state->start == start &&
   1127			    !need_resched())
   1128				goto hit_next;
   1129		}
   1130		goto search_again;
   1131	}
   1132	/*
   1133	 * | ---- desired range ---- |
   1134	 *     | state | or               | state |
   1135	 *
   1136	 * There's a hole, we need to insert something in it and
   1137	 * ignore the extent we found.
   1138	 */
   1139	if (state->start > start) {
   1140		u64 this_end;
   1141		if (end < last_start)
   1142			this_end = end;
   1143		else
   1144			this_end = last_start - 1;
   1145
   1146		prealloc = alloc_extent_state_atomic(prealloc);
   1147		BUG_ON(!prealloc);
   1148
   1149		/*
    1150		 * Avoid freeing 'prealloc' if it can be merged with
   1151		 * the later extent.
   1152		 */
   1153		err = insert_state(tree, prealloc, start, this_end,
   1154				   NULL, NULL, &bits, changeset);
   1155		if (err)
   1156			extent_io_tree_panic(tree, err);
   1157
   1158		cache_state(prealloc, cached_state);
   1159		prealloc = NULL;
   1160		start = this_end + 1;
   1161		goto search_again;
   1162	}
   1163	/*
   1164	 * | ---- desired range ---- |
   1165	 *                        | state |
   1166	 * We need to split the extent, and set the bit
   1167	 * on the first half
   1168	 */
   1169	if (state->start <= end && state->end > end) {
   1170		if (state->state & exclusive_bits) {
   1171			*failed_start = start;
   1172			err = -EEXIST;
   1173			goto out;
   1174		}
   1175
   1176		prealloc = alloc_extent_state_atomic(prealloc);
   1177		BUG_ON(!prealloc);
   1178		err = split_state(tree, state, prealloc, end + 1);
   1179		if (err)
   1180			extent_io_tree_panic(tree, err);
   1181
   1182		set_state_bits(tree, prealloc, &bits, changeset);
   1183		cache_state(prealloc, cached_state);
   1184		merge_state(tree, prealloc);
   1185		prealloc = NULL;
   1186		goto out;
   1187	}
   1188
   1189search_again:
   1190	if (start > end)
   1191		goto out;
   1192	spin_unlock(&tree->lock);
   1193	if (gfpflags_allow_blocking(mask))
   1194		cond_resched();
   1195	goto again;
   1196
   1197out:
   1198	spin_unlock(&tree->lock);
   1199	if (prealloc)
   1200		free_extent_state(prealloc);
   1201
   1202	return err;
   1203
   1204}
   1205
   1206/**
   1207 * convert_extent_bit - convert all bits in a given range from one bit to
   1208 * 			another
   1209 * @tree:	the io tree to search
   1210 * @start:	the start offset in bytes
   1211 * @end:	the end offset in bytes (inclusive)
   1212 * @bits:	the bits to set in this range
   1213 * @clear_bits:	the bits to clear in this range
   1214 * @cached_state:	state that we're going to cache
   1215 *
   1216 * This will go through and set bits for the given range.  If any states exist
   1217 * already in this range they are set with the given bit and cleared of the
   1218 * clear_bits.  This is only meant to be used by things that are mergeable, ie
   1219 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
   1220 * boundary bits like LOCK.
   1221 *
   1222 * All allocations are done with GFP_NOFS.
   1223 */
   1224int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
   1225		       u32 bits, u32 clear_bits,
   1226		       struct extent_state **cached_state)
   1227{
   1228	struct extent_state *state;
   1229	struct extent_state *prealloc = NULL;
   1230	struct rb_node *node;
   1231	struct rb_node **p;
   1232	struct rb_node *parent;
   1233	int err = 0;
   1234	u64 last_start;
   1235	u64 last_end;
   1236	bool first_iteration = true;
   1237
   1238	btrfs_debug_check_extent_io_range(tree, start, end);
   1239	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
   1240				       clear_bits);
   1241
   1242again:
   1243	if (!prealloc) {
   1244		/*
   1245		 * Best effort, don't worry if extent state allocation fails
   1246		 * here for the first iteration. We might have a cached state
   1247		 * that matches exactly the target range, in which case no
   1248		 * extent state allocations are needed. We'll only know this
   1249		 * after locking the tree.
   1250		 */
   1251		prealloc = alloc_extent_state(GFP_NOFS);
   1252		if (!prealloc && !first_iteration)
   1253			return -ENOMEM;
   1254	}
   1255
   1256	spin_lock(&tree->lock);
   1257	if (cached_state && *cached_state) {
   1258		state = *cached_state;
   1259		if (state->start <= start && state->end > start &&
   1260		    extent_state_in_tree(state)) {
   1261			node = &state->rb_node;
   1262			goto hit_next;
   1263		}
   1264	}
   1265
   1266	/*
   1267	 * this search will find all the extents that end after
   1268	 * our range starts.
   1269	 */
   1270	node = tree_search_for_insert(tree, start, &p, &parent);
   1271	if (!node) {
   1272		prealloc = alloc_extent_state_atomic(prealloc);
   1273		if (!prealloc) {
   1274			err = -ENOMEM;
   1275			goto out;
   1276		}
   1277		err = insert_state(tree, prealloc, start, end,
   1278				   &p, &parent, &bits, NULL);
   1279		if (err)
   1280			extent_io_tree_panic(tree, err);
   1281		cache_state(prealloc, cached_state);
   1282		prealloc = NULL;
   1283		goto out;
   1284	}
   1285	state = rb_entry(node, struct extent_state, rb_node);
   1286hit_next:
   1287	last_start = state->start;
   1288	last_end = state->end;
   1289
   1290	/*
   1291	 * | ---- desired range ---- |
   1292	 * | state |
   1293	 *
   1294	 * Just lock what we found and keep going
   1295	 */
   1296	if (state->start == start && state->end <= end) {
   1297		set_state_bits(tree, state, &bits, NULL);
   1298		cache_state(state, cached_state);
   1299		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
   1300		if (last_end == (u64)-1)
   1301			goto out;
   1302		start = last_end + 1;
   1303		if (start < end && state && state->start == start &&
   1304		    !need_resched())
   1305			goto hit_next;
   1306		goto search_again;
   1307	}
   1308
   1309	/*
   1310	 *     | ---- desired range ---- |
   1311	 * | state |
   1312	 *   or
   1313	 * | ------------- state -------------- |
   1314	 *
   1315	 * We need to split the extent we found, and may flip bits on
   1316	 * second half.
   1317	 *
   1318	 * If the extent we found extends past our
   1319	 * range, we just split and search again.  It'll get split
   1320	 * again the next time though.
   1321	 *
   1322	 * If the extent we found is inside our range, we set the
   1323	 * desired bit on it.
   1324	 */
   1325	if (state->start < start) {
   1326		prealloc = alloc_extent_state_atomic(prealloc);
   1327		if (!prealloc) {
   1328			err = -ENOMEM;
   1329			goto out;
   1330		}
   1331		err = split_state(tree, state, prealloc, start);
   1332		if (err)
   1333			extent_io_tree_panic(tree, err);
   1334		prealloc = NULL;
   1335		if (err)
   1336			goto out;
   1337		if (state->end <= end) {
   1338			set_state_bits(tree, state, &bits, NULL);
   1339			cache_state(state, cached_state);
   1340			state = clear_state_bit(tree, state, &clear_bits, 0,
   1341						NULL);
   1342			if (last_end == (u64)-1)
   1343				goto out;
   1344			start = last_end + 1;
   1345			if (start < end && state && state->start == start &&
   1346			    !need_resched())
   1347				goto hit_next;
   1348		}
   1349		goto search_again;
   1350	}
   1351	/*
   1352	 * | ---- desired range ---- |
   1353	 *     | state | or               | state |
   1354	 *
   1355	 * There's a hole, we need to insert something in it and
   1356	 * ignore the extent we found.
   1357	 */
   1358	if (state->start > start) {
   1359		u64 this_end;
   1360		if (end < last_start)
   1361			this_end = end;
   1362		else
   1363			this_end = last_start - 1;
   1364
   1365		prealloc = alloc_extent_state_atomic(prealloc);
   1366		if (!prealloc) {
   1367			err = -ENOMEM;
   1368			goto out;
   1369		}
   1370
   1371		/*
    1372		 * Avoid freeing 'prealloc' if it can be merged with
   1373		 * the later extent.
   1374		 */
   1375		err = insert_state(tree, prealloc, start, this_end,
   1376				   NULL, NULL, &bits, NULL);
   1377		if (err)
   1378			extent_io_tree_panic(tree, err);
   1379		cache_state(prealloc, cached_state);
   1380		prealloc = NULL;
   1381		start = this_end + 1;
   1382		goto search_again;
   1383	}
   1384	/*
   1385	 * | ---- desired range ---- |
   1386	 *                        | state |
   1387	 * We need to split the extent, and set the bit
   1388	 * on the first half
   1389	 */
   1390	if (state->start <= end && state->end > end) {
   1391		prealloc = alloc_extent_state_atomic(prealloc);
   1392		if (!prealloc) {
   1393			err = -ENOMEM;
   1394			goto out;
   1395		}
   1396
   1397		err = split_state(tree, state, prealloc, end + 1);
   1398		if (err)
   1399			extent_io_tree_panic(tree, err);
   1400
   1401		set_state_bits(tree, prealloc, &bits, NULL);
   1402		cache_state(prealloc, cached_state);
   1403		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
   1404		prealloc = NULL;
   1405		goto out;
   1406	}
   1407
   1408search_again:
   1409	if (start > end)
   1410		goto out;
   1411	spin_unlock(&tree->lock);
   1412	cond_resched();
   1413	first_iteration = false;
   1414	goto again;
   1415
   1416out:
   1417	spin_unlock(&tree->lock);
   1418	if (prealloc)
   1419		free_extent_state(prealloc);
   1420
   1421	return err;
   1422}
   1423
   1424/* wrappers around set/clear extent bit */
   1425int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
   1426			   u32 bits, struct extent_changeset *changeset)
   1427{
   1428	/*
   1429	 * We don't support EXTENT_LOCKED yet, as current changeset will
   1430	 * record any bits changed, so for EXTENT_LOCKED case, it will
   1431	 * either fail with -EEXIST or changeset will record the whole
   1432	 * range.
   1433	 */
   1434	BUG_ON(bits & EXTENT_LOCKED);
   1435
   1436	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
   1437			      changeset);
   1438}
   1439
   1440int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
   1441			   u32 bits)
   1442{
   1443	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
   1444			      GFP_NOWAIT, NULL);
   1445}
   1446
   1447int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
   1448		     u32 bits, int wake, int delete,
   1449		     struct extent_state **cached)
   1450{
   1451	return __clear_extent_bit(tree, start, end, bits, wake, delete,
   1452				  cached, GFP_NOFS, NULL);
   1453}
   1454
   1455int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
   1456		u32 bits, struct extent_changeset *changeset)
   1457{
   1458	/*
   1459	 * Don't support EXTENT_LOCKED case, same reason as
   1460	 * set_record_extent_bits().
   1461	 */
   1462	BUG_ON(bits & EXTENT_LOCKED);
   1463
   1464	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
   1465				  changeset);
   1466}
   1467
   1468/*
    1469 * Either insert or lock the state struct between start and end; if the range
    1470 * is already locked this waits for it to be unlocked and retries.
   1471 */
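/*
 * Illustrative usage sketch (helper names assumed from the extent io tree
 * header, not part of this file):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(&inode->io_tree, start, end, &cached);
 *	... operate on the locked range ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */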
   1472int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
   1473		     struct extent_state **cached_state)
   1474{
   1475	int err;
   1476	u64 failed_start;
   1477
   1478	while (1) {
   1479		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
   1480				     EXTENT_LOCKED, &failed_start,
   1481				     cached_state, GFP_NOFS, NULL);
   1482		if (err == -EEXIST) {
   1483			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
   1484			start = failed_start;
   1485		} else
   1486			break;
   1487		WARN_ON(start > end);
   1488	}
   1489	return err;
   1490}
   1491
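/*
 * Try to lock the range without waiting.  Returns 1 if the whole range was
 * locked, 0 if part of it was already locked (any partial lock taken here is
 * cleared again before returning).
 */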
   1492int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
   1493{
   1494	int err;
   1495	u64 failed_start;
   1496
   1497	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
   1498			     &failed_start, NULL, GFP_NOFS, NULL);
   1499	if (err == -EEXIST) {
   1500		if (failed_start > start)
   1501			clear_extent_bit(tree, start, failed_start - 1,
   1502					 EXTENT_LOCKED, 1, 0, NULL);
   1503		return 0;
   1504	}
   1505	return 1;
   1506}
   1507
   1508void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
   1509{
   1510	unsigned long index = start >> PAGE_SHIFT;
   1511	unsigned long end_index = end >> PAGE_SHIFT;
   1512	struct page *page;
   1513
   1514	while (index <= end_index) {
   1515		page = find_get_page(inode->i_mapping, index);
   1516		BUG_ON(!page); /* Pages should be in the extent_io_tree */
   1517		clear_page_dirty_for_io(page);
   1518		put_page(page);
   1519		index++;
   1520	}
   1521}
   1522
   1523void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
   1524{
   1525	struct address_space *mapping = inode->i_mapping;
   1526	unsigned long index = start >> PAGE_SHIFT;
   1527	unsigned long end_index = end >> PAGE_SHIFT;
   1528	struct folio *folio;
   1529
   1530	while (index <= end_index) {
   1531		folio = filemap_get_folio(mapping, index);
   1532		filemap_dirty_folio(mapping, folio);
   1533		folio_account_redirty(folio);
   1534		index += folio_nr_pages(folio);
   1535		folio_put(folio);
   1536	}
   1537}
   1538
   1539/* find the first state struct with 'bits' set after 'start', and
    1540 * return it.  tree->lock must be held.  NULL will be returned if
    1541 * nothing was found after 'start'.
   1542 */
   1543static struct extent_state *
   1544find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
   1545{
   1546	struct rb_node *node;
   1547	struct extent_state *state;
   1548
   1549	/*
   1550	 * this search will find all the extents that end after
   1551	 * our range starts.
   1552	 */
   1553	node = tree_search(tree, start);
   1554	if (!node)
   1555		goto out;
   1556
   1557	while (1) {
   1558		state = rb_entry(node, struct extent_state, rb_node);
   1559		if (state->end >= start && (state->state & bits))
   1560			return state;
   1561
   1562		node = rb_next(node);
   1563		if (!node)
   1564			break;
   1565	}
   1566out:
   1567	return NULL;
   1568}
   1569
   1570/*
   1571 * Find the first offset in the io tree with one or more @bits set.
   1572 *
   1573 * Note: If there are multiple bits set in @bits, any of them will match.
   1574 *
   1575 * Return 0 if we find something, and update @start_ret and @end_ret.
   1576 * Return 1 if we found nothing.
   1577 */
   1578int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
   1579			  u64 *start_ret, u64 *end_ret, u32 bits,
   1580			  struct extent_state **cached_state)
   1581{
   1582	struct extent_state *state;
   1583	int ret = 1;
   1584
   1585	spin_lock(&tree->lock);
   1586	if (cached_state && *cached_state) {
   1587		state = *cached_state;
   1588		if (state->end == start - 1 && extent_state_in_tree(state)) {
   1589			while ((state = next_state(state)) != NULL) {
   1590				if (state->state & bits)
   1591					goto got_it;
   1592			}
   1593			free_extent_state(*cached_state);
   1594			*cached_state = NULL;
   1595			goto out;
   1596		}
   1597		free_extent_state(*cached_state);
   1598		*cached_state = NULL;
   1599	}
   1600
   1601	state = find_first_extent_bit_state(tree, start, bits);
   1602got_it:
   1603	if (state) {
   1604		cache_state_if_flags(state, cached_state, 0);
   1605		*start_ret = state->start;
   1606		*end_ret = state->end;
   1607		ret = 0;
   1608	}
   1609out:
   1610	spin_unlock(&tree->lock);
   1611	return ret;
   1612}
   1613
   1614/**
   1615 * Find a contiguous area of bits
   1616 *
   1617 * @tree:      io tree to check
   1618 * @start:     offset to start the search from
   1619 * @start_ret: the first offset we found with the bits set
   1620 * @end_ret:   the final contiguous range of the bits that were set
   1621 * @bits:      bits to look for
   1622 *
   1623 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
   1624 * to set bits appropriately, and then merge them again.  During this time it
   1625 * will drop the tree->lock, so use this helper if you want to find the actual
   1626 * contiguous area for given bits.  We will search to the first bit we find, and
   1627 * then walk down the tree until we find a non-contiguous area.  The area
   1628 * returned will be the full contiguous area with the bits set.
   1629 */
   1630int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
   1631			       u64 *start_ret, u64 *end_ret, u32 bits)
   1632{
   1633	struct extent_state *state;
   1634	int ret = 1;
   1635
   1636	spin_lock(&tree->lock);
   1637	state = find_first_extent_bit_state(tree, start, bits);
   1638	if (state) {
   1639		*start_ret = state->start;
   1640		*end_ret = state->end;
   1641		while ((state = next_state(state)) != NULL) {
   1642			if (state->start > (*end_ret + 1))
   1643				break;
   1644			*end_ret = state->end;
   1645		}
   1646		ret = 0;
   1647	}
   1648	spin_unlock(&tree->lock);
   1649	return ret;
   1650}
   1651
   1652/**
   1653 * Find the first range that has @bits not set. This range could start before
   1654 * @start.
   1655 *
   1656 * @tree:      the tree to search
   1657 * @start:     offset at/after which the found extent should start
   1658 * @start_ret: records the beginning of the range
   1659 * @end_ret:   records the end of the range (inclusive)
   1660 * @bits:      the set of bits which must be unset
   1661 *
   1662 * Since unallocated range is also considered one which doesn't have the bits
   1663 * set it's possible that @end_ret contains -1, this happens in case the range
   1664 * spans (last_range_end, end of device]. In this case it's up to the caller to
   1665 * trim @end_ret to the appropriate size.
   1666 */
   1667void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
   1668				 u64 *start_ret, u64 *end_ret, u32 bits)
   1669{
   1670	struct extent_state *state;
   1671	struct rb_node *node, *prev = NULL, *next;
   1672
   1673	spin_lock(&tree->lock);
   1674
   1675	/* Find first extent with bits cleared */
   1676	while (1) {
   1677		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
   1678		if (!node && !next && !prev) {
   1679			/*
   1680			 * Tree is completely empty, send full range and let
   1681			 * caller deal with it
   1682			 */
   1683			*start_ret = 0;
   1684			*end_ret = -1;
   1685			goto out;
   1686		} else if (!node && !next) {
   1687			/*
   1688			 * We are past the last allocated chunk, set start at
   1689			 * the end of the last extent.
   1690			 */
   1691			state = rb_entry(prev, struct extent_state, rb_node);
   1692			*start_ret = state->end + 1;
   1693			*end_ret = -1;
   1694			goto out;
   1695		} else if (!node) {
   1696			node = next;
   1697		}
   1698		/*
   1699		 * At this point 'node' either contains 'start' or start is
   1700		 * before 'node'
   1701		 */
   1702		state = rb_entry(node, struct extent_state, rb_node);
   1703
   1704		if (in_range(start, state->start, state->end - state->start + 1)) {
   1705			if (state->state & bits) {
   1706				/*
   1707				 * |--range with bits sets--|
   1708				 *    |
   1709				 *    start
   1710				 */
   1711				start = state->end + 1;
   1712			} else {
   1713				/*
   1714				 * 'start' falls within a range that doesn't
   1715				 * have the bits set, so take its start as
   1716				 * the beginning of the desired range
   1717				 *
   1718				 * |--range with bits cleared----|
   1719				 *      |
   1720				 *      start
   1721				 */
   1722				*start_ret = state->start;
   1723				break;
   1724			}
   1725		} else {
   1726			/*
   1727			 * |---prev range---|---hole/unset---|---node range---|
   1728			 *                          |
   1729			 *                        start
   1730			 *
   1731			 *                        or
   1732			 *
   1733			 * |---hole/unset--||--first node--|
   1734			 * 0   |
   1735			 *    start
   1736			 */
   1737			if (prev) {
   1738				state = rb_entry(prev, struct extent_state,
   1739						 rb_node);
   1740				*start_ret = state->end + 1;
   1741			} else {
   1742				*start_ret = 0;
   1743			}
   1744			break;
   1745		}
   1746	}
   1747
   1748	/*
   1749	 * Find the longest stretch from start until an entry which has the
   1750	 * bits set
   1751	 */
   1752	while (1) {
   1753		state = rb_entry(node, struct extent_state, rb_node);
   1754		if (state->end >= start && !(state->state & bits)) {
   1755			*end_ret = state->end;
   1756		} else {
   1757			*end_ret = state->start - 1;
   1758			break;
   1759		}
   1760
   1761		node = rb_next(node);
   1762		if (!node)
   1763			break;
   1764	}
   1765out:
   1766	spin_unlock(&tree->lock);
   1767}
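
/*
 * Illustrative sketch, not part of the original file: since @end_ret can come
 * back as -1, callers are expected to clamp it.  @tree, @cur and the device
 * size below are assumptions for the example only.
 *
 *	u64 hole_start, hole_end;
 *
 *	find_first_clear_extent_bit(tree, cur, &hole_start, &hole_end,
 *				    CHUNK_ALLOCATED);
 *	if (hole_end == (u64)-1)
 *		hole_end = device_total_bytes - 1;
 */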
   1768
   1769/*
   1770 * find a contiguous range of bytes in the file marked as delalloc, not
   1771 * more than 'max_bytes'.  start and end are used to return the range.
   1772 *
   1773 * true is returned if we find something, false if nothing was in the tree
   1774 */
   1775bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
   1776			       u64 *end, u64 max_bytes,
   1777			       struct extent_state **cached_state)
   1778{
   1779	struct rb_node *node;
   1780	struct extent_state *state;
   1781	u64 cur_start = *start;
   1782	bool found = false;
   1783	u64 total_bytes = 0;
   1784
   1785	spin_lock(&tree->lock);
   1786
   1787	/*
   1788	 * this search will find all the extents that end after
   1789	 * our range starts.
   1790	 */
   1791	node = tree_search(tree, cur_start);
   1792	if (!node) {
   1793		*end = (u64)-1;
   1794		goto out;
   1795	}
   1796
   1797	while (1) {
   1798		state = rb_entry(node, struct extent_state, rb_node);
   1799		if (found && (state->start != cur_start ||
   1800			      (state->state & EXTENT_BOUNDARY))) {
   1801			goto out;
   1802		}
   1803		if (!(state->state & EXTENT_DELALLOC)) {
   1804			if (!found)
   1805				*end = state->end;
   1806			goto out;
   1807		}
   1808		if (!found) {
   1809			*start = state->start;
   1810			*cached_state = state;
   1811			refcount_inc(&state->refs);
   1812		}
   1813		found = true;
   1814		*end = state->end;
   1815		cur_start = state->end + 1;
   1816		node = rb_next(node);
   1817		total_bytes += state->end - state->start + 1;
   1818		if (total_bytes >= max_bytes)
   1819			break;
   1820		if (!node)
   1821			break;
   1822	}
   1823out:
   1824	spin_unlock(&tree->lock);
   1825	return found;
   1826}
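
/*
 * Illustrative sketch, not part of the original file: how the helper above is
 * typically driven (find_lock_delalloc_range() further down is the real
 * caller).  @tree and @cur are assumptions for the example only.
 *
 *	struct extent_state *cached = NULL;
 *	u64 dstart = cur, dend = 0;
 *
 *	if (btrfs_find_delalloc_range(tree, &dstart, &dend,
 *				      BTRFS_MAX_EXTENT_SIZE, &cached))
 *		pr_debug("delalloc range [%llu, %llu]\n", dstart, dend);
 *	free_extent_state(cached);
 */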
   1827
   1828/*
   1829 * Process one page for __process_pages_contig().
   1830 *
   1831 * Return >0 if we hit @page == @locked_page.
   1832 * Return 0 if we updated the page status.
   1833 * Return -EAGAIN if we need to try again.
   1834 * (For the PAGE_LOCK case, when the page is not dirty or not in the mapping)
   1835 */
   1836static int process_one_page(struct btrfs_fs_info *fs_info,
   1837			    struct address_space *mapping,
   1838			    struct page *page, struct page *locked_page,
   1839			    unsigned long page_ops, u64 start, u64 end)
   1840{
   1841	u32 len;
   1842
   1843	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
   1844	len = end + 1 - start;
   1845
   1846	if (page_ops & PAGE_SET_ORDERED)
   1847		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
   1848	if (page_ops & PAGE_SET_ERROR)
   1849		btrfs_page_clamp_set_error(fs_info, page, start, len);
   1850	if (page_ops & PAGE_START_WRITEBACK) {
   1851		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
   1852		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
   1853	}
   1854	if (page_ops & PAGE_END_WRITEBACK)
   1855		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
   1856
   1857	if (page == locked_page)
   1858		return 1;
   1859
   1860	if (page_ops & PAGE_LOCK) {
   1861		int ret;
   1862
   1863		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
   1864		if (ret)
   1865			return ret;
   1866		if (!PageDirty(page) || page->mapping != mapping) {
   1867			btrfs_page_end_writer_lock(fs_info, page, start, len);
   1868			return -EAGAIN;
   1869		}
   1870	}
   1871	if (page_ops & PAGE_UNLOCK)
   1872		btrfs_page_end_writer_lock(fs_info, page, start, len);
   1873	return 0;
   1874}
   1875
   1876static int __process_pages_contig(struct address_space *mapping,
   1877				  struct page *locked_page,
   1878				  u64 start, u64 end, unsigned long page_ops,
   1879				  u64 *processed_end)
   1880{
   1881	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
   1882	pgoff_t start_index = start >> PAGE_SHIFT;
   1883	pgoff_t end_index = end >> PAGE_SHIFT;
   1884	pgoff_t index = start_index;
   1885	unsigned long nr_pages = end_index - start_index + 1;
   1886	unsigned long pages_processed = 0;
   1887	struct page *pages[16];
   1888	int err = 0;
   1889	int i;
   1890
   1891	if (page_ops & PAGE_LOCK) {
   1892		ASSERT(page_ops == PAGE_LOCK);
   1893		ASSERT(processed_end && *processed_end == start);
   1894	}
   1895
   1896	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
   1897		mapping_set_error(mapping, -EIO);
   1898
   1899	while (nr_pages > 0) {
   1900		int found_pages;
   1901
   1902		found_pages = find_get_pages_contig(mapping, index,
   1903				     min_t(unsigned long,
   1904				     nr_pages, ARRAY_SIZE(pages)), pages);
   1905		if (found_pages == 0) {
   1906			/*
   1907			 * We can only find nothing at @index if we're going
   1908			 * to lock these pages.
   1909			 */
   1910			ASSERT(page_ops & PAGE_LOCK);
   1911			err = -EAGAIN;
   1912			goto out;
   1913		}
   1914
   1915		for (i = 0; i < found_pages; i++) {
   1916			int process_ret;
   1917
   1918			process_ret = process_one_page(fs_info, mapping,
   1919					pages[i], locked_page, page_ops,
   1920					start, end);
   1921			if (process_ret < 0) {
   1922				for (; i < found_pages; i++)
   1923					put_page(pages[i]);
   1924				err = -EAGAIN;
   1925				goto out;
   1926			}
   1927			put_page(pages[i]);
   1928			pages_processed++;
   1929		}
   1930		nr_pages -= found_pages;
   1931		index += found_pages;
   1932		cond_resched();
   1933	}
   1934out:
   1935	if (err && processed_end) {
   1936		/*
   1937		 * Update @processed_end. I know this is awful since it has
   1938		 * two different return value patterns (inclusive vs exclusive).
   1939		 *
   1940		 * But the exclusive pattern is necessary if @start is 0, otherwise
   1941		 * we underflow and the check against @processed_end won't work as
   1942		 * expected (a worked example follows this function).
   1943		 */
   1944		if (pages_processed)
   1945			*processed_end = min(end,
   1946			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
   1947		else
   1948			*processed_end = start;
   1949	}
   1950	return err;
   1951}
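
/*
 * Worked example, not part of the original file, for the @processed_end update
 * above: with 4K pages, start_index == 0 and pages_processed == 3, the result
 * is ((u64)(0 + 3) << PAGE_SHIFT) - 1 == 12287, the last byte of the third
 * page, clamped to @end.  When nothing was processed, @processed_end is set to
 * @start itself rather than @start - 1, which would underflow for @start == 0.
 */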
   1952
   1953static noinline void __unlock_for_delalloc(struct inode *inode,
   1954					   struct page *locked_page,
   1955					   u64 start, u64 end)
   1956{
   1957	unsigned long index = start >> PAGE_SHIFT;
   1958	unsigned long end_index = end >> PAGE_SHIFT;
   1959
   1960	ASSERT(locked_page);
   1961	if (index == locked_page->index && end_index == index)
   1962		return;
   1963
   1964	__process_pages_contig(inode->i_mapping, locked_page, start, end,
   1965			       PAGE_UNLOCK, NULL);
   1966}
   1967
   1968static noinline int lock_delalloc_pages(struct inode *inode,
   1969					struct page *locked_page,
   1970					u64 delalloc_start,
   1971					u64 delalloc_end)
   1972{
   1973	unsigned long index = delalloc_start >> PAGE_SHIFT;
   1974	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
   1975	u64 processed_end = delalloc_start;
   1976	int ret;
   1977
   1978	ASSERT(locked_page);
   1979	if (index == locked_page->index && index == end_index)
   1980		return 0;
   1981
   1982	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
   1983				     delalloc_end, PAGE_LOCK, &processed_end);
   1984	if (ret == -EAGAIN && processed_end > delalloc_start)
   1985		__unlock_for_delalloc(inode, locked_page, delalloc_start,
   1986				      processed_end);
   1987	return ret;
   1988}
   1989
   1990/*
   1991 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
   1992 * more than @max_bytes.
   1993 *
   1994 * @start:	The original start bytenr to search.
   1995 *		Will store the extent range start bytenr.
   1996 * @end:	The original end bytenr of the search range
   1997 *		Will store the extent range end bytenr.
   1998 *
   1999 * Return true if we find a delalloc range which starts inside the original
   2000 * range, and @start/@end will store the delalloc range start/end.
   2001 *
   2002 * Return false if we can't find any delalloc range which starts inside the
   2003 * original range, and @start/@end will be the non-delalloc range start/end.
   2004 */
   2005EXPORT_FOR_TESTS
   2006noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
   2007				    struct page *locked_page, u64 *start,
   2008				    u64 *end)
   2009{
   2010	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
   2011	const u64 orig_start = *start;
   2012	const u64 orig_end = *end;
   2013	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
   2014	u64 delalloc_start;
   2015	u64 delalloc_end;
   2016	bool found;
   2017	struct extent_state *cached_state = NULL;
   2018	int ret;
   2019	int loops = 0;
   2020
   2021	/* Caller should pass a valid @end to indicate the search range end */
   2022	ASSERT(orig_end > orig_start);
   2023
   2024	/* The range should at least cover part of the page */
   2025	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
   2026		 orig_end <= page_offset(locked_page)));
   2027again:
   2028	/* step one, find a bunch of delalloc bytes starting at start */
   2029	delalloc_start = *start;
   2030	delalloc_end = 0;
   2031	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
   2032					  max_bytes, &cached_state);
   2033	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
   2034		*start = delalloc_start;
   2035
   2036		/* @delalloc_end can be -1, never go beyond @orig_end */
   2037		*end = min(delalloc_end, orig_end);
   2038		free_extent_state(cached_state);
   2039		return false;
   2040	}
   2041
   2042	/*
   2043	 * start comes from the offset of locked_page.  We have to lock
   2044	 * pages in order, so we can't process delalloc bytes before
   2045	 * locked_page
   2046	 */
   2047	if (delalloc_start < *start)
   2048		delalloc_start = *start;
   2049
   2050	/*
   2051	 * make sure to limit the number of pages we try to lock down
   2052	 */
   2053	if (delalloc_end + 1 - delalloc_start > max_bytes)
   2054		delalloc_end = delalloc_start + max_bytes - 1;
   2055
   2056	/* step two, lock all the pages after the page that has start */
   2057	ret = lock_delalloc_pages(inode, locked_page,
   2058				  delalloc_start, delalloc_end);
   2059	ASSERT(!ret || ret == -EAGAIN);
   2060	if (ret == -EAGAIN) {
   2061		/* some of the pages are gone, let's avoid looping by
   2062		 * shortening the size of the delalloc range we're searching
   2063		 */
   2064		free_extent_state(cached_state);
   2065		cached_state = NULL;
   2066		if (!loops) {
   2067			max_bytes = PAGE_SIZE;
   2068			loops = 1;
   2069			goto again;
   2070		} else {
   2071			found = false;
   2072			goto out_failed;
   2073		}
   2074	}
   2075
   2076	/* step three, lock the state bits for the whole range */
   2077	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
   2078
   2079	/* then test to make sure it is all still delalloc */
   2080	ret = test_range_bit(tree, delalloc_start, delalloc_end,
   2081			     EXTENT_DELALLOC, 1, cached_state);
   2082	if (!ret) {
   2083		unlock_extent_cached(tree, delalloc_start, delalloc_end,
   2084				     &cached_state);
   2085		__unlock_for_delalloc(inode, locked_page,
   2086			      delalloc_start, delalloc_end);
   2087		cond_resched();
   2088		goto again;
   2089	}
   2090	free_extent_state(cached_state);
   2091	*start = delalloc_start;
   2092	*end = delalloc_end;
   2093out_failed:
   2094	return found;
   2095}
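
/*
 * Illustrative sketch, not part of the original file: the typical writepage
 * call pattern for the helper above.  @inode and @locked_page are assumptions
 * for the example only.
 *
 *	u64 dstart = page_offset(locked_page);
 *	u64 dend = dstart + PAGE_SIZE - 1;
 *
 *	if (find_lock_delalloc_range(inode, locked_page, &dstart, &dend)) {
 *		... run delalloc for [dstart, dend]; the pages and the extent
 *		    state bits are locked at this point ...
 *	}
 */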
   2096
   2097void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
   2098				  struct page *locked_page,
   2099				  u32 clear_bits, unsigned long page_ops)
   2100{
   2101	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
   2102
   2103	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
   2104			       start, end, page_ops, NULL);
   2105}
   2106
   2107/*
   2108 * count the number of bytes in the tree that have a given bit(s)
   2109 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
   2110 * cached.  The total number found is returned.
   2111 */
   2112u64 count_range_bits(struct extent_io_tree *tree,
   2113		     u64 *start, u64 search_end, u64 max_bytes,
   2114		     u32 bits, int contig)
   2115{
   2116	struct rb_node *node;
   2117	struct extent_state *state;
   2118	u64 cur_start = *start;
   2119	u64 total_bytes = 0;
   2120	u64 last = 0;
   2121	int found = 0;
   2122
   2123	if (WARN_ON(search_end <= cur_start))
   2124		return 0;
   2125
   2126	spin_lock(&tree->lock);
   2127	if (cur_start == 0 && bits == EXTENT_DIRTY) {
   2128		total_bytes = tree->dirty_bytes;
   2129		goto out;
   2130	}
   2131	/*
   2132	 * this search will find all the extents that end after
   2133	 * our range starts.
   2134	 */
   2135	node = tree_search(tree, cur_start);
   2136	if (!node)
   2137		goto out;
   2138
   2139	while (1) {
   2140		state = rb_entry(node, struct extent_state, rb_node);
   2141		if (state->start > search_end)
   2142			break;
   2143		if (contig && found && state->start > last + 1)
   2144			break;
   2145		if (state->end >= cur_start && (state->state & bits) == bits) {
   2146			total_bytes += min(search_end, state->end) + 1 -
   2147				       max(cur_start, state->start);
   2148			if (total_bytes >= max_bytes)
   2149				break;
   2150			if (!found) {
   2151				*start = max(cur_start, state->start);
   2152				found = 1;
   2153			}
   2154			last = state->end;
   2155		} else if (contig && found) {
   2156			break;
   2157		}
   2158		node = rb_next(node);
   2159		if (!node)
   2160			break;
   2161	}
   2162out:
   2163	spin_unlock(&tree->lock);
   2164	return total_bytes;
   2165}
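
/*
 * Illustrative sketch, not part of the original file: count the dirty bytes in
 * the first megabyte of @tree.  The tree itself is an assumption for the
 * example only.
 *
 *	u64 found_start = 0;
 *	u64 bytes;
 *
 *	bytes = count_range_bits(tree, &found_start, SZ_1M - 1, (u64)-1,
 *				 EXTENT_DIRTY, 0);
 */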
   2166
   2167/*
   2168 * set the private field for a given byte offset in the tree.  If there isn't
   2169 * an extent_state there already, this does nothing and returns -ENOENT.
   2170 */
   2171int set_state_failrec(struct extent_io_tree *tree, u64 start,
   2172		      struct io_failure_record *failrec)
   2173{
   2174	struct rb_node *node;
   2175	struct extent_state *state;
   2176	int ret = 0;
   2177
   2178	spin_lock(&tree->lock);
   2179	/*
   2180	 * this search will find all the extents that end after
   2181	 * our range starts.
   2182	 */
   2183	node = tree_search(tree, start);
   2184	if (!node) {
   2185		ret = -ENOENT;
   2186		goto out;
   2187	}
   2188	state = rb_entry(node, struct extent_state, rb_node);
   2189	if (state->start != start) {
   2190		ret = -ENOENT;
   2191		goto out;
   2192	}
   2193	state->failrec = failrec;
   2194out:
   2195	spin_unlock(&tree->lock);
   2196	return ret;
   2197}
   2198
   2199struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
   2200{
   2201	struct rb_node *node;
   2202	struct extent_state *state;
   2203	struct io_failure_record *failrec;
   2204
   2205	spin_lock(&tree->lock);
   2206	/*
   2207	 * this search will find all the extents that end after
   2208	 * our range starts.
   2209	 */
   2210	node = tree_search(tree, start);
   2211	if (!node) {
   2212		failrec = ERR_PTR(-ENOENT);
   2213		goto out;
   2214	}
   2215	state = rb_entry(node, struct extent_state, rb_node);
   2216	if (state->start != start) {
   2217		failrec = ERR_PTR(-ENOENT);
   2218		goto out;
   2219	}
   2220
   2221	failrec = state->failrec;
   2222out:
   2223	spin_unlock(&tree->lock);
   2224	return failrec;
   2225}
   2226
   2227/*
   2228 * searches a range in the state tree for a given mask.
   2229 * If 'filled' == 1, this returns 1 only if every extent in the range
   2230 * has the bits set.  Otherwise, 1 is returned if any bit in the
   2231 * range is found set.
   2232 */
   2233int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
   2234		   u32 bits, int filled, struct extent_state *cached)
   2235{
   2236	struct extent_state *state = NULL;
   2237	struct rb_node *node;
   2238	int bitset = 0;
   2239
   2240	spin_lock(&tree->lock);
   2241	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
   2242	    cached->end > start)
   2243		node = &cached->rb_node;
   2244	else
   2245		node = tree_search(tree, start);
   2246	while (node && start <= end) {
   2247		state = rb_entry(node, struct extent_state, rb_node);
   2248
   2249		if (filled && state->start > start) {
   2250			bitset = 0;
   2251			break;
   2252		}
   2253
   2254		if (state->start > end)
   2255			break;
   2256
   2257		if (state->state & bits) {
   2258			bitset = 1;
   2259			if (!filled)
   2260				break;
   2261		} else if (filled) {
   2262			bitset = 0;
   2263			break;
   2264		}
   2265
   2266		if (state->end == (u64)-1)
   2267			break;
   2268
   2269		start = state->end + 1;
   2270		if (start > end)
   2271			break;
   2272		node = rb_next(node);
   2273		if (!node) {
   2274			if (filled)
   2275				bitset = 0;
   2276			break;
   2277		}
   2278	}
   2279	spin_unlock(&tree->lock);
   2280	return bitset;
   2281}
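
/*
 * Illustrative sketch, not part of the original file, showing the effect of
 * @filled.  @tree, @start, @end and @cached are assumptions for the example.
 *
 *	Whole range must be delalloc:
 *		test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, cached);
 *	Any byte in the range is locked:
 *		test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
 */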
   2282
   2283int free_io_failure(struct extent_io_tree *failure_tree,
   2284		    struct extent_io_tree *io_tree,
   2285		    struct io_failure_record *rec)
   2286{
   2287	int ret;
   2288	int err = 0;
   2289
   2290	set_state_failrec(failure_tree, rec->start, NULL);
   2291	ret = clear_extent_bits(failure_tree, rec->start,
   2292				rec->start + rec->len - 1,
   2293				EXTENT_LOCKED | EXTENT_DIRTY);
   2294	if (ret)
   2295		err = ret;
   2296
   2297	ret = clear_extent_bits(io_tree, rec->start,
   2298				rec->start + rec->len - 1,
   2299				EXTENT_DAMAGED);
   2300	if (ret && !err)
   2301		err = ret;
   2302
   2303	kfree(rec);
   2304	return err;
   2305}
   2306
   2307/*
   2308 * this bypasses the standard btrfs submit functions deliberately, as
   2309 * the standard behavior is to write all copies in a raid setup. here we only
   2310 * want to write the one bad copy. so we do the mapping for ourselves and issue
   2311 * submit_bio directly.
   2312 * to avoid any synchronization issues, wait for the data after writing, which
   2313 * actually prevents the read that triggered the error from finishing.
   2314 * currently, there can be no more than two copies of every data bit. thus,
   2315 * exactly one rewrite is required.
   2316 */
   2317static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
   2318			     u64 length, u64 logical, struct page *page,
   2319			     unsigned int pg_offset, int mirror_num)
   2320{
   2321	struct btrfs_device *dev;
   2322	struct bio_vec bvec;
   2323	struct bio bio;
   2324	u64 map_length = 0;
   2325	u64 sector;
   2326	struct btrfs_io_context *bioc = NULL;
   2327	int ret = 0;
   2328
   2329	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
   2330	BUG_ON(!mirror_num);
   2331
   2332	if (btrfs_repair_one_zone(fs_info, logical))
   2333		return 0;
   2334
   2335	map_length = length;
   2336
   2337	/*
   2338	 * Avoid races with device replace and make sure our bioc has devices
   2339	 * associated to its stripes that don't go away while we are doing the
   2340	 * read repair operation.
   2341	 */
   2342	btrfs_bio_counter_inc_blocked(fs_info);
   2343	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
   2344		/*
   2345		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
   2346		 * to update all raid stripes, but here we just want to correct
   2347		 * the bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
   2348		 * stripe's dev and sector.
   2349		 */
   2350		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
   2351				      &map_length, &bioc, 0);
   2352		if (ret)
   2353			goto out_counter_dec;
   2354		ASSERT(bioc->mirror_num == 1);
   2355	} else {
   2356		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
   2357				      &map_length, &bioc, mirror_num);
   2358		if (ret)
   2359			goto out_counter_dec;
   2360		BUG_ON(mirror_num != bioc->mirror_num);
   2361	}
   2362
   2363	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
   2364	dev = bioc->stripes[bioc->mirror_num - 1].dev;
   2365	btrfs_put_bioc(bioc);
   2366
   2367	if (!dev || !dev->bdev ||
   2368	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
   2369		ret = -EIO;
   2370		goto out_counter_dec;
   2371	}
   2372
   2373	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
   2374	bio.bi_iter.bi_sector = sector;
   2375	__bio_add_page(&bio, page, length, pg_offset);
   2376
   2377	btrfsic_check_bio(&bio);
   2378	ret = submit_bio_wait(&bio);
   2379	if (ret) {
   2380		/* try to remap that extent elsewhere? */
   2381		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
   2382		goto out_bio_uninit;
   2383	}
   2384
   2385	btrfs_info_rl_in_rcu(fs_info,
   2386		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
   2387				  ino, start,
   2388				  rcu_str_deref(dev->name), sector);
   2389	ret = 0;
   2390
   2391out_bio_uninit:
   2392	bio_uninit(&bio);
   2393out_counter_dec:
   2394	btrfs_bio_counter_dec(fs_info);
   2395	return ret;
   2396}
   2397
   2398int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
   2399{
   2400	struct btrfs_fs_info *fs_info = eb->fs_info;
   2401	u64 start = eb->start;
   2402	int i, num_pages = num_extent_pages(eb);
   2403	int ret = 0;
   2404
   2405	if (sb_rdonly(fs_info->sb))
   2406		return -EROFS;
   2407
   2408	for (i = 0; i < num_pages; i++) {
   2409		struct page *p = eb->pages[i];
   2410
   2411		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
   2412					start - page_offset(p), mirror_num);
   2413		if (ret)
   2414			break;
   2415		start += PAGE_SIZE;
   2416	}
   2417
   2418	return ret;
   2419}
   2420
   2421/*
   2422 * each time an IO finishes, we do a fast check in the IO failure tree
   2423 * to see if we need to process or clean up an io_failure_record
   2424 */
   2425int clean_io_failure(struct btrfs_fs_info *fs_info,
   2426		     struct extent_io_tree *failure_tree,
   2427		     struct extent_io_tree *io_tree, u64 start,
   2428		     struct page *page, u64 ino, unsigned int pg_offset)
   2429{
   2430	u64 private;
   2431	struct io_failure_record *failrec;
   2432	struct extent_state *state;
   2433	int num_copies;
   2434	int ret;
   2435
   2436	private = 0;
   2437	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
   2438			       EXTENT_DIRTY, 0);
   2439	if (!ret)
   2440		return 0;
   2441
   2442	failrec = get_state_failrec(failure_tree, start);
   2443	if (IS_ERR(failrec))
   2444		return 0;
   2445
   2446	BUG_ON(!failrec->this_mirror);
   2447
   2448	if (sb_rdonly(fs_info->sb))
   2449		goto out;
   2450
   2451	spin_lock(&io_tree->lock);
   2452	state = find_first_extent_bit_state(io_tree,
   2453					    failrec->start,
   2454					    EXTENT_LOCKED);
   2455	spin_unlock(&io_tree->lock);
   2456
   2457	if (state && state->start <= failrec->start &&
   2458	    state->end >= failrec->start + failrec->len - 1) {
   2459		num_copies = btrfs_num_copies(fs_info, failrec->logical,
   2460					      failrec->len);
   2461		if (num_copies > 1)  {
   2462			repair_io_failure(fs_info, ino, start, failrec->len,
   2463					  failrec->logical, page, pg_offset,
   2464					  failrec->failed_mirror);
   2465		}
   2466	}
   2467
   2468out:
   2469	free_io_failure(failure_tree, io_tree, failrec);
   2470
   2471	return 0;
   2472}
   2473
   2474/*
   2475 * Can be called when
   2476 * - hold extent lock
   2477 * - under ordered extent
   2478 * - the inode is freeing
   2479 */
   2480void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
   2481{
   2482	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
   2483	struct io_failure_record *failrec;
   2484	struct extent_state *state, *next;
   2485
   2486	if (RB_EMPTY_ROOT(&failure_tree->state))
   2487		return;
   2488
   2489	spin_lock(&failure_tree->lock);
   2490	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
   2491	while (state) {
   2492		if (state->start > end)
   2493			break;
   2494
   2495		ASSERT(state->end <= end);
   2496
   2497		next = next_state(state);
   2498
   2499		failrec = state->failrec;
   2500		free_extent_state(state);
   2501		kfree(failrec);
   2502
   2503		state = next;
   2504	}
   2505	spin_unlock(&failure_tree->lock);
   2506}
   2507
   2508static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
   2509							     u64 start)
   2510{
   2511	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   2512	struct io_failure_record *failrec;
   2513	struct extent_map *em;
   2514	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
   2515	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
   2516	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
   2517	const u32 sectorsize = fs_info->sectorsize;
   2518	int ret;
   2519	u64 logical;
   2520
   2521	failrec = get_state_failrec(failure_tree, start);
   2522	if (!IS_ERR(failrec)) {
   2523		btrfs_debug(fs_info,
   2524	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
   2525			failrec->logical, failrec->start, failrec->len);
   2526		/*
   2527		 * when data can be on disk more than twice, add to failrec here
   2528		 * (e.g. with a list for failed_mirror) to make
   2529		 * clean_io_failure() clean all those errors at once.
   2530		 */
   2531
   2532		return failrec;
   2533	}
   2534
   2535	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
   2536	if (!failrec)
   2537		return ERR_PTR(-ENOMEM);
   2538
   2539	failrec->start = start;
   2540	failrec->len = sectorsize;
   2541	failrec->this_mirror = 0;
   2542	failrec->compress_type = BTRFS_COMPRESS_NONE;
   2543
   2544	read_lock(&em_tree->lock);
   2545	em = lookup_extent_mapping(em_tree, start, failrec->len);
   2546	if (!em) {
   2547		read_unlock(&em_tree->lock);
   2548		kfree(failrec);
   2549		return ERR_PTR(-EIO);
   2550	}
   2551
   2552	if (em->start > start || em->start + em->len <= start) {
   2553		free_extent_map(em);
   2554		em = NULL;
   2555	}
   2556	read_unlock(&em_tree->lock);
   2557	if (!em) {
   2558		kfree(failrec);
   2559		return ERR_PTR(-EIO);
   2560	}
   2561
   2562	logical = start - em->start;
   2563	logical = em->block_start + logical;
   2564	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
   2565		logical = em->block_start;
   2566		failrec->compress_type = em->compress_type;
   2567	}
   2568
   2569	btrfs_debug(fs_info,
   2570		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
   2571		    logical, start, failrec->len);
   2572
   2573	failrec->logical = logical;
   2574	free_extent_map(em);
   2575
   2576	/* Set the bits in the private failure tree */
   2577	ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
   2578			      EXTENT_LOCKED | EXTENT_DIRTY);
   2579	if (ret >= 0) {
   2580		ret = set_state_failrec(failure_tree, start, failrec);
   2581		/* Set the bits in the inode's tree */
   2582		ret = set_extent_bits(tree, start, start + sectorsize - 1,
   2583				      EXTENT_DAMAGED);
   2584	} else if (ret < 0) {
   2585		kfree(failrec);
   2586		return ERR_PTR(ret);
   2587	}
   2588
   2589	return failrec;
   2590}
   2591
   2592static bool btrfs_check_repairable(struct inode *inode,
   2593				   struct io_failure_record *failrec,
   2594				   int failed_mirror)
   2595{
   2596	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   2597	int num_copies;
   2598
   2599	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
   2600	if (num_copies == 1) {
   2601		/*
   2602		 * we only have a single copy of the data, so don't bother with
   2603		 * all the retry and error correction code that follows. no
   2604		 * matter what the error is, it is very likely to persist.
   2605		 */
   2606		btrfs_debug(fs_info,
   2607			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
   2608			num_copies, failrec->this_mirror, failed_mirror);
   2609		return false;
   2610	}
   2611
   2612	/* The failure record should only contain one sector */
   2613	ASSERT(failrec->len == fs_info->sectorsize);
   2614
   2615	/*
   2616	 * There are two premises:
   2617	 * a) deliver good data to the caller
   2618	 * b) correct the bad sectors on disk
   2619	 *
   2620	 * Since we're only doing repair for one sector, we only need to get
   2621	 * a good copy of the failed sector and if we succeed, we have setup
   2622	 * everything for repair_io_failure to do the rest for us.
   2623	 */
   2624	ASSERT(failed_mirror);
   2625	failrec->failed_mirror = failed_mirror;
   2626	failrec->this_mirror++;
   2627	if (failrec->this_mirror == failed_mirror)
   2628		failrec->this_mirror++;
   2629
   2630	if (failrec->this_mirror > num_copies) {
   2631		btrfs_debug(fs_info,
   2632			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
   2633			num_copies, failrec->this_mirror, failed_mirror);
   2634		return false;
   2635	}
   2636
   2637	return true;
   2638}
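
/*
 * Worked example, not part of the original file, for the mirror rotation in
 * btrfs_check_repairable() above: with num_copies == 3 and failed_mirror == 2,
 * a fresh failrec starts at this_mirror == 0 and advances to 1 on the first
 * attempt; on the next attempt it advances to 2, matches failed_mirror and is
 * bumped to 3; a further attempt would push it to 4 > num_copies, so the
 * record is reported as not repairable.
 */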
   2639
   2640int btrfs_repair_one_sector(struct inode *inode,
   2641			    struct bio *failed_bio, u32 bio_offset,
   2642			    struct page *page, unsigned int pgoff,
   2643			    u64 start, int failed_mirror,
   2644			    submit_bio_hook_t *submit_bio_hook)
   2645{
   2646	struct io_failure_record *failrec;
   2647	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   2648	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
   2649	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
   2650	struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
   2651	const int icsum = bio_offset >> fs_info->sectorsize_bits;
   2652	struct bio *repair_bio;
   2653	struct btrfs_bio *repair_bbio;
   2654
   2655	btrfs_debug(fs_info,
   2656		   "repair read error: read error at %llu", start);
   2657
   2658	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
   2659
   2660	failrec = btrfs_get_io_failure_record(inode, start);
   2661	if (IS_ERR(failrec))
   2662		return PTR_ERR(failrec);
   2663
   2664
   2665	if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
   2666		free_io_failure(failure_tree, tree, failrec);
   2667		return -EIO;
   2668	}
   2669
   2670	repair_bio = btrfs_bio_alloc(1);
   2671	repair_bbio = btrfs_bio(repair_bio);
   2672	repair_bbio->file_offset = start;
   2673	repair_bio->bi_opf = REQ_OP_READ;
   2674	repair_bio->bi_end_io = failed_bio->bi_end_io;
   2675	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
   2676	repair_bio->bi_private = failed_bio->bi_private;
   2677
   2678	if (failed_bbio->csum) {
   2679		const u32 csum_size = fs_info->csum_size;
   2680
   2681		repair_bbio->csum = repair_bbio->csum_inline;
   2682		memcpy(repair_bbio->csum,
   2683		       failed_bbio->csum + csum_size * icsum, csum_size);
   2684	}
   2685
   2686	bio_add_page(repair_bio, page, failrec->len, pgoff);
   2687	repair_bbio->iter = repair_bio->bi_iter;
   2688
   2689	btrfs_debug(btrfs_sb(inode->i_sb),
   2690		    "repair read error: submitting new read to mirror %d",
   2691		    failrec->this_mirror);
   2692
   2693	/*
   2694	 * At this point we have a bio, so any errors from submit_bio_hook()
   2695	 * will be handled by the endio on the repair_bio, so we can't return an
   2696	 * error here.
   2697	 */
   2698	submit_bio_hook(inode, repair_bio, failrec->this_mirror, failrec->compress_type);
   2699	return BLK_STS_OK;
   2700}
   2701
   2702static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
   2703{
   2704	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
   2705
   2706	ASSERT(page_offset(page) <= start &&
   2707	       start + len <= page_offset(page) + PAGE_SIZE);
   2708
   2709	if (uptodate) {
   2710		if (fsverity_active(page->mapping->host) &&
   2711		    !PageError(page) &&
   2712		    !PageUptodate(page) &&
   2713		    start < i_size_read(page->mapping->host) &&
   2714		    !fsverity_verify_page(page)) {
   2715			btrfs_page_set_error(fs_info, page, start, len);
   2716		} else {
   2717			btrfs_page_set_uptodate(fs_info, page, start, len);
   2718		}
   2719	} else {
   2720		btrfs_page_clear_uptodate(fs_info, page, start, len);
   2721		btrfs_page_set_error(fs_info, page, start, len);
   2722	}
   2723
   2724	if (!btrfs_is_subpage(fs_info, page))
   2725		unlock_page(page);
   2726	else
   2727		btrfs_subpage_end_reader(fs_info, page, start, len);
   2728}
   2729
   2730static blk_status_t submit_data_read_repair(struct inode *inode,
   2731					    struct bio *failed_bio,
   2732					    u32 bio_offset, struct page *page,
   2733					    unsigned int pgoff,
   2734					    u64 start, u64 end,
   2735					    int failed_mirror,
   2736					    unsigned int error_bitmap)
   2737{
   2738	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   2739	const u32 sectorsize = fs_info->sectorsize;
   2740	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
   2741	int error = 0;
   2742	int i;
   2743
   2744	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
   2745
   2746	/* This repair is only for data */
   2747	ASSERT(is_data_inode(inode));
   2748
   2749	/* We're here because we had some read errors or csum mismatch */
   2750	ASSERT(error_bitmap);
   2751
   2752	/*
   2753	 * We only get called on buffered IO, thus page must be mapped and bio
   2754	 * must not be cloned.
   2755	 */
   2756	ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
   2757
   2758	/* Iterate through all the sectors in the range */
   2759	for (i = 0; i < nr_bits; i++) {
   2760		const unsigned int offset = i * sectorsize;
   2761		struct extent_state *cached = NULL;
   2762		bool uptodate = false;
   2763		int ret;
   2764
   2765		if (!(error_bitmap & (1U << i))) {
   2766			/*
   2767			 * This sector has no error, just end the page read
   2768			 * and unlock the range.
   2769			 */
   2770			uptodate = true;
   2771			goto next;
   2772		}
   2773
   2774		ret = btrfs_repair_one_sector(inode, failed_bio,
   2775				bio_offset + offset,
   2776				page, pgoff + offset, start + offset,
   2777				failed_mirror, btrfs_submit_data_bio);
   2778		if (!ret) {
   2779			/*
   2780			 * We have submitted the read repair, the page release
   2781			 * will be handled by the endio function of the
   2782			 * submitted repair bio.
   2783			 * Thus we don't need to do anything here.
   2784			 */
   2785			continue;
   2786		}
   2787		/*
   2788		 * Repair failed, just record the error but still continue.
   2789		 * Otherwise the remaining sectors will not be properly unlocked.
   2790		 */
   2791		if (!error)
   2792			error = ret;
   2793next:
   2794		end_page_read(page, uptodate, start + offset, sectorsize);
   2795		if (uptodate)
   2796			set_extent_uptodate(&BTRFS_I(inode)->io_tree,
   2797					start + offset,
   2798					start + offset + sectorsize - 1,
   2799					&cached, GFP_ATOMIC);
   2800		unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
   2801				start + offset,
   2802				start + offset + sectorsize - 1,
   2803				&cached);
   2804	}
   2805	return errno_to_blk_status(error);
   2806}
   2807
   2808/* lots and lots of room for performance fixes in the end_bio funcs */
   2809
   2810void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
   2811{
   2812	struct btrfs_inode *inode;
   2813	const bool uptodate = (err == 0);
   2814	int ret = 0;
   2815
   2816	ASSERT(page && page->mapping);
   2817	inode = BTRFS_I(page->mapping->host);
   2818	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
   2819
   2820	if (!uptodate) {
   2821		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
   2822		u32 len;
   2823
   2824		ASSERT(end + 1 - start <= U32_MAX);
   2825		len = end + 1 - start;
   2826
   2827		btrfs_page_clear_uptodate(fs_info, page, start, len);
   2828		btrfs_page_set_error(fs_info, page, start, len);
   2829		ret = err < 0 ? err : -EIO;
   2830		mapping_set_error(page->mapping, ret);
   2831	}
   2832}
   2833
   2834/*
   2835 * after a writepage IO is done, we need to:
   2836 * clear the uptodate bits on error
   2837 * clear the writeback bits in the extent tree for this IO
   2838 * end_page_writeback if the page has no more pending IO
   2839 *
   2840 * Scheduling is not allowed, so the extent state tree is expected
   2841 * to have one and only one object corresponding to this IO.
   2842 */
   2843static void end_bio_extent_writepage(struct bio *bio)
   2844{
   2845	int error = blk_status_to_errno(bio->bi_status);
   2846	struct bio_vec *bvec;
   2847	u64 start;
   2848	u64 end;
   2849	struct bvec_iter_all iter_all;
   2850	bool first_bvec = true;
   2851
   2852	ASSERT(!bio_flagged(bio, BIO_CLONED));
   2853	bio_for_each_segment_all(bvec, bio, iter_all) {
   2854		struct page *page = bvec->bv_page;
   2855		struct inode *inode = page->mapping->host;
   2856		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   2857		const u32 sectorsize = fs_info->sectorsize;
   2858
   2859		/* Our read/write should always be sector aligned. */
   2860		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
   2861			btrfs_err(fs_info,
   2862		"partial page write in btrfs with offset %u and length %u",
   2863				  bvec->bv_offset, bvec->bv_len);
   2864		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
   2865			btrfs_info(fs_info,
   2866		"incomplete page write with offset %u and length %u",
   2867				   bvec->bv_offset, bvec->bv_len);
   2868
   2869		start = page_offset(page) + bvec->bv_offset;
   2870		end = start + bvec->bv_len - 1;
   2871
   2872		if (first_bvec) {
   2873			btrfs_record_physical_zoned(inode, start, bio);
   2874			first_bvec = false;
   2875		}
   2876
   2877		end_extent_writepage(page, error, start, end);
   2878
   2879		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
   2880	}
   2881
   2882	bio_put(bio);
   2883}
   2884
   2885/*
   2886 * Record previously processed extent range
   2887 *
   2888 * For endio_readpage_release_extent() to handle a full extent range, reducing
   2889 * the extent io operations.
   2890 */
   2891struct processed_extent {
   2892	struct btrfs_inode *inode;
   2893	/* Start of the range in @inode */
   2894	u64 start;
   2895	/* End of the range in @inode */
   2896	u64 end;
   2897	bool uptodate;
   2898};
   2899
   2900/*
   2901 * Try to release processed extent range
   2902 *
   2903 * May not release the extent range right now if the current range is
   2904 * contiguous to the processed extent.
   2905 *
   2906 * Will release the processed extent when @inode or @uptodate changes, or when
   2907 * the range is no longer contiguous to the processed range.
   2908 *
   2909 * Passing @inode == NULL will force processed extent to be released.
   2910 */
   2911static void endio_readpage_release_extent(struct processed_extent *processed,
   2912			      struct btrfs_inode *inode, u64 start, u64 end,
   2913			      bool uptodate)
   2914{
   2915	struct extent_state *cached = NULL;
   2916	struct extent_io_tree *tree;
   2917
   2918	/* The first extent, initialize @processed */
   2919	if (!processed->inode)
   2920		goto update;
   2921
   2922	/*
   2923	 * Contiguous to the processed extent, just update the end.
   2924	 *
   2925	 * Several things to notice:
   2926	 *
   2927	 * - bio can be merged as long as the on-disk bytenr is contiguous
   2928	 *   This means we can have pages belonging to other inodes, thus we need
   2929	 *   to check if the inode still matches.
   2930	 * - bvec can contain a range beyond the current page for multi-page bvec
   2931	 *   Thus we need the processed->end + 1 >= start check
   2932	 */
   2933	if (processed->inode == inode && processed->uptodate == uptodate &&
   2934	    processed->end + 1 >= start && end >= processed->end) {
   2935		processed->end = end;
   2936		return;
   2937	}
   2938
   2939	tree = &processed->inode->io_tree;
   2940	/*
   2941	 * Now we don't have range contiguous to the processed range, release
   2942	 * the processed range now.
   2943	 */
   2944	if (processed->uptodate && tree->track_uptodate)
   2945		set_extent_uptodate(tree, processed->start, processed->end,
   2946				    &cached, GFP_ATOMIC);
   2947	unlock_extent_cached_atomic(tree, processed->start, processed->end,
   2948				    &cached);
   2949
   2950update:
   2951	/* Update processed to current range */
   2952	processed->inode = inode;
   2953	processed->start = start;
   2954	processed->end = end;
   2955	processed->uptodate = uptodate;
   2956}
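
/*
 * Worked example, not part of the original file, for the batching above: three
 * uptodate bvecs covering [0, 4095], [4096, 8191] and [12288, 16383] of the
 * same inode result in one unlock of [0, 8191] (when the non-contiguous third
 * range arrives) and one unlock of [12288, 16383] (when the final NULL-inode
 * call flushes the last processed range).
 */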
   2957
   2958static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
   2959{
   2960	ASSERT(PageLocked(page));
   2961	if (!btrfs_is_subpage(fs_info, page))
   2962		return;
   2963
   2964	ASSERT(PagePrivate(page));
   2965	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
   2966}
   2967
   2968/*
   2969 * Find extent buffer for a given bytenr.
   2970 *
   2971 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
   2972 * in endio context.
   2973 */
   2974static struct extent_buffer *find_extent_buffer_readpage(
   2975		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
   2976{
   2977	struct extent_buffer *eb;
   2978
   2979	/*
   2980	 * For regular sectorsize, we can use page->private to grab extent
   2981	 * buffer
   2982	 */
   2983	if (fs_info->nodesize >= PAGE_SIZE) {
   2984		ASSERT(PagePrivate(page) && page->private);
   2985		return (struct extent_buffer *)page->private;
   2986	}
   2987
   2988	/* For subpage case, we need to lookup extent buffer xarray */
   2989	eb = xa_load(&fs_info->extent_buffers,
   2990		     bytenr >> fs_info->sectorsize_bits);
   2991	ASSERT(eb);
   2992	return eb;
   2993}
   2994
   2995/*
   2996 * after a readpage IO is done, we need to:
   2997 * clear the uptodate bits on error
   2998 * set the uptodate bits if things worked
   2999 * set the page up to date if all extents in the tree are uptodate
   3000 * clear the lock bit in the extent tree
   3001 * unlock the page if there are no other extents locked for it
   3002 *
   3003 * Scheduling is not allowed, so the extent state tree is expected
   3004 * to have one and only one object corresponding to this IO.
   3005 */
   3006static void end_bio_extent_readpage(struct bio *bio)
   3007{
   3008	struct bio_vec *bvec;
   3009	struct btrfs_bio *bbio = btrfs_bio(bio);
   3010	struct extent_io_tree *tree, *failure_tree;
   3011	struct processed_extent processed = { 0 };
   3012	/*
   3013	 * The offset to the beginning of a bio, since one bio can never be
   3014	 * larger than UINT_MAX, u32 here is enough.
   3015	 */
   3016	u32 bio_offset = 0;
   3017	int mirror;
   3018	int ret;
   3019	struct bvec_iter_all iter_all;
   3020
   3021	ASSERT(!bio_flagged(bio, BIO_CLONED));
   3022	bio_for_each_segment_all(bvec, bio, iter_all) {
   3023		bool uptodate = !bio->bi_status;
   3024		struct page *page = bvec->bv_page;
   3025		struct inode *inode = page->mapping->host;
   3026		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   3027		const u32 sectorsize = fs_info->sectorsize;
   3028		unsigned int error_bitmap = (unsigned int)-1;
   3029		u64 start;
   3030		u64 end;
   3031		u32 len;
   3032
   3033		btrfs_debug(fs_info,
   3034			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
   3035			bio->bi_iter.bi_sector, bio->bi_status,
   3036			bbio->mirror_num);
   3037		tree = &BTRFS_I(inode)->io_tree;
   3038		failure_tree = &BTRFS_I(inode)->io_failure_tree;
   3039
   3040		/*
   3041		 * We always issue full-sector reads, but if some block in a
   3042		 * page fails to read, blk_update_request() will advance
   3043		 * bv_offset and adjust bv_len to compensate.  Print a warning
   3044		 * for unaligned offsets, and an error if they don't add up to
   3045		 * a full sector.
   3046		 */
   3047		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
   3048			btrfs_err(fs_info,
   3049		"partial page read in btrfs with offset %u and length %u",
   3050				  bvec->bv_offset, bvec->bv_len);
   3051		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
   3052				     sectorsize))
   3053			btrfs_info(fs_info,
   3054		"incomplete page read with offset %u and length %u",
   3055				   bvec->bv_offset, bvec->bv_len);
   3056
   3057		start = page_offset(page) + bvec->bv_offset;
   3058		end = start + bvec->bv_len - 1;
   3059		len = bvec->bv_len;
   3060
   3061		mirror = bbio->mirror_num;
   3062		if (likely(uptodate)) {
   3063			if (is_data_inode(inode)) {
   3064				error_bitmap = btrfs_verify_data_csum(bbio,
   3065						bio_offset, page, start, end);
   3066				ret = error_bitmap;
   3067			} else {
   3068				ret = btrfs_validate_metadata_buffer(bbio,
   3069					page, start, end, mirror);
   3070			}
   3071			if (ret)
   3072				uptodate = false;
   3073			else
   3074				clean_io_failure(BTRFS_I(inode)->root->fs_info,
   3075						 failure_tree, tree, start,
   3076						 page,
   3077						 btrfs_ino(BTRFS_I(inode)), 0);
   3078		}
   3079
   3080		if (likely(uptodate))
   3081			goto readpage_ok;
   3082
   3083		if (is_data_inode(inode)) {
   3084			/*
   3085			 * If we failed to submit the IO at all we'll have a
   3086			 * mirror_num == 0, in which case we need to just mark
   3087			 * the page with an error and unlock it and carry on.
   3088			 */
   3089			if (mirror == 0)
   3090				goto readpage_ok;
   3091
   3092			/*
   3093			 * submit_data_read_repair() will handle all the good
   3094			 * and bad sectors, we just continue to the next bvec.
   3095			 */
   3096			submit_data_read_repair(inode, bio, bio_offset, page,
   3097						start - page_offset(page),
   3098						start, end, mirror,
   3099						error_bitmap);
   3100
   3101			ASSERT(bio_offset + len > bio_offset);
   3102			bio_offset += len;
   3103			continue;
   3104		} else {
   3105			struct extent_buffer *eb;
   3106
   3107			eb = find_extent_buffer_readpage(fs_info, page, start);
   3108			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
   3109			eb->read_mirror = mirror;
   3110			atomic_dec(&eb->io_pages);
   3111		}
   3112readpage_ok:
   3113		if (likely(uptodate)) {
   3114			loff_t i_size = i_size_read(inode);
   3115			pgoff_t end_index = i_size >> PAGE_SHIFT;
   3116
   3117			/*
   3118			 * Zero out the remaining part if this range straddles
   3119			 * i_size.
   3120			 *
   3121			 * Here we should only zero the range inside the bvec,
   3122			 * not touch anything else.
   3123			 *
   3124			 * NOTE: i_size is exclusive while end is inclusive.
   3125			 */
   3126			if (page->index == end_index && i_size <= end) {
   3127				u32 zero_start = max(offset_in_page(i_size),
   3128						     offset_in_page(start));
   3129
   3130				zero_user_segment(page, zero_start,
   3131						  offset_in_page(end) + 1);
   3132			}
   3133		}
   3134		ASSERT(bio_offset + len > bio_offset);
   3135		bio_offset += len;
   3136
   3137		/* Update page status and unlock */
   3138		end_page_read(page, uptodate, start, len);
   3139		endio_readpage_release_extent(&processed, BTRFS_I(inode),
   3140					      start, end, PageUptodate(page));
   3141	}
   3142	/* Release the last extent */
   3143	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
   3144	btrfs_bio_free_csum(bbio);
   3145	bio_put(bio);
   3146}
   3147
   3148/**
   3149 * Populate every free slot in a provided array with pages.
   3150 *
   3151 * @nr_pages:   number of pages to allocate
   3152 * @page_array: the array to fill with pages; any existing non-null entries in
   3153 * 		the array will be skipped
   3154 *
   3155 * Return: 0        if all pages were able to be allocated;
   3156 *         -ENOMEM  otherwise, and the caller is responsible for freeing all
   3157 *                  non-null page pointers in the array.
   3158 */
   3159int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
   3160{
   3161	unsigned int allocated;
   3162
   3163	for (allocated = 0; allocated < nr_pages;) {
   3164		unsigned int last = allocated;
   3165
   3166		allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
   3167
   3168		if (allocated == nr_pages)
   3169			return 0;
   3170
   3171		/*
   3172		 * During this iteration, no page could be allocated, even
   3173		 * though alloc_pages_bulk_array() falls back to alloc_page()
   3174		 * if it could not bulk-allocate. So we must be out of memory.
   3175		 */
   3176		if (allocated == last)
   3177			return -ENOMEM;
   3178
   3179		memalloc_retry_wait(GFP_NOFS);
   3180	}
   3181	return 0;
   3182}
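
/*
 * Illustrative sketch, not part of the original file: allocate a small page
 * array and release everything on failure, as the comment above requires.
 * The array size is an assumption for the example only.
 *
 *	struct page *pages[4] = { NULL };
 *
 *	if (btrfs_alloc_page_array(4, pages)) {
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			if (pages[i])
 *				__free_page(pages[i]);
 *	}
 */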
   3183
   3184/*
   3185 * Initialize the members up to but not including 'bio'. Use after allocating a
   3186 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
   3187 * 'bio' because use of __GFP_ZERO is not supported.
   3188 */
   3189static inline void btrfs_bio_init(struct btrfs_bio *bbio)
   3190{
   3191	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
   3192}
   3193
   3194/*
   3195 * Allocate a bio with an embedded btrfs_bio, with @nr_iovecs as the maximum number of iovecs.
   3196 *
   3197 * The bio allocation is backed by bioset and does not fail.
   3198 */
   3199struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
   3200{
   3201	struct bio *bio;
   3202
   3203	ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
   3204	bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset);
   3205	btrfs_bio_init(btrfs_bio(bio));
   3206	return bio;
   3207}
   3208
   3209struct bio *btrfs_bio_clone(struct block_device *bdev, struct bio *bio)
   3210{
   3211	struct btrfs_bio *bbio;
   3212	struct bio *new;
   3213
   3214	/* Bio allocation backed by a bioset does not fail */
   3215	new = bio_alloc_clone(bdev, bio, GFP_NOFS, &btrfs_bioset);
   3216	bbio = btrfs_bio(new);
   3217	btrfs_bio_init(bbio);
   3218	bbio->iter = bio->bi_iter;
   3219	return new;
   3220}
   3221
   3222struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
   3223{
   3224	struct bio *bio;
   3225	struct btrfs_bio *bbio;
   3226
   3227	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
   3228
   3229	/* this will never fail when it's backed by a bioset */
   3230	bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
   3231	ASSERT(bio);
   3232
   3233	bbio = btrfs_bio(bio);
   3234	btrfs_bio_init(bbio);
   3235
   3236	bio_trim(bio, offset >> 9, size >> 9);
   3237	bbio->iter = bio->bi_iter;
   3238	return bio;
   3239}
   3240
   3241/**
   3242 * Attempt to add a page to bio
   3243 *
   3244 * @bio_ctrl:	record both the bio, and its compress_type
   3245 * @page:	page to add to the bio
   3246 * @disk_bytenr:  offset of the new bio or to check whether we are adding
   3247 *                a contiguous page to the previous one
   3248 * @size:	portion of page that we want to write
   3249 * @pg_offset:	starting offset in the page
   3250 * @compress_type:   compression type of the current bio to see if we can merge them
   3251 *
   3252 * Attempt to add a page to bio considering stripe alignment etc.
   3253 *
   3254 * Return >= 0 for the number of bytes added to the bio.
   3255 * Can return 0 if the current bio is already at stripe/zone boundary.
   3256 * Return <0 for error.
   3257 */
   3258static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
   3259			      struct page *page,
   3260			      u64 disk_bytenr, unsigned int size,
   3261			      unsigned int pg_offset,
   3262			      enum btrfs_compression_type compress_type)
   3263{
   3264	struct bio *bio = bio_ctrl->bio;
   3265	u32 bio_size = bio->bi_iter.bi_size;
   3266	u32 real_size;
   3267	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
   3268	bool contig;
   3269	int ret;
   3270
   3271	ASSERT(bio);
   3272	/* The limit should be calculated when bio_ctrl->bio is allocated */
   3273	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
   3274	if (bio_ctrl->compress_type != compress_type)
   3275		return 0;
   3276
   3277	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
   3278		contig = bio->bi_iter.bi_sector == sector;
   3279	else
   3280		contig = bio_end_sector(bio) == sector;
   3281	if (!contig)
   3282		return 0;
   3283
   3284	real_size = min(bio_ctrl->len_to_oe_boundary,
   3285			bio_ctrl->len_to_stripe_boundary) - bio_size;
   3286	real_size = min(real_size, size);
   3287
   3288	/*
   3289	 * If real_size is 0, never call bio_add_*_page(), as even size is 0,
   3290	 * bio will still execute its endio function on the page!
   3291	 */
   3292	if (real_size == 0)
   3293		return 0;
   3294
   3295	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
   3296		ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
   3297	else
   3298		ret = bio_add_page(bio, page, real_size, pg_offset);
   3299
   3300	return ret;
   3301}
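
/*
 * Illustrative sketch, not part of the original file: a caller of
 * btrfs_bio_add_page() is expected to submit the current bio and allocate a
 * new one when fewer bytes than requested were added (the real caller,
 * submit_extent_page() below, follows this pattern).  The variables are
 * assumptions for the example only.
 *
 *	int added;
 *
 *	added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, size,
 *				   pg_offset, compress_type);
 *	if (added < size) {
 *		... submit bio_ctrl->bio, allocate a new bio, then add the
 *		    remaining size - added bytes ...
 *	}
 */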
   3302
   3303static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
   3304			       struct btrfs_inode *inode, u64 file_offset)
   3305{
   3306	struct btrfs_fs_info *fs_info = inode->root->fs_info;
   3307	struct btrfs_io_geometry geom;
   3308	struct btrfs_ordered_extent *ordered;
   3309	struct extent_map *em;
   3310	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
   3311	int ret;
   3312
   3313	/*
   3314	 * Pages for compressed extent are never submitted to disk directly,
   3315	 * thus it has no real boundary, just set them to U32_MAX.
   3316	 *
   3317	 * The split happens for real compressed bio, which happens in
   3318	 * btrfs_submit_compressed_read/write().
   3319	 */
   3320	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
   3321		bio_ctrl->len_to_oe_boundary = U32_MAX;
   3322		bio_ctrl->len_to_stripe_boundary = U32_MAX;
   3323		return 0;
   3324	}
   3325	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
   3326	if (IS_ERR(em))
   3327		return PTR_ERR(em);
   3328	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
   3329				    logical, &geom);
   3330	free_extent_map(em);
   3331	if (ret < 0) {
   3332		return ret;
   3333	}
   3334	if (geom.len > U32_MAX)
   3335		bio_ctrl->len_to_stripe_boundary = U32_MAX;
   3336	else
   3337		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
   3338
   3339	if (bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
   3340		bio_ctrl->len_to_oe_boundary = U32_MAX;
   3341		return 0;
   3342	}
   3343
   3344	/* Ordered extent not yet created, so we're good */
   3345	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
   3346	if (!ordered) {
   3347		bio_ctrl->len_to_oe_boundary = U32_MAX;
   3348		return 0;
   3349	}
   3350
   3351	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
   3352		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
   3353	btrfs_put_ordered_extent(ordered);
   3354	return 0;
   3355}
   3356
   3357static int alloc_new_bio(struct btrfs_inode *inode,
   3358			 struct btrfs_bio_ctrl *bio_ctrl,
   3359			 struct writeback_control *wbc,
   3360			 unsigned int opf,
   3361			 bio_end_io_t end_io_func,
   3362			 u64 disk_bytenr, u32 offset, u64 file_offset,
   3363			 enum btrfs_compression_type compress_type)
   3364{
   3365	struct btrfs_fs_info *fs_info = inode->root->fs_info;
   3366	struct bio *bio;
   3367	int ret;
   3368
   3369	bio = btrfs_bio_alloc(BIO_MAX_VECS);
   3370	/*
    3371	 * For a compressed page range, its disk_bytenr is always the @disk_bytenr
    3372	 * passed in, regardless of whether we have added any range to a previous bio.
   3373	 */
   3374	if (compress_type != BTRFS_COMPRESS_NONE)
   3375		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
   3376	else
   3377		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
   3378	bio_ctrl->bio = bio;
   3379	bio_ctrl->compress_type = compress_type;
   3380	bio->bi_end_io = end_io_func;
   3381	bio->bi_private = &inode->io_tree;
   3382	bio->bi_opf = opf;
   3383	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
   3384	if (ret < 0)
   3385		goto error;
   3386
   3387	if (wbc) {
   3388		/*
    3389		 * For zone append we need the block_device that we are going
    3390		 * to write to set in the bio, so that the hardware limitations
    3391		 * can be respected.  Look it up here:
   3392		 */
   3393		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
   3394			struct btrfs_device *dev;
   3395
   3396			dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
   3397						     fs_info->sectorsize);
   3398			if (IS_ERR(dev)) {
   3399				ret = PTR_ERR(dev);
   3400				goto error;
   3401			}
   3402
   3403			bio_set_dev(bio, dev->bdev);
   3404		} else {
   3405			/*
   3406			 * Otherwise pick the last added device to support
   3407			 * cgroup writeback.  For multi-device file systems this
   3408			 * means blk-cgroup policies have to always be set on the
   3409			 * last added/replaced device.  This is a bit odd but has
   3410			 * been like that for a long time.
   3411			 */
   3412			bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
   3413		}
   3414		wbc_init_bio(wbc, bio);
   3415	} else {
   3416		ASSERT(bio_op(bio) != REQ_OP_ZONE_APPEND);
   3417	}
   3418	return 0;
   3419error:
   3420	bio_ctrl->bio = NULL;
   3421	bio->bi_status = errno_to_blk_status(ret);
   3422	bio_endio(bio);
   3423	return ret;
   3424}
   3425
   3426/*
   3427 * @opf:	bio REQ_OP_* and REQ_* flags as one value
   3428 * @wbc:	optional writeback control for io accounting
   3429 * @page:	page to add to the bio
   3430 * @disk_bytenr: logical bytenr where the write will be
   3431 * @size:	portion of page that we want to write to
   3432 * @pg_offset:	offset of the new bio or to check whether we are adding
   3433 *              a contiguous page to the previous one
    3434 * @bio_ctrl:	     holds the current bio being assembled and its boundaries
    3435 * @end_io_func:     end_io callback for new bio
    3436 * @mirror_num:	     desired mirror to read/write
    3437 * @compress_type:   compress type for current bio
    3438 * @force_bio_submit: submit the bio assembled so far before adding this page
   3439 */
   3440static int submit_extent_page(unsigned int opf,
   3441			      struct writeback_control *wbc,
   3442			      struct btrfs_bio_ctrl *bio_ctrl,
   3443			      struct page *page, u64 disk_bytenr,
   3444			      size_t size, unsigned long pg_offset,
   3445			      bio_end_io_t end_io_func,
   3446			      int mirror_num,
   3447			      enum btrfs_compression_type compress_type,
   3448			      bool force_bio_submit)
   3449{
   3450	int ret = 0;
   3451	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
   3452	unsigned int cur = pg_offset;
   3453
   3454	ASSERT(bio_ctrl);
   3455
   3456	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
   3457	       pg_offset + size <= PAGE_SIZE);
   3458	if (force_bio_submit && bio_ctrl->bio) {
   3459		submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->compress_type);
   3460		bio_ctrl->bio = NULL;
   3461	}
   3462
   3463	while (cur < pg_offset + size) {
   3464		u32 offset = cur - pg_offset;
   3465		int added;
   3466
   3467		/* Allocate new bio if needed */
   3468		if (!bio_ctrl->bio) {
   3469			ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
   3470					    end_io_func, disk_bytenr, offset,
   3471					    page_offset(page) + cur,
   3472					    compress_type);
   3473			if (ret < 0)
   3474				return ret;
   3475		}
   3476		/*
   3477		 * We must go through btrfs_bio_add_page() to ensure each
   3478		 * page range won't cross various boundaries.
   3479		 */
   3480		if (compress_type != BTRFS_COMPRESS_NONE)
   3481			added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
   3482					size - offset, pg_offset + offset,
   3483					compress_type);
   3484		else
   3485			added = btrfs_bio_add_page(bio_ctrl, page,
   3486					disk_bytenr + offset, size - offset,
   3487					pg_offset + offset, compress_type);
   3488
   3489		/* Metadata page range should never be split */
   3490		if (!is_data_inode(&inode->vfs_inode))
   3491			ASSERT(added == 0 || added == size - offset);
   3492
    3493		/* We added at least part of the page, update the cgroup accounting */
   3494		if (wbc && added)
   3495			wbc_account_cgroup_owner(wbc, page, added);
   3496
    3497		/* We have reached the boundary, submit right now */
   3498		if (added < size - offset) {
   3499			/* The bio should contain some page(s) */
   3500			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
   3501			submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->compress_type);
   3502			bio_ctrl->bio = NULL;
   3503		}
   3504		cur += added;
   3505	}
   3506	return 0;
   3507}
   3508
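/*
 * Attach the extent buffer related private data to @page.
 *
 * For regular (nodesize >= PAGE_SIZE) cases the eb itself is stored as the
 * page private.  For subpage cases a btrfs_subpage structure is attached
 * instead, using @prealloc if one was provided.
 */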
   3509static int attach_extent_buffer_page(struct extent_buffer *eb,
   3510				     struct page *page,
   3511				     struct btrfs_subpage *prealloc)
   3512{
   3513	struct btrfs_fs_info *fs_info = eb->fs_info;
   3514	int ret = 0;
   3515
   3516	/*
    3517	 * If the page is mapped to the btree inode, we should hold the private
    3518	 * lock to prevent races.
   3519	 * For cloned or dummy extent buffers, their pages are not mapped and
   3520	 * will not race with any other ebs.
   3521	 */
   3522	if (page->mapping)
   3523		lockdep_assert_held(&page->mapping->private_lock);
   3524
   3525	if (fs_info->nodesize >= PAGE_SIZE) {
   3526		if (!PagePrivate(page))
   3527			attach_page_private(page, eb);
   3528		else
   3529			WARN_ON(page->private != (unsigned long)eb);
   3530		return 0;
   3531	}
   3532
   3533	/* Already mapped, just free prealloc */
   3534	if (PagePrivate(page)) {
   3535		btrfs_free_subpage(prealloc);
   3536		return 0;
   3537	}
   3538
   3539	if (prealloc)
   3540		/* Has preallocated memory for subpage */
   3541		attach_page_private(page, prealloc);
   3542	else
   3543		/* Do new allocation to attach subpage */
   3544		ret = btrfs_attach_subpage(fs_info, page,
   3545					   BTRFS_SUBPAGE_METADATA);
   3546	return ret;
   3547}
   3548
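/*
 * Ensure @page has its private data set so the extent_io code knows the page
 * is managed by btrfs.
 *
 * For subpage cases a btrfs_subpage structure is attached, otherwise the
 * private value is simply set to EXTENT_PAGE_PRIVATE.
 */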
   3549int set_page_extent_mapped(struct page *page)
   3550{
   3551	struct btrfs_fs_info *fs_info;
   3552
   3553	ASSERT(page->mapping);
   3554
   3555	if (PagePrivate(page))
   3556		return 0;
   3557
   3558	fs_info = btrfs_sb(page->mapping->host->i_sb);
   3559
   3560	if (btrfs_is_subpage(fs_info, page))
   3561		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
   3562
   3563	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
   3564	return 0;
   3565}
   3566
   3567void clear_page_extent_mapped(struct page *page)
   3568{
   3569	struct btrfs_fs_info *fs_info;
   3570
   3571	ASSERT(page->mapping);
   3572
   3573	if (!PagePrivate(page))
   3574		return;
   3575
   3576	fs_info = btrfs_sb(page->mapping->host->i_sb);
   3577	if (btrfs_is_subpage(fs_info, page))
   3578		return btrfs_detach_subpage(fs_info, page);
   3579
   3580	detach_page_private(page);
   3581}
   3582
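/*
 * Return an extent map covering @start.
 *
 * Reuse *@em_cached if it is still in the tree and covers @start; otherwise
 * drop the cached map, do a regular btrfs_get_extent() lookup and cache the
 * result for the next call.
 */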
   3583static struct extent_map *
   3584__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
   3585		 u64 start, u64 len, struct extent_map **em_cached)
   3586{
   3587	struct extent_map *em;
   3588
   3589	if (em_cached && *em_cached) {
   3590		em = *em_cached;
   3591		if (extent_map_in_tree(em) && start >= em->start &&
   3592		    start < extent_map_end(em)) {
   3593			refcount_inc(&em->refs);
   3594			return em;
   3595		}
   3596
   3597		free_extent_map(em);
   3598		*em_cached = NULL;
   3599	}
   3600
   3601	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
   3602	if (em_cached && !IS_ERR(em)) {
   3603		BUG_ON(*em_cached);
   3604		refcount_inc(&em->refs);
   3605		*em_cached = em;
   3606	}
   3607	return em;
   3608}
   3609/*
    3610 * Basic readpage implementation.  Locked extent state structs are inserted
    3611 * into the tree and removed when the IO is done (by the end_io
    3612 * handlers).
    3613 * XXX JDM: This needs looking at to ensure proper page locking.
    3614 * Return 0 on success, otherwise return an error.
   3615 */
   3616static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
   3617		      struct btrfs_bio_ctrl *bio_ctrl,
   3618		      unsigned int read_flags, u64 *prev_em_start)
   3619{
   3620	struct inode *inode = page->mapping->host;
   3621	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   3622	u64 start = page_offset(page);
   3623	const u64 end = start + PAGE_SIZE - 1;
   3624	u64 cur = start;
   3625	u64 extent_offset;
   3626	u64 last_byte = i_size_read(inode);
   3627	u64 block_start;
   3628	u64 cur_end;
   3629	struct extent_map *em;
   3630	int ret = 0;
   3631	size_t pg_offset = 0;
   3632	size_t iosize;
   3633	size_t blocksize = inode->i_sb->s_blocksize;
   3634	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
   3635
   3636	ret = set_page_extent_mapped(page);
   3637	if (ret < 0) {
   3638		unlock_extent(tree, start, end);
   3639		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
   3640		unlock_page(page);
   3641		goto out;
   3642	}
   3643
   3644	if (page->index == last_byte >> PAGE_SHIFT) {
   3645		size_t zero_offset = offset_in_page(last_byte);
   3646
   3647		if (zero_offset) {
   3648			iosize = PAGE_SIZE - zero_offset;
   3649			memzero_page(page, zero_offset, iosize);
   3650			flush_dcache_page(page);
   3651		}
   3652	}
   3653	begin_page_read(fs_info, page);
   3654	while (cur <= end) {
   3655		unsigned long this_bio_flag = 0;
   3656		bool force_bio_submit = false;
   3657		u64 disk_bytenr;
   3658
   3659		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
   3660		if (cur >= last_byte) {
   3661			struct extent_state *cached = NULL;
   3662
   3663			iosize = PAGE_SIZE - pg_offset;
   3664			memzero_page(page, pg_offset, iosize);
   3665			flush_dcache_page(page);
   3666			set_extent_uptodate(tree, cur, cur + iosize - 1,
   3667					    &cached, GFP_NOFS);
   3668			unlock_extent_cached(tree, cur,
   3669					     cur + iosize - 1, &cached);
   3670			end_page_read(page, true, cur, iosize);
   3671			break;
   3672		}
   3673		em = __get_extent_map(inode, page, pg_offset, cur,
   3674				      end - cur + 1, em_cached);
   3675		if (IS_ERR(em)) {
   3676			unlock_extent(tree, cur, end);
   3677			end_page_read(page, false, cur, end + 1 - cur);
   3678			ret = PTR_ERR(em);
   3679			break;
   3680		}
   3681		extent_offset = cur - em->start;
   3682		BUG_ON(extent_map_end(em) <= cur);
   3683		BUG_ON(end < cur);
   3684
   3685		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
   3686			this_bio_flag = em->compress_type;
   3687
   3688		iosize = min(extent_map_end(em) - cur, end - cur + 1);
   3689		cur_end = min(extent_map_end(em) - 1, end);
   3690		iosize = ALIGN(iosize, blocksize);
   3691		if (this_bio_flag != BTRFS_COMPRESS_NONE)
   3692			disk_bytenr = em->block_start;
   3693		else
   3694			disk_bytenr = em->block_start + extent_offset;
   3695		block_start = em->block_start;
   3696		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
   3697			block_start = EXTENT_MAP_HOLE;
   3698
   3699		/*
   3700		 * If we have a file range that points to a compressed extent
   3701		 * and it's followed by a consecutive file range that points
   3702		 * to the same compressed extent (possibly with a different
   3703		 * offset and/or length, so it either points to the whole extent
   3704		 * or only part of it), we must make sure we do not submit a
   3705		 * single bio to populate the pages for the 2 ranges because
   3706		 * this makes the compressed extent read zero out the pages
   3707		 * belonging to the 2nd range. Imagine the following scenario:
   3708		 *
   3709		 *  File layout
   3710		 *  [0 - 8K]                     [8K - 24K]
   3711		 *    |                               |
   3712		 *    |                               |
   3713		 * points to extent X,         points to extent X,
   3714		 * offset 4K, length of 8K     offset 0, length 16K
   3715		 *
   3716		 * [extent X, compressed length = 4K uncompressed length = 16K]
   3717		 *
   3718		 * If the bio to read the compressed extent covers both ranges,
   3719		 * it will decompress extent X into the pages belonging to the
   3720		 * first range and then it will stop, zeroing out the remaining
   3721		 * pages that belong to the other range that points to extent X.
   3722		 * So here we make sure we submit 2 bios, one for the first
    3723		 * range and another one for the second range. Both will target
   3724		 * the same physical extent from disk, but we can't currently
   3725		 * make the compressed bio endio callback populate the pages
   3726		 * for both ranges because each compressed bio is tightly
   3727		 * coupled with a single extent map, and each range can have
   3728		 * an extent map with a different offset value relative to the
   3729		 * uncompressed data of our extent and different lengths. This
   3730		 * is a corner case so we prioritize correctness over
   3731		 * non-optimal behavior (submitting 2 bios for the same extent).
   3732		 */
   3733		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
   3734		    prev_em_start && *prev_em_start != (u64)-1 &&
   3735		    *prev_em_start != em->start)
   3736			force_bio_submit = true;
   3737
   3738		if (prev_em_start)
   3739			*prev_em_start = em->start;
   3740
   3741		free_extent_map(em);
   3742		em = NULL;
   3743
   3744		/* we've found a hole, just zero and go on */
   3745		if (block_start == EXTENT_MAP_HOLE) {
   3746			struct extent_state *cached = NULL;
   3747
   3748			memzero_page(page, pg_offset, iosize);
   3749			flush_dcache_page(page);
   3750
   3751			set_extent_uptodate(tree, cur, cur + iosize - 1,
   3752					    &cached, GFP_NOFS);
   3753			unlock_extent_cached(tree, cur,
   3754					     cur + iosize - 1, &cached);
   3755			end_page_read(page, true, cur, iosize);
   3756			cur = cur + iosize;
   3757			pg_offset += iosize;
   3758			continue;
   3759		}
   3760		/* the get_extent function already copied into the page */
   3761		if (test_range_bit(tree, cur, cur_end,
   3762				   EXTENT_UPTODATE, 1, NULL)) {
   3763			unlock_extent(tree, cur, cur + iosize - 1);
   3764			end_page_read(page, true, cur, iosize);
   3765			cur = cur + iosize;
   3766			pg_offset += iosize;
   3767			continue;
   3768		}
    3769		/* We have an inline extent but it didn't get marked
    3770		 * uptodate.  Error out.
    3771		 */
   3772		if (block_start == EXTENT_MAP_INLINE) {
   3773			unlock_extent(tree, cur, cur + iosize - 1);
   3774			end_page_read(page, false, cur, iosize);
   3775			cur = cur + iosize;
   3776			pg_offset += iosize;
   3777			continue;
   3778		}
   3779
   3780		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
   3781					 bio_ctrl, page, disk_bytenr, iosize,
   3782					 pg_offset,
   3783					 end_bio_extent_readpage, 0,
   3784					 this_bio_flag,
   3785					 force_bio_submit);
   3786		if (ret) {
   3787			/*
   3788			 * We have to unlock the remaining range, or the page
   3789			 * will never be unlocked.
   3790			 */
   3791			unlock_extent(tree, cur, end);
   3792			end_page_read(page, false, cur, end + 1 - cur);
   3793			goto out;
   3794		}
   3795		cur = cur + iosize;
   3796		pg_offset += iosize;
   3797	}
   3798out:
   3799	return ret;
   3800}
   3801
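/*
 * Read a single folio: wait for and flush any ordered extents covering it,
 * run the readpage code, and submit whatever bio was assembled (even on
 * error, so the endio handler can do the cleanup).
 */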
   3802int btrfs_read_folio(struct file *file, struct folio *folio)
   3803{
   3804	struct page *page = &folio->page;
   3805	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
   3806	u64 start = page_offset(page);
   3807	u64 end = start + PAGE_SIZE - 1;
   3808	struct btrfs_bio_ctrl bio_ctrl = { 0 };
   3809	int ret;
   3810
   3811	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
   3812
   3813	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
   3814	/*
   3815	 * If btrfs_do_readpage() failed we will want to submit the assembled
   3816	 * bio to do the cleanup.
   3817	 */
   3818	if (bio_ctrl.bio)
   3819		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.compress_type);
   3820	return ret;
   3821}
   3822
   3823static inline void contiguous_readpages(struct page *pages[], int nr_pages,
   3824					u64 start, u64 end,
   3825					struct extent_map **em_cached,
   3826					struct btrfs_bio_ctrl *bio_ctrl,
   3827					u64 *prev_em_start)
   3828{
   3829	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
   3830	int index;
   3831
   3832	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
   3833
   3834	for (index = 0; index < nr_pages; index++) {
   3835		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
   3836				  REQ_RAHEAD, prev_em_start);
   3837		put_page(pages[index]);
   3838	}
   3839}
   3840
   3841/*
   3842 * helper for __extent_writepage, doing all of the delayed allocation setup.
   3843 *
    3844 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
   3845 * to write the page (copy into inline extent).  In this case the IO has
   3846 * been started and the page is already unlocked.
   3847 *
   3848 * This returns 0 if all went well (page still locked)
   3849 * This returns < 0 if there were errors (page still locked)
   3850 */
   3851static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
   3852		struct page *page, struct writeback_control *wbc)
   3853{
   3854	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
   3855	u64 delalloc_start = page_offset(page);
   3856	u64 delalloc_to_write = 0;
   3857	/* How many pages are started by btrfs_run_delalloc_range() */
   3858	unsigned long nr_written = 0;
   3859	int ret;
   3860	int page_started = 0;
   3861
   3862	while (delalloc_start < page_end) {
   3863		u64 delalloc_end = page_end;
   3864		bool found;
   3865
   3866		found = find_lock_delalloc_range(&inode->vfs_inode, page,
   3867					       &delalloc_start,
   3868					       &delalloc_end);
   3869		if (!found) {
   3870			delalloc_start = delalloc_end + 1;
   3871			continue;
   3872		}
   3873		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
   3874				delalloc_end, &page_started, &nr_written, wbc);
   3875		if (ret) {
   3876			btrfs_page_set_error(inode->root->fs_info, page,
   3877					     page_offset(page), PAGE_SIZE);
   3878			return ret;
   3879		}
   3880		/*
   3881		 * delalloc_end is already one less than the total length, so
   3882		 * we don't subtract one from PAGE_SIZE
   3883		 */
   3884		delalloc_to_write += (delalloc_end - delalloc_start +
   3885				      PAGE_SIZE) >> PAGE_SHIFT;
   3886		delalloc_start = delalloc_end + 1;
   3887	}
   3888	if (wbc->nr_to_write < delalloc_to_write) {
   3889		int thresh = 8192;
   3890
   3891		if (delalloc_to_write < thresh * 2)
   3892			thresh = delalloc_to_write;
   3893		wbc->nr_to_write = min_t(u64, delalloc_to_write,
   3894					 thresh);
   3895	}
   3896
    3897	/* Did btrfs_run_delalloc_range() already unlock and start the IO? */
   3898	if (page_started) {
   3899		/*
   3900		 * We've unlocked the page, so we can't update the mapping's
   3901		 * writeback index, just update nr_to_write.
   3902		 */
   3903		wbc->nr_to_write -= nr_written;
   3904		return 1;
   3905	}
   3906
   3907	return 0;
   3908}
   3909
   3910/*
   3911 * Find the first byte we need to write.
   3912 *
   3913 * For subpage, one page can contain several sectors, and
   3914 * __extent_writepage_io() will just grab all extent maps in the page
   3915 * range and try to submit all non-inline/non-compressed extents.
   3916 *
    3917 * This is a big problem for subpage: we shouldn't re-submit already written
    3918 * data at all.
    3919 * This function will look up the subpage dirty bitmap to find which range we
    3920 * really need to submit.
   3921 *
   3922 * Return the next dirty range in [@start, @end).
   3923 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
   3924 */
   3925static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
   3926				 struct page *page, u64 *start, u64 *end)
   3927{
   3928	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
   3929	struct btrfs_subpage_info *spi = fs_info->subpage_info;
   3930	u64 orig_start = *start;
   3931	/* Declare as unsigned long so we can use bitmap ops */
   3932	unsigned long flags;
   3933	int range_start_bit;
   3934	int range_end_bit;
   3935
   3936	/*
   3937	 * For regular sector size == page size case, since one page only
   3938	 * contains one sector, we return the page offset directly.
   3939	 */
   3940	if (!btrfs_is_subpage(fs_info, page)) {
   3941		*start = page_offset(page);
   3942		*end = page_offset(page) + PAGE_SIZE;
   3943		return;
   3944	}
   3945
   3946	range_start_bit = spi->dirty_offset +
   3947			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
   3948
   3949	/* We should have the page locked, but just in case */
   3950	spin_lock_irqsave(&subpage->lock, flags);
   3951	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
   3952			       spi->dirty_offset + spi->bitmap_nr_bits);
   3953	spin_unlock_irqrestore(&subpage->lock, flags);
   3954
   3955	range_start_bit -= spi->dirty_offset;
   3956	range_end_bit -= spi->dirty_offset;
   3957
   3958	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
   3959	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
   3960}
   3961
   3962/*
   3963 * helper for __extent_writepage.  This calls the writepage start hooks,
   3964 * and does the loop to map the page into extents and bios.
   3965 *
   3966 * We return 1 if the IO is started and the page is unlocked,
   3967 * 0 if all went well (page still locked)
   3968 * < 0 if there were errors (page still locked)
   3969 */
   3970static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
   3971				 struct page *page,
   3972				 struct writeback_control *wbc,
   3973				 struct extent_page_data *epd,
   3974				 loff_t i_size,
   3975				 int *nr_ret)
   3976{
   3977	struct btrfs_fs_info *fs_info = inode->root->fs_info;
   3978	u64 cur = page_offset(page);
   3979	u64 end = cur + PAGE_SIZE - 1;
   3980	u64 extent_offset;
   3981	u64 block_start;
   3982	struct extent_map *em;
   3983	int saved_ret = 0;
   3984	int ret = 0;
   3985	int nr = 0;
   3986	u32 opf = REQ_OP_WRITE;
   3987	const unsigned int write_flags = wbc_to_write_flags(wbc);
   3988	bool has_error = false;
   3989	bool compressed;
   3990
   3991	ret = btrfs_writepage_cow_fixup(page);
   3992	if (ret) {
   3993		/* Fixup worker will requeue */
   3994		redirty_page_for_writepage(wbc, page);
   3995		unlock_page(page);
   3996		return 1;
   3997	}
   3998
   3999	/*
   4000	 * we don't want to touch the inode after unlocking the page,
   4001	 * so we update the mapping writeback index now
   4002	 */
   4003	wbc->nr_to_write--;
   4004
   4005	while (cur <= end) {
   4006		u64 disk_bytenr;
   4007		u64 em_end;
   4008		u64 dirty_range_start = cur;
   4009		u64 dirty_range_end;
   4010		u32 iosize;
   4011
   4012		if (cur >= i_size) {
   4013			btrfs_writepage_endio_finish_ordered(inode, page, cur,
   4014							     end, true);
   4015			/*
   4016			 * This range is beyond i_size, thus we don't need to
   4017			 * bother writing back.
   4018			 * But we still need to clear the dirty subpage bit, or
   4019			 * the next time the page gets dirtied, we will try to
   4020			 * writeback the sectors with subpage dirty bits,
   4021			 * causing writeback without ordered extent.
   4022			 */
   4023			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
   4024			break;
   4025		}
   4026
   4027		find_next_dirty_byte(fs_info, page, &dirty_range_start,
   4028				     &dirty_range_end);
   4029		if (cur < dirty_range_start) {
   4030			cur = dirty_range_start;
   4031			continue;
   4032		}
   4033
   4034		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
   4035		if (IS_ERR(em)) {
   4036			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
   4037			ret = PTR_ERR_OR_ZERO(em);
   4038			has_error = true;
   4039			if (!saved_ret)
   4040				saved_ret = ret;
   4041			break;
   4042		}
   4043
   4044		extent_offset = cur - em->start;
   4045		em_end = extent_map_end(em);
   4046		ASSERT(cur <= em_end);
   4047		ASSERT(cur < end);
   4048		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
   4049		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
   4050		block_start = em->block_start;
   4051		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
   4052		disk_bytenr = em->block_start + extent_offset;
   4053
   4054		/*
   4055		 * Note that em_end from extent_map_end() and dirty_range_end from
    4056		 * find_next_dirty_byte() are both exclusive
   4057		 */
   4058		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
   4059
   4060		if (btrfs_use_zone_append(inode, em->block_start))
   4061			opf = REQ_OP_ZONE_APPEND;
   4062
   4063		free_extent_map(em);
   4064		em = NULL;
   4065
   4066		/*
   4067		 * compressed and inline extents are written through other
   4068		 * paths in the FS
   4069		 */
   4070		if (compressed || block_start == EXTENT_MAP_HOLE ||
   4071		    block_start == EXTENT_MAP_INLINE) {
   4072			if (compressed)
   4073				nr++;
   4074			else
   4075				btrfs_writepage_endio_finish_ordered(inode,
   4076						page, cur, cur + iosize - 1, true);
   4077			btrfs_page_clear_dirty(fs_info, page, cur, iosize);
   4078			cur += iosize;
   4079			continue;
   4080		}
   4081
   4082		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
   4083		if (!PageWriteback(page)) {
   4084			btrfs_err(inode->root->fs_info,
   4085				   "page %lu not writeback, cur %llu end %llu",
   4086			       page->index, cur, end);
   4087		}
   4088
   4089		/*
   4090		 * Although the PageDirty bit is cleared before entering this
   4091		 * function, subpage dirty bit is not cleared.
   4092		 * So clear subpage dirty bit here so next time we won't submit
   4093		 * page for range already written to disk.
   4094		 */
   4095		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
   4096
   4097		ret = submit_extent_page(opf | write_flags, wbc,
   4098					 &epd->bio_ctrl, page,
   4099					 disk_bytenr, iosize,
   4100					 cur - page_offset(page),
   4101					 end_bio_extent_writepage,
   4102					 0, 0, false);
   4103		if (ret) {
   4104			has_error = true;
   4105			if (!saved_ret)
   4106				saved_ret = ret;
   4107
   4108			btrfs_page_set_error(fs_info, page, cur, iosize);
   4109			if (PageWriteback(page))
   4110				btrfs_page_clear_writeback(fs_info, page, cur,
   4111							   iosize);
   4112		}
   4113
   4114		cur += iosize;
   4115		nr++;
   4116	}
   4117	/*
    4118	 * If we finish without problem, the page should not only have its dirty
    4119	 * flag cleared, but also have no subpage dirty bits left
   4120	 */
   4121	if (!has_error)
   4122		btrfs_page_assert_not_dirty(fs_info, page);
   4123	else
   4124		ret = saved_ret;
   4125	*nr_ret = nr;
   4126	return ret;
   4127}
   4128
   4129/*
   4130 * the writepage semantics are similar to regular writepage.  extent
   4131 * records are inserted to lock ranges in the tree, and as dirty areas
   4132 * are found, they are marked writeback.  Then the lock bits are removed
   4133 * and the end_io handler clears the writeback ranges
   4134 *
   4135 * Return 0 if everything goes well.
   4136 * Return <0 for error.
   4137 */
   4138static int __extent_writepage(struct page *page, struct writeback_control *wbc,
   4139			      struct extent_page_data *epd)
   4140{
   4141	struct folio *folio = page_folio(page);
   4142	struct inode *inode = page->mapping->host;
   4143	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
   4144	const u64 page_start = page_offset(page);
   4145	const u64 page_end = page_start + PAGE_SIZE - 1;
   4146	int ret;
   4147	int nr = 0;
   4148	size_t pg_offset;
   4149	loff_t i_size = i_size_read(inode);
   4150	unsigned long end_index = i_size >> PAGE_SHIFT;
   4151
   4152	trace___extent_writepage(page, inode, wbc);
   4153
   4154	WARN_ON(!PageLocked(page));
   4155
   4156	btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
   4157			       page_offset(page), PAGE_SIZE);
   4158
   4159	pg_offset = offset_in_page(i_size);
   4160	if (page->index > end_index ||
   4161	   (page->index == end_index && !pg_offset)) {
   4162		folio_invalidate(folio, 0, folio_size(folio));
   4163		folio_unlock(folio);
   4164		return 0;
   4165	}
   4166
   4167	if (page->index == end_index) {
   4168		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
   4169		flush_dcache_page(page);
   4170	}
   4171
   4172	ret = set_page_extent_mapped(page);
   4173	if (ret < 0) {
   4174		SetPageError(page);
   4175		goto done;
   4176	}
   4177
   4178	if (!epd->extent_locked) {
   4179		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
   4180		if (ret == 1)
   4181			return 0;
   4182		if (ret)
   4183			goto done;
   4184	}
   4185
   4186	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
   4187				    &nr);
   4188	if (ret == 1)
   4189		return 0;
   4190
   4191done:
   4192	if (nr == 0) {
   4193		/* make sure the mapping tag for page dirty gets cleared */
   4194		set_page_writeback(page);
   4195		end_page_writeback(page);
   4196	}
   4197	/*
   4198	 * Here we used to have a check for PageError() and then set @ret and
   4199	 * call end_extent_writepage().
   4200	 *
   4201	 * But in fact setting @ret here will cause different error paths
   4202	 * between subpage and regular sectorsize.
   4203	 *
   4204	 * For regular page size, we never submit current page, but only add
   4205	 * current page to current bio.
   4206	 * The bio submission can only happen in next page.
   4207	 * Thus if we hit the PageError() branch, @ret is already set to
   4208	 * non-zero value and will not get updated for regular sectorsize.
   4209	 *
   4210	 * But for subpage case, it's possible we submit part of current page,
   4211	 * thus can get PageError() set by submitted bio of the same page,
   4212	 * while our @ret is still 0.
   4213	 *
   4214	 * So here we unify the behavior and don't set @ret.
    4215	 * The error can still be properly passed to a higher layer as the page
    4216	 * will have its error bit set; here we just don't handle the IO failure.
   4217	 *
   4218	 * NOTE: This is just a hotfix for subpage.
    4219	 * The root fix will be to properly end the ordered extent when we hit
   4220	 * an error during writeback.
   4221	 *
   4222	 * But that needs a bigger refactoring, as we not only need to grab the
   4223	 * submitted OE, but also need to know exactly at which bytenr we hit
   4224	 * the error.
   4225	 * Currently the full page based __extent_writepage_io() is not
   4226	 * capable of that.
   4227	 */
   4228	if (PageError(page))
   4229		end_extent_writepage(page, ret, page_start, page_end);
   4230	if (epd->extent_locked) {
   4231		/*
   4232		 * If epd->extent_locked, it's from extent_write_locked_range(),
   4233		 * the page can either be locked by lock_page() or
   4234		 * process_one_page().
   4235		 * Let btrfs_page_unlock_writer() handle both cases.
   4236		 */
   4237		ASSERT(wbc);
   4238		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
   4239					 wbc->range_end + 1 - wbc->range_start);
   4240	} else {
   4241		unlock_page(page);
   4242	}
   4243	ASSERT(ret <= 0);
   4244	return ret;
   4245}
   4246
   4247void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
   4248{
   4249	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
   4250		       TASK_UNINTERRUPTIBLE);
   4251}
   4252
   4253static void end_extent_buffer_writeback(struct extent_buffer *eb)
   4254{
   4255	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
   4256	smp_mb__after_atomic();
   4257	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
   4258}
   4259
   4260/*
   4261 * Lock extent buffer status and pages for writeback.
   4262 *
   4263 * May try to flush write bio if we can't get the lock.
   4264 *
   4265 * Return  0 if the extent buffer doesn't need to be submitted.
   4266 *           (E.g. the extent buffer is not dirty)
    4267 * Return >0 if the extent buffer is submitted to a bio.
   4268 * Return <0 if something went wrong, no page is locked.
   4269 */
   4270static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
   4271			  struct extent_page_data *epd)
   4272{
   4273	struct btrfs_fs_info *fs_info = eb->fs_info;
   4274	int i, num_pages;
   4275	int flush = 0;
   4276	int ret = 0;
   4277
   4278	if (!btrfs_try_tree_write_lock(eb)) {
   4279		flush_write_bio(epd);
   4280		flush = 1;
   4281		btrfs_tree_lock(eb);
   4282	}
   4283
   4284	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
   4285		btrfs_tree_unlock(eb);
   4286		if (!epd->sync_io)
   4287			return 0;
   4288		if (!flush) {
   4289			flush_write_bio(epd);
   4290			flush = 1;
   4291		}
   4292		while (1) {
   4293			wait_on_extent_buffer_writeback(eb);
   4294			btrfs_tree_lock(eb);
   4295			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
   4296				break;
   4297			btrfs_tree_unlock(eb);
   4298		}
   4299	}
   4300
   4301	/*
    4302	 * We need to do this to prevent races with anyone who checks if the eb is
   4303	 * under IO since we can end up having no IO bits set for a short period
   4304	 * of time.
   4305	 */
   4306	spin_lock(&eb->refs_lock);
   4307	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
   4308		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
   4309		spin_unlock(&eb->refs_lock);
   4310		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
   4311		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
   4312					 -eb->len,
   4313					 fs_info->dirty_metadata_batch);
   4314		ret = 1;
   4315	} else {
   4316		spin_unlock(&eb->refs_lock);
   4317	}
   4318
   4319	btrfs_tree_unlock(eb);
   4320
   4321	/*
   4322	 * Either we don't need to submit any tree block, or we're submitting
   4323	 * subpage eb.
   4324	 * Subpage metadata doesn't use page locking at all, so we can skip
   4325	 * the page locking.
   4326	 */
   4327	if (!ret || fs_info->nodesize < PAGE_SIZE)
   4328		return ret;
   4329
   4330	num_pages = num_extent_pages(eb);
   4331	for (i = 0; i < num_pages; i++) {
   4332		struct page *p = eb->pages[i];
   4333
   4334		if (!trylock_page(p)) {
   4335			if (!flush) {
   4336				flush_write_bio(epd);
   4337				flush = 1;
   4338			}
   4339			lock_page(p);
   4340		}
   4341	}
   4342
   4343	return ret;
   4344}
   4345
   4346static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
   4347{
   4348	struct btrfs_fs_info *fs_info = eb->fs_info;
   4349
   4350	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
   4351	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
   4352		return;
   4353
   4354	/*
   4355	 * A read may stumble upon this buffer later, make sure that it gets an
   4356	 * error and knows there was an error.
   4357	 */
   4358	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
   4359
   4360	/*
   4361	 * We need to set the mapping with the io error as well because a write
    4362	 * error will flip the file system readonly; syncfs() would then
    4363	 * return 0 (because we are readonly) if we don't modify the err seq for
   4364	 * the superblock.
   4365	 */
   4366	mapping_set_error(page->mapping, -EIO);
   4367
   4368	/*
   4369	 * If we error out, we should add back the dirty_metadata_bytes
   4370	 * to make it consistent.
   4371	 */
   4372	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
   4373				 eb->len, fs_info->dirty_metadata_batch);
   4374
   4375	/*
   4376	 * If writeback for a btree extent that doesn't belong to a log tree
   4377	 * failed, increment the counter transaction->eb_write_errors.
   4378	 * We do this because while the transaction is running and before it's
   4379	 * committing (when we call filemap_fdata[write|wait]_range against
   4380	 * the btree inode), we might have
   4381	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
   4382	 * returns an error or an error happens during writeback, when we're
   4383	 * committing the transaction we wouldn't know about it, since the pages
   4384	 * can be no longer dirty nor marked anymore for writeback (if a
   4385	 * subsequent modification to the extent buffer didn't happen before the
   4386	 * transaction commit), which makes filemap_fdata[write|wait]_range not
   4387	 * able to find the pages tagged with SetPageError at transaction
   4388	 * commit time. So if this happens we must abort the transaction,
   4389	 * otherwise we commit a super block with btree roots that point to
   4390	 * btree nodes/leafs whose content on disk is invalid - either garbage
   4391	 * or the content of some node/leaf from a past generation that got
   4392	 * cowed or deleted and is no longer valid.
   4393	 *
   4394	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
   4395	 * not be enough - we need to distinguish between log tree extents vs
   4396	 * non-log tree extents, and the next filemap_fdatawait_range() call
   4397	 * will catch and clear such errors in the mapping - and that call might
   4398	 * be from a log sync and not from a transaction commit. Also, checking
   4399	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
   4400	 * not done and would not be reliable - the eb might have been released
   4401	 * from memory and reading it back again means that flag would not be
   4402	 * set (since it's a runtime flag, not persisted on disk).
   4403	 *
   4404	 * Using the flags below in the btree inode also makes us achieve the
   4405	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
   4406	 * writeback for all dirty pages and before filemap_fdatawait_range()
   4407	 * is called, the writeback for all dirty pages had already finished
   4408	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
   4409	 * filemap_fdatawait_range() would return success, as it could not know
   4410	 * that writeback errors happened (the pages were no longer tagged for
   4411	 * writeback).
   4412	 */
   4413	switch (eb->log_index) {
   4414	case -1:
   4415		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
   4416		break;
   4417	case 0:
   4418		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
   4419		break;
   4420	case 1:
   4421		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
   4422		break;
   4423	default:
   4424		BUG(); /* unexpected, logic error */
   4425	}
   4426}
   4427
   4428/*
   4429 * The endio specific version which won't touch any unsafe spinlock in endio
   4430 * context.
   4431 */
   4432static struct extent_buffer *find_extent_buffer_nolock(
   4433		struct btrfs_fs_info *fs_info, u64 start)
   4434{
   4435	struct extent_buffer *eb;
   4436
   4437	rcu_read_lock();
   4438	eb = xa_load(&fs_info->extent_buffers,
   4439		     start >> fs_info->sectorsize_bits);
   4440	if (eb && atomic_inc_not_zero(&eb->refs)) {
   4441		rcu_read_unlock();
   4442		return eb;
   4443	}
   4444	rcu_read_unlock();
   4445	return NULL;
   4446}
   4447
   4448/*
   4449 * The endio function for subpage extent buffer write.
   4450 *
   4451 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
    4452 * after all extent buffers in the page have finished their writeback.
   4453 */
   4454static void end_bio_subpage_eb_writepage(struct bio *bio)
   4455{
   4456	struct btrfs_fs_info *fs_info;
   4457	struct bio_vec *bvec;
   4458	struct bvec_iter_all iter_all;
   4459
   4460	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
   4461	ASSERT(fs_info->nodesize < PAGE_SIZE);
   4462
   4463	ASSERT(!bio_flagged(bio, BIO_CLONED));
   4464	bio_for_each_segment_all(bvec, bio, iter_all) {
   4465		struct page *page = bvec->bv_page;
   4466		u64 bvec_start = page_offset(page) + bvec->bv_offset;
   4467		u64 bvec_end = bvec_start + bvec->bv_len - 1;
   4468		u64 cur_bytenr = bvec_start;
   4469
   4470		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
   4471
   4472		/* Iterate through all extent buffers in the range */
   4473		while (cur_bytenr <= bvec_end) {
   4474			struct extent_buffer *eb;
   4475			int done;
   4476
   4477			/*
   4478			 * Here we can't use find_extent_buffer(), as it may
   4479			 * try to lock eb->refs_lock, which is not safe in endio
   4480			 * context.
   4481			 */
   4482			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
   4483			ASSERT(eb);
   4484
   4485			cur_bytenr = eb->start + eb->len;
   4486
   4487			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
   4488			done = atomic_dec_and_test(&eb->io_pages);
   4489			ASSERT(done);
   4490
   4491			if (bio->bi_status ||
   4492			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
   4493				ClearPageUptodate(page);
   4494				set_btree_ioerr(page, eb);
   4495			}
   4496
   4497			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
   4498						      eb->len);
   4499			end_extent_buffer_writeback(eb);
   4500			/*
    4501			 * free_extent_buffer() will grab a spinlock, which is not
    4502			 * safe in endio context. Thus here we manually decrement
    4503			 * the ref.
   4504			 */
   4505			atomic_dec(&eb->refs);
   4506		}
   4507	}
   4508	bio_put(bio);
   4509}
   4510
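/*
 * The endio function for regular (non-subpage) extent buffer write.
 *
 * Ends page writeback for each page of the bio and finishes the extent
 * buffer writeback once the last of its pages has completed.
 */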
   4511static void end_bio_extent_buffer_writepage(struct bio *bio)
   4512{
   4513	struct bio_vec *bvec;
   4514	struct extent_buffer *eb;
   4515	int done;
   4516	struct bvec_iter_all iter_all;
   4517
   4518	ASSERT(!bio_flagged(bio, BIO_CLONED));
   4519	bio_for_each_segment_all(bvec, bio, iter_all) {
   4520		struct page *page = bvec->bv_page;
   4521
   4522		eb = (struct extent_buffer *)page->private;
   4523		BUG_ON(!eb);
   4524		done = atomic_dec_and_test(&eb->io_pages);
   4525
   4526		if (bio->bi_status ||
   4527		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
   4528			ClearPageUptodate(page);
   4529			set_btree_ioerr(page, eb);
   4530		}
   4531
   4532		end_page_writeback(page);
   4533
   4534		if (!done)
   4535			continue;
   4536
   4537		end_extent_buffer_writeback(eb);
   4538	}
   4539
   4540	bio_put(bio);
   4541}
   4542
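/*
 * Prepare an extent buffer for writeback: clear any previous write error,
 * set io_pages to the number of pages, and zero the unused area beyond the
 * last key pointer (node) or between the item headers and item data (leaf)
 * so no stale content is written out.
 */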
   4543static void prepare_eb_write(struct extent_buffer *eb)
   4544{
   4545	u32 nritems;
   4546	unsigned long start;
   4547	unsigned long end;
   4548
   4549	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
   4550	atomic_set(&eb->io_pages, num_extent_pages(eb));
   4551
    4552	/* Zero the part of the btree block beyond nritems to avoid stale content */
   4553	nritems = btrfs_header_nritems(eb);
   4554	if (btrfs_header_level(eb) > 0) {
   4555		end = btrfs_node_key_ptr_offset(nritems);
   4556		memzero_extent_buffer(eb, end, eb->len - end);
   4557	} else {
   4558		/*
   4559		 * Leaf:
   4560		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
   4561		 */
   4562		start = btrfs_item_nr_offset(nritems);
   4563		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
   4564		memzero_extent_buffer(eb, start, end - start);
   4565	}
   4566}
   4567
   4568/*
   4569 * Unlike the work in write_one_eb(), we rely completely on extent locking.
   4570 * Page locking is only utilized at minimum to keep the VMM code happy.
   4571 */
   4572static int write_one_subpage_eb(struct extent_buffer *eb,
   4573				struct writeback_control *wbc,
   4574				struct extent_page_data *epd)
   4575{
   4576	struct btrfs_fs_info *fs_info = eb->fs_info;
   4577	struct page *page = eb->pages[0];
   4578	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
   4579	bool no_dirty_ebs = false;
   4580	int ret;
   4581
   4582	prepare_eb_write(eb);
   4583
   4584	/* clear_page_dirty_for_io() in subpage helper needs page locked */
   4585	lock_page(page);
   4586	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
   4587
   4588	/* Check if this is the last dirty bit to update nr_written */
   4589	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
   4590							  eb->start, eb->len);
   4591	if (no_dirty_ebs)
   4592		clear_page_dirty_for_io(page);
   4593
   4594	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
   4595			&epd->bio_ctrl, page, eb->start, eb->len,
   4596			eb->start - page_offset(page),
   4597			end_bio_subpage_eb_writepage, 0, 0, false);
   4598	if (ret) {
   4599		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
   4600		set_btree_ioerr(page, eb);
   4601		unlock_page(page);
   4602
   4603		if (atomic_dec_and_test(&eb->io_pages))
   4604			end_extent_buffer_writeback(eb);
   4605		return -EIO;
   4606	}
   4607	unlock_page(page);
   4608	/*
    4609	 * Submission finished without problem.  If no range of the page is
    4610	 * dirty anymore, we have submitted a full page; update nr_written in wbc.
   4611	 */
   4612	if (no_dirty_ebs)
   4613		wbc->nr_to_write--;
   4614	return ret;
   4615}
   4616
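/*
 * Write all pages of a regular (non-subpage) extent buffer.
 *
 * Each page is cleared of its dirty flag, marked writeback and added to the
 * write bio.  On failure the remaining pages are cleaned up and -EIO is
 * returned.
 */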
   4617static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
   4618			struct writeback_control *wbc,
   4619			struct extent_page_data *epd)
   4620{
   4621	u64 disk_bytenr = eb->start;
   4622	int i, num_pages;
   4623	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
   4624	int ret = 0;
   4625
   4626	prepare_eb_write(eb);
   4627
   4628	num_pages = num_extent_pages(eb);
   4629	for (i = 0; i < num_pages; i++) {
   4630		struct page *p = eb->pages[i];
   4631
   4632		clear_page_dirty_for_io(p);
   4633		set_page_writeback(p);
   4634		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
   4635					 &epd->bio_ctrl, p, disk_bytenr,
   4636					 PAGE_SIZE, 0,
   4637					 end_bio_extent_buffer_writepage,
   4638					 0, 0, false);
   4639		if (ret) {
   4640			set_btree_ioerr(p, eb);
   4641			if (PageWriteback(p))
   4642				end_page_writeback(p);
   4643			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
   4644				end_extent_buffer_writeback(eb);
   4645			ret = -EIO;
   4646			break;
   4647		}
   4648		disk_bytenr += PAGE_SIZE;
   4649		wbc->nr_to_write--;
   4650		unlock_page(p);
   4651	}
   4652
   4653	if (unlikely(ret)) {
   4654		for (; i < num_pages; i++) {
   4655			struct page *p = eb->pages[i];
   4656			clear_page_dirty_for_io(p);
   4657			unlock_page(p);
   4658		}
   4659	}
   4660
   4661	return ret;
   4662}
   4663
   4664/*
   4665 * Submit one subpage btree page.
   4666 *
    4667 * The main differences from submit_eb_page() are:
   4668 * - Page locking
   4669 *   For subpage, we don't rely on page locking at all.
   4670 *
   4671 * - Flush write bio
   4672 *   We only flush bio if we may be unable to fit current extent buffers into
   4673 *   current bio.
   4674 *
   4675 * Return >=0 for the number of submitted extent buffers.
   4676 * Return <0 for fatal error.
   4677 */
   4678static int submit_eb_subpage(struct page *page,
   4679			     struct writeback_control *wbc,
   4680			     struct extent_page_data *epd)
   4681{
   4682	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
   4683	int submitted = 0;
   4684	u64 page_start = page_offset(page);
   4685	int bit_start = 0;
   4686	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
   4687	int ret;
   4688
    4689	/* Lock and write each dirty extent buffer in the range */
   4690	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
   4691		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
   4692		struct extent_buffer *eb;
   4693		unsigned long flags;
   4694		u64 start;
   4695
   4696		/*
   4697		 * Take private lock to ensure the subpage won't be detached
   4698		 * in the meantime.
   4699		 */
   4700		spin_lock(&page->mapping->private_lock);
   4701		if (!PagePrivate(page)) {
   4702			spin_unlock(&page->mapping->private_lock);
   4703			break;
   4704		}
   4705		spin_lock_irqsave(&subpage->lock, flags);
   4706		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
   4707			      subpage->bitmaps)) {
   4708			spin_unlock_irqrestore(&subpage->lock, flags);
   4709			spin_unlock(&page->mapping->private_lock);
   4710			bit_start++;
   4711			continue;
   4712		}
   4713
   4714		start = page_start + bit_start * fs_info->sectorsize;
   4715		bit_start += sectors_per_node;
   4716
   4717		/*
   4718		 * Here we just want to grab the eb without touching extra
   4719		 * spin locks, so call find_extent_buffer_nolock().
   4720		 */
   4721		eb = find_extent_buffer_nolock(fs_info, start);
   4722		spin_unlock_irqrestore(&subpage->lock, flags);
   4723		spin_unlock(&page->mapping->private_lock);
   4724
   4725		/*
   4726		 * The eb has already reached 0 refs thus find_extent_buffer()
   4727		 * doesn't return it. We don't need to write back such eb
   4728		 * anyway.
   4729		 */
   4730		if (!eb)
   4731			continue;
   4732
   4733		ret = lock_extent_buffer_for_io(eb, epd);
   4734		if (ret == 0) {
   4735			free_extent_buffer(eb);
   4736			continue;
   4737		}
   4738		if (ret < 0) {
   4739			free_extent_buffer(eb);
   4740			goto cleanup;
   4741		}
   4742		ret = write_one_subpage_eb(eb, wbc, epd);
   4743		free_extent_buffer(eb);
   4744		if (ret < 0)
   4745			goto cleanup;
   4746		submitted++;
   4747	}
   4748	return submitted;
   4749
   4750cleanup:
   4751	/* We hit error, end bio for the submitted extent buffers */
   4752	end_write_bio(epd, ret);
   4753	return ret;
   4754}
   4755
   4756/*
   4757 * Submit all page(s) of one extent buffer.
   4758 *
   4759 * @page:	the page of one extent buffer
   4760 * @eb_context:	to determine if we need to submit this page, if current page
   4761 *		belongs to this eb, we don't need to submit
   4762 *
   4763 * The caller should pass each page in their bytenr order, and here we use
   4764 * @eb_context to determine if we have submitted pages of one extent buffer.
   4765 *
   4766 * If we have, we just skip until we hit a new page that doesn't belong to
   4767 * current @eb_context.
   4768 *
   4769 * If not, we submit all the page(s) of the extent buffer.
   4770 *
   4771 * Return >0 if we have submitted the extent buffer successfully.
   4772 * Return 0 if we don't need to submit the page, as it's already submitted by
   4773 * previous call.
   4774 * Return <0 for fatal error.
   4775 */
   4776static int submit_eb_page(struct page *page, struct writeback_control *wbc,
   4777			  struct extent_page_data *epd,
   4778			  struct extent_buffer **eb_context)
   4779{
   4780	struct address_space *mapping = page->mapping;
   4781	struct btrfs_block_group *cache = NULL;
   4782	struct extent_buffer *eb;
   4783	int ret;
   4784
   4785	if (!PagePrivate(page))
   4786		return 0;
   4787
   4788	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
   4789		return submit_eb_subpage(page, wbc, epd);
   4790
   4791	spin_lock(&mapping->private_lock);
   4792	if (!PagePrivate(page)) {
   4793		spin_unlock(&mapping->private_lock);
   4794		return 0;
   4795	}
   4796
   4797	eb = (struct extent_buffer *)page->private;
   4798
   4799	/*
   4800	 * Shouldn't happen and normally this would be a BUG_ON but no point
   4801	 * crashing the machine for something we can survive anyway.
   4802	 */
   4803	if (WARN_ON(!eb)) {
   4804		spin_unlock(&mapping->private_lock);
   4805		return 0;
   4806	}
   4807
   4808	if (eb == *eb_context) {
   4809		spin_unlock(&mapping->private_lock);
   4810		return 0;
   4811	}
   4812	ret = atomic_inc_not_zero(&eb->refs);
   4813	spin_unlock(&mapping->private_lock);
   4814	if (!ret)
   4815		return 0;
   4816
   4817	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
   4818		/*
    4819		 * If for_sync, this hole will be filled by a
    4820		 * transaction commit.
   4821		 */
   4822		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
   4823			ret = -EAGAIN;
   4824		else
   4825			ret = 0;
   4826		free_extent_buffer(eb);
   4827		return ret;
   4828	}
   4829
   4830	*eb_context = eb;
   4831
   4832	ret = lock_extent_buffer_for_io(eb, epd);
   4833	if (ret <= 0) {
   4834		btrfs_revert_meta_write_pointer(cache, eb);
   4835		if (cache)
   4836			btrfs_put_block_group(cache);
   4837		free_extent_buffer(eb);
   4838		return ret;
   4839	}
   4840	if (cache) {
   4841		/*
   4842		 * Implies write in zoned mode. Mark the last eb in a block group.
   4843		 */
   4844		btrfs_schedule_zone_finish_bg(cache, eb);
   4845		btrfs_put_block_group(cache);
   4846	}
   4847	ret = write_one_eb(eb, wbc, epd);
   4848	free_extent_buffer(eb);
   4849	if (ret < 0)
   4850		return ret;
   4851	return 1;
   4852}
   4853
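/*
 * Write back dirty metadata pages of the btree inode.
 *
 * Walks the mapping with a pagevec (tagging pages for writeback when
 * WB_SYNC_ALL) and submits each extent buffer through submit_eb_page().
 * If the fs is already in an error state, no metadata write bio is
 * submitted and -EROFS is returned instead.
 */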
   4854int btree_write_cache_pages(struct address_space *mapping,
   4855				   struct writeback_control *wbc)
   4856{
   4857	struct extent_buffer *eb_context = NULL;
   4858	struct extent_page_data epd = {
   4859		.bio_ctrl = { 0 },
   4860		.extent_locked = 0,
   4861		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
   4862	};
   4863	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
   4864	int ret = 0;
   4865	int done = 0;
   4866	int nr_to_write_done = 0;
   4867	struct pagevec pvec;
   4868	int nr_pages;
   4869	pgoff_t index;
   4870	pgoff_t end;		/* Inclusive */
   4871	int scanned = 0;
   4872	xa_mark_t tag;
   4873
   4874	pagevec_init(&pvec);
   4875	if (wbc->range_cyclic) {
   4876		index = mapping->writeback_index; /* Start from prev offset */
   4877		end = -1;
   4878		/*
    4879		 * Starting from the beginning does not need to cycle over the
   4880		 * range, mark it as scanned.
   4881		 */
   4882		scanned = (index == 0);
   4883	} else {
   4884		index = wbc->range_start >> PAGE_SHIFT;
   4885		end = wbc->range_end >> PAGE_SHIFT;
   4886		scanned = 1;
   4887	}
   4888	if (wbc->sync_mode == WB_SYNC_ALL)
   4889		tag = PAGECACHE_TAG_TOWRITE;
   4890	else
   4891		tag = PAGECACHE_TAG_DIRTY;
   4892	btrfs_zoned_meta_io_lock(fs_info);
   4893retry:
   4894	if (wbc->sync_mode == WB_SYNC_ALL)
   4895		tag_pages_for_writeback(mapping, index, end);
   4896	while (!done && !nr_to_write_done && (index <= end) &&
   4897	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
   4898			tag))) {
   4899		unsigned i;
   4900
   4901		for (i = 0; i < nr_pages; i++) {
   4902			struct page *page = pvec.pages[i];
   4903
   4904			ret = submit_eb_page(page, wbc, &epd, &eb_context);
   4905			if (ret == 0)
   4906				continue;
   4907			if (ret < 0) {
   4908				done = 1;
   4909				break;
   4910			}
   4911
   4912			/*
   4913			 * the filesystem may choose to bump up nr_to_write.
   4914			 * We have to make sure to honor the new nr_to_write
   4915			 * at any time
   4916			 */
   4917			nr_to_write_done = wbc->nr_to_write <= 0;
   4918		}
   4919		pagevec_release(&pvec);
   4920		cond_resched();
   4921	}
   4922	if (!scanned && !done) {
   4923		/*
   4924		 * We hit the last page and there is more work to be done: wrap
   4925		 * back to the start of the file
   4926		 */
   4927		scanned = 1;
   4928		index = 0;
   4929		goto retry;
   4930	}
   4931	if (ret < 0) {
   4932		end_write_bio(&epd, ret);
   4933		goto out;
   4934	}
   4935	/*
   4936	 * If something went wrong, don't allow any metadata write bio to be
   4937	 * submitted.
   4938	 *
    4939	 * This would prevent use-after-free if we had dirty pages not
    4940	 * cleaned up, which can still happen with fuzzed images.
    4941	 *
    4942	 * - Bad extent tree
    4943	 *   Allows an existing tree block to be allocated for other trees.
    4944	 *
    4945	 * - Log tree operations
    4946	 *   Existing tree blocks get allocated to the log tree, have their
    4947	 *   generation bumped, then get cleaned in tree re-balance.
   4948	 *   Such tree block will not be written back, since it's clean,
   4949	 *   thus no WRITTEN flag set.
   4950	 *   And after log writes back, this tree block is not traced by
   4951	 *   any dirty extent_io_tree.
   4952	 *
   4953	 * - Offending tree block gets re-dirtied from its original owner
   4954	 *   Since it has bumped generation, no WRITTEN flag, it can be
   4955	 *   reused without COWing. This tree block will not be traced
   4956	 *   by btrfs_transaction::dirty_pages.
   4957	 *
   4958	 *   Now such dirty tree block will not be cleaned by any dirty
   4959	 *   extent io tree. Thus we don't want to submit such wild eb
   4960	 *   if the fs already has error.
   4961	 */
   4962	if (!BTRFS_FS_ERROR(fs_info)) {
   4963		flush_write_bio(&epd);
   4964	} else {
   4965		ret = -EROFS;
   4966		end_write_bio(&epd, ret);
   4967	}
   4968out:
   4969	btrfs_zoned_meta_io_unlock(fs_info);
   4970	/*
   4971	 * We can get ret > 0 from submit_extent_page() indicating how many ebs
   4972	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
   4973	 */
   4974	if (ret > 0)
   4975		ret = 0;
   4976	return ret;
   4977}
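
       /*
        * A rough sketch of the tag-then-write pattern used above (illustrative,
        * not a verbatim trace): for WB_SYNC_ALL the dirty pages are first
        * re-tagged and then looked up by the TOWRITE tag,
        *
        *	tag_pages_for_writeback(mapping, index, end);
        *	while ((nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
        *					end, PAGECACHE_TAG_TOWRITE)))
        *		submit_eb_page() for each page in pvec;
        *
        * which bounds a data-integrity sync to pages that were dirty when the
        * sync started; pages redirtied during the walk only carry the DIRTY tag
        * and are left for a later writeback cycle, avoiding livelock.
        */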
   4978
   4979/**
   4980 * Walk the list of dirty pages of the given address space and write all of them.
   4981 *
   4982 * @mapping: address space structure to write
   4983 * @wbc:     subtract the number of written pages from *@wbc->nr_to_write
   4984 * @epd:     holds context for the write, namely the bio
   4985 *
   4986 * If a page is already under I/O, write_cache_pages() skips it, even
   4987 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
   4988 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
   4989 * and msync() need to guarantee that all the data which was dirty at the time
   4990 * the call was made get new I/O started against them.  If wbc->sync_mode is
   4991 * WB_SYNC_ALL then we were called for data integrity and we must wait for
   4992 * existing IO to complete.
   4993 */
   4994static int extent_write_cache_pages(struct address_space *mapping,
   4995			     struct writeback_control *wbc,
   4996			     struct extent_page_data *epd)
   4997{
   4998	struct inode *inode = mapping->host;
   4999	int ret = 0;
   5000	int done = 0;
   5001	int nr_to_write_done = 0;
   5002	struct pagevec pvec;
   5003	int nr_pages;
   5004	pgoff_t index;
   5005	pgoff_t end;		/* Inclusive */
   5006	pgoff_t done_index;
   5007	int range_whole = 0;
   5008	int scanned = 0;
   5009	xa_mark_t tag;
   5010
   5011	/*
   5012	 * We have to hold onto the inode so that ordered extents can do their
   5013	 * work when the IO finishes.  The alternative to this is failing to add
   5014	 * an ordered extent if the igrab() fails there and that is a huge pain
   5015	 * to deal with, so instead just hold onto the inode throughout the
   5016	 * writepages operation.  If it fails here we are freeing up the inode
   5017	 * anyway and we'd rather not waste our time writing out stuff that is
   5018	 * going to be truncated anyway.
   5019	 */
   5020	if (!igrab(inode))
   5021		return 0;
   5022
   5023	pagevec_init(&pvec);
   5024	if (wbc->range_cyclic) {
   5025		index = mapping->writeback_index; /* Start from prev offset */
   5026		end = -1;
   5027		/*
   5028		 * Starting from the beginning means we do not need to cycle
   5029		 * over the range again, so mark it as scanned.
   5030		 */
   5031		scanned = (index == 0);
   5032	} else {
   5033		index = wbc->range_start >> PAGE_SHIFT;
   5034		end = wbc->range_end >> PAGE_SHIFT;
   5035		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
   5036			range_whole = 1;
   5037		scanned = 1;
   5038	}
   5039
   5040	/*
   5041	 * We do the tagged writepage as long as the snapshot flush bit is set
   5042	 * and we are the first one to do the filemap_flush() on this inode.
   5043	 *
   5044	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
   5045	 * not race in and drop the bit.
   5046	 */
   5047	if (range_whole && wbc->nr_to_write == LONG_MAX &&
   5048	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
   5049			       &BTRFS_I(inode)->runtime_flags))
   5050		wbc->tagged_writepages = 1;
   5051
   5052	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
   5053		tag = PAGECACHE_TAG_TOWRITE;
   5054	else
   5055		tag = PAGECACHE_TAG_DIRTY;
   5056retry:
   5057	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
   5058		tag_pages_for_writeback(mapping, index, end);
   5059	done_index = index;
   5060	while (!done && !nr_to_write_done && (index <= end) &&
   5061			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
   5062						&index, end, tag))) {
   5063		unsigned i;
   5064
   5065		for (i = 0; i < nr_pages; i++) {
   5066			struct page *page = pvec.pages[i];
   5067
   5068			done_index = page->index + 1;
   5069			/*
   5070			 * At this point we hold neither the i_pages lock nor
   5071			 * the page lock: the page may be truncated or
   5072			 * invalidated (changing page->mapping to NULL),
   5073			 * or even swizzled back from swapper_space to
   5074			 * tmpfs file mapping
   5075			 */
   5076			if (!trylock_page(page)) {
   5077				flush_write_bio(epd);
   5078				lock_page(page);
   5079			}
   5080
   5081			if (unlikely(page->mapping != mapping)) {
   5082				unlock_page(page);
   5083				continue;
   5084			}
   5085
   5086			if (wbc->sync_mode != WB_SYNC_NONE) {
   5087				if (PageWriteback(page))
   5088					flush_write_bio(epd);
   5089				wait_on_page_writeback(page);
   5090			}
   5091
   5092			if (PageWriteback(page) ||
   5093			    !clear_page_dirty_for_io(page)) {
   5094				unlock_page(page);
   5095				continue;
   5096			}
   5097
   5098			ret = __extent_writepage(page, wbc, epd);
   5099			if (ret < 0) {
   5100				done = 1;
   5101				break;
   5102			}
   5103
   5104			/*
   5105			 * the filesystem may choose to bump up nr_to_write.
   5106			 * We have to make sure to honor the new nr_to_write
   5107			 * at any time
   5108			 */
   5109			nr_to_write_done = wbc->nr_to_write <= 0;
   5110		}
   5111		pagevec_release(&pvec);
   5112		cond_resched();
   5113	}
   5114	if (!scanned && !done) {
   5115		/*
   5116		 * We hit the last page and there is more work to be done: wrap
   5117		 * back to the start of the file
   5118		 */
   5119		scanned = 1;
   5120		index = 0;
   5121
   5122		/*
   5123		 * If we're looping we could run into a page that is locked by a
   5124		 * writer and that writer could be waiting on writeback for a
   5125		 * page in our current bio, and thus deadlock, so flush the
   5126		 * write bio here.
   5127		 */
   5128		flush_write_bio(epd);
   5129		goto retry;
   5130	}
   5131
   5132	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
   5133		mapping->writeback_index = done_index;
   5134
   5135	btrfs_add_delayed_iput(inode);
   5136	return ret;
   5137}
   5138
   5139int extent_write_full_page(struct page *page, struct writeback_control *wbc)
   5140{
   5141	int ret;
   5142	struct extent_page_data epd = {
   5143		.bio_ctrl = { 0 },
   5144		.extent_locked = 0,
   5145		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
   5146	};
   5147
   5148	ret = __extent_writepage(page, wbc, &epd);
   5149	ASSERT(ret <= 0);
   5150	if (ret < 0) {
   5151		end_write_bio(&epd, ret);
   5152		return ret;
   5153	}
   5154
   5155	flush_write_bio(&epd);
   5156	return ret;
   5157}
   5158
   5159/*
   5160 * Submit the pages in the range to bio for call sites whose delalloc range has
   5161 * already been run (aka, ordered extent inserted) and all pages are still
   5162 * locked.
   5163 */
   5164int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
   5165{
   5166	bool found_error = false;
   5167	int first_error = 0;
   5168	int ret = 0;
   5169	struct address_space *mapping = inode->i_mapping;
   5170	struct page *page;
   5171	u64 cur = start;
   5172	unsigned long nr_pages;
   5173	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
   5174	struct extent_page_data epd = {
   5175		.bio_ctrl = { 0 },
   5176		.extent_locked = 1,
   5177		.sync_io = 1,
   5178	};
   5179	struct writeback_control wbc_writepages = {
   5180		.sync_mode	= WB_SYNC_ALL,
   5181		.range_start	= start,
   5182		.range_end	= end + 1,
   5183		/* We're called from an async helper function */
   5184		.punt_to_cgroup	= 1,
   5185		.no_cgroup_owner = 1,
   5186	};
   5187
   5188	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
   5189	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
   5190		   PAGE_SHIFT;
   5191	wbc_writepages.nr_to_write = nr_pages * 2;
   5192
   5193	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
   5194	while (cur <= end) {
   5195		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
   5196
   5197		page = find_get_page(mapping, cur >> PAGE_SHIFT);
   5198		/*
   5199		 * All pages in the range are locked since
   5200		 * btrfs_run_delalloc_range(), thus there is no way to clear
   5201		 * the page dirty flag.
   5202		 */
   5203		ASSERT(PageLocked(page));
   5204		ASSERT(PageDirty(page));
   5205		clear_page_dirty_for_io(page);
   5206		ret = __extent_writepage(page, &wbc_writepages, &epd);
   5207		ASSERT(ret <= 0);
   5208		if (ret < 0) {
   5209			found_error = true;
   5210			first_error = ret;
   5211		}
   5212		put_page(page);
   5213		cur = cur_end + 1;
   5214	}
   5215
   5216	if (!found_error)
   5217		flush_write_bio(&epd);
   5218	else
   5219		end_write_bio(&epd, ret);
   5220
   5221	wbc_detach_inode(&wbc_writepages);
   5222	if (found_error)
   5223		return first_error;
   5224	return ret;
   5225}
   5226
   5227int extent_writepages(struct address_space *mapping,
   5228		      struct writeback_control *wbc)
   5229{
   5230	struct inode *inode = mapping->host;
   5231	int ret = 0;
   5232	struct extent_page_data epd = {
   5233		.bio_ctrl = { 0 },
   5234		.extent_locked = 0,
   5235		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
   5236	};
   5237
   5238	/*
   5239	 * Allow only a single thread to do the reloc work in zoned mode to
   5240	 * protect the write pointer updates.
   5241	 */
   5242	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
   5243	ret = extent_write_cache_pages(mapping, wbc, &epd);
   5244	ASSERT(ret <= 0);
   5245	if (ret < 0) {
   5246		btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
   5247		end_write_bio(&epd, ret);
   5248		return ret;
   5249	}
   5250	flush_write_bio(&epd);
   5251	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
   5252	return ret;
   5253}
   5254
   5255void extent_readahead(struct readahead_control *rac)
   5256{
   5257	struct btrfs_bio_ctrl bio_ctrl = { 0 };
   5258	struct page *pagepool[16];
   5259	struct extent_map *em_cached = NULL;
   5260	u64 prev_em_start = (u64)-1;
   5261	int nr;
   5262
   5263	while ((nr = readahead_page_batch(rac, pagepool))) {
   5264		u64 contig_start = readahead_pos(rac);
   5265		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
   5266
   5267		contiguous_readpages(pagepool, nr, contig_start, contig_end,
   5268				&em_cached, &bio_ctrl, &prev_em_start);
   5269	}
   5270
   5271	if (em_cached)
   5272		free_extent_map(em_cached);
   5273
   5274	if (bio_ctrl.bio)
   5275		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.compress_type);
   5276}
   5277
   5278/*
   5279 * basic invalidate_folio code, this waits on any locked or writeback
   5280 * ranges corresponding to the folio, and then deletes any extent state
   5281 * records from the tree
   5282 */
   5283int extent_invalidate_folio(struct extent_io_tree *tree,
   5284			  struct folio *folio, size_t offset)
   5285{
   5286	struct extent_state *cached_state = NULL;
   5287	u64 start = folio_pos(folio);
   5288	u64 end = start + folio_size(folio) - 1;
   5289	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
   5290
   5291	/* This function is only called for the btree inode */
   5292	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
   5293
   5294	start += ALIGN(offset, blocksize);
   5295	if (start > end)
   5296		return 0;
   5297
   5298	lock_extent_bits(tree, start, end, &cached_state);
   5299	folio_wait_writeback(folio);
   5300
   5301	/*
   5302	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
   5303	 * so here we only need to unlock the extent range to free any
   5304	 * existing extent state.
   5305	 */
   5306	unlock_extent_cached(tree, start, end, &cached_state);
   5307	return 0;
   5308}
   5309
   5310/*
   5311 * a helper for release_folio, this tests for areas of the page that
   5312 * are locked or under IO and drops the related state bits if it is safe
   5313 * to drop the page.
   5314 */
   5315static int try_release_extent_state(struct extent_io_tree *tree,
   5316				    struct page *page, gfp_t mask)
   5317{
   5318	u64 start = page_offset(page);
   5319	u64 end = start + PAGE_SIZE - 1;
   5320	int ret = 1;
   5321
   5322	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
   5323		ret = 0;
   5324	} else {
   5325		/*
   5326		 * At this point we can safely clear everything except the
   5327		 * locked bit, the nodatasum bit and the delalloc new bit.
   5328		 * The delalloc new bit will be cleared by ordered extent
   5329		 * completion.
   5330		 */
   5331		ret = __clear_extent_bit(tree, start, end,
   5332			 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
   5333			 0, 0, NULL, mask, NULL);
   5334
   5335		/* if clear_extent_bit failed for enomem reasons,
   5336		 * we can't allow the release to continue.
   5337		 */
   5338		if (ret < 0)
   5339			ret = 0;
   5340		else
   5341			ret = 1;
   5342	}
   5343	return ret;
   5344}
   5345
   5346/*
   5347 * a helper for release_folio.  As long as there are no locked extents
   5348 * in the range corresponding to the page, both state records and extent
   5349 * map records are removed
   5350 */
   5351int try_release_extent_mapping(struct page *page, gfp_t mask)
   5352{
   5353	struct extent_map *em;
   5354	u64 start = page_offset(page);
   5355	u64 end = start + PAGE_SIZE - 1;
   5356	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
   5357	struct extent_io_tree *tree = &btrfs_inode->io_tree;
   5358	struct extent_map_tree *map = &btrfs_inode->extent_tree;
   5359
   5360	if (gfpflags_allow_blocking(mask) &&
   5361	    page->mapping->host->i_size > SZ_16M) {
   5362		u64 len;
   5363		while (start <= end) {
   5364			struct btrfs_fs_info *fs_info;
   5365			u64 cur_gen;
   5366
   5367			len = end - start + 1;
   5368			write_lock(&map->lock);
   5369			em = lookup_extent_mapping(map, start, len);
   5370			if (!em) {
   5371				write_unlock(&map->lock);
   5372				break;
   5373			}
   5374			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
   5375			    em->start != start) {
   5376				write_unlock(&map->lock);
   5377				free_extent_map(em);
   5378				break;
   5379			}
   5380			if (test_range_bit(tree, em->start,
   5381					   extent_map_end(em) - 1,
   5382					   EXTENT_LOCKED, 0, NULL))
   5383				goto next;
   5384			/*
   5385			 * If it's not in the list of modified extents, used
   5386			 * by a fast fsync, we can remove it. If it's being
   5387			 * logged we can safely remove it since fsync took an
   5388			 * extra reference on the em.
   5389			 */
   5390			if (list_empty(&em->list) ||
   5391			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
   5392				goto remove_em;
   5393			/*
   5394			 * If it's in the list of modified extents, remove it
   5395			 * only if its generation is older than the current one,
   5396			 * in which case we don't need it for a fast fsync.
   5397			 * Otherwise don't remove it, we could be racing with an
   5398			 * ongoing fast fsync that could miss the new extent.
   5399			 */
   5400			fs_info = btrfs_inode->root->fs_info;
   5401			spin_lock(&fs_info->trans_lock);
   5402			cur_gen = fs_info->generation;
   5403			spin_unlock(&fs_info->trans_lock);
   5404			if (em->generation >= cur_gen)
   5405				goto next;
   5406remove_em:
   5407			/*
   5408			 * We only remove extent maps that are not in the list of
   5409			 * modified extents or that are in the list but with a
   5410			 * generation lower than the current generation, so there
   5411			 * is no need to set the full fsync flag on the inode (it
   5412			 * hurts the fsync performance for workloads with a data
   5413			 * size that exceeds or is close to the system's memory).
   5414			 */
   5415			remove_extent_mapping(map, em);
   5416			/* once for the rb tree */
   5417			free_extent_map(em);
   5418next:
   5419			start = extent_map_end(em);
   5420			write_unlock(&map->lock);
   5421
   5422			/* once for us */
   5423			free_extent_map(em);
   5424
   5425			cond_resched(); /* Allow large-extent preemption. */
   5426		}
   5427	}
   5428	return try_release_extent_state(tree, page, mask);
   5429}
   5430
   5431/*
   5432 * helper function for fiemap, which doesn't want to see any holes.
   5433 * This maps until we find something past 'last'
   5434 */
   5435static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
   5436						u64 offset, u64 last)
   5437{
   5438	u64 sectorsize = btrfs_inode_sectorsize(inode);
   5439	struct extent_map *em;
   5440	u64 len;
   5441
   5442	if (offset >= last)
   5443		return NULL;
   5444
   5445	while (1) {
   5446		len = last - offset;
   5447		if (len == 0)
   5448			break;
   5449		len = ALIGN(len, sectorsize);
   5450		em = btrfs_get_extent_fiemap(inode, offset, len);
   5451		if (IS_ERR(em))
   5452			return em;
   5453
   5454		/* if this isn't a hole return it */
   5455		if (em->block_start != EXTENT_MAP_HOLE)
   5456			return em;
   5457
   5458		/* this is a hole, advance to the next extent */
   5459		offset = extent_map_end(em);
   5460		free_extent_map(em);
   5461		if (offset >= last)
   5462			break;
   5463	}
   5464	return NULL;
   5465}
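
       /*
        * Illustrative example of the hole skipping above, assuming a file with
        * extent maps [0, 4K) data, [4K, 8K) hole and [8K, 12K) data: a call
        * get_extent_skip_holes(inode, 4K, 16K) first receives the hole map at
        * 4K, advances @offset to extent_map_end() == 8K and returns the
        * [8K, 12K) map on the next iteration.  If only holes remain up to
        * @last, NULL is returned instead.
        */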
   5466
   5467/*
   5468 * To cache the previous fiemap extent
   5469 *
   5470 * Will be used for merging fiemap extents
   5471 */
   5472struct fiemap_cache {
   5473	u64 offset;
   5474	u64 phys;
   5475	u64 len;
   5476	u32 flags;
   5477	bool cached;
   5478};
   5479
   5480/*
   5481 * Helper to submit a fiemap extent.
   5482 *
   5483 * Will try to merge the current fiemap extent specified by @offset, @phys,
   5484 * @len and @flags with the cached one.
   5485 * Only when we fail to merge will the cached one be submitted as a
   5486 * fiemap extent.
   5487 *
   5488 * Return value is the same as fiemap_fill_next_extent().
   5489 */
   5490static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
   5491				struct fiemap_cache *cache,
   5492				u64 offset, u64 phys, u64 len, u32 flags)
   5493{
   5494	int ret = 0;
   5495
   5496	if (!cache->cached)
   5497		goto assign;
   5498
   5499	/*
   5500	 * Sanity check, extent_fiemap() should have ensured that the new
   5501	 * fiemap extent won't overlap with the cached one.
   5502	 * Not recoverable.
   5503	 *
   5504	 * NOTE: Physical addresses can overlap, due to compression
   5505	 */
   5506	if (cache->offset + cache->len > offset) {
   5507		WARN_ON(1);
   5508		return -EINVAL;
   5509	}
   5510
   5511	/*
   5512	 * Only merge fiemap extents if
   5513	 * 1) Their logical addresses are continuous
   5514	 *
   5515	 * 2) Their physical addresses are continuous
   5516	 *    So truly compressed (physical size smaller than logical size)
   5517	 *    extents won't get merged with each other
   5518	 *
   5519	 * 3) They share the same flags except FIEMAP_EXTENT_LAST
   5520	 *    So a regular extent won't get merged with a prealloc extent
   5521	 */
   5522	if (cache->offset + cache->len  == offset &&
   5523	    cache->phys + cache->len == phys  &&
   5524	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
   5525			(flags & ~FIEMAP_EXTENT_LAST)) {
   5526		cache->len += len;
   5527		cache->flags |= flags;
   5528		goto try_submit_last;
   5529	}
   5530
   5531	/* Not mergeable, need to submit cached one */
   5532	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
   5533				      cache->len, cache->flags);
   5534	cache->cached = false;
   5535	if (ret)
   5536		return ret;
   5537assign:
   5538	cache->cached = true;
   5539	cache->offset = offset;
   5540	cache->phys = phys;
   5541	cache->len = len;
   5542	cache->flags = flags;
   5543try_submit_last:
   5544	if (cache->flags & FIEMAP_EXTENT_LAST) {
   5545		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
   5546				cache->phys, cache->len, cache->flags);
   5547		cache->cached = false;
   5548	}
   5549	return ret;
   5550}
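
       /*
        * Worked example of the merge conditions above (illustrative numbers):
        * with a cached extent {offset=0, phys=1M, len=4K, flags=0}, a new
        * extent {offset=4K, phys=1M+4K, len=8K, flags=0} is logically and
        * physically contiguous and shares the flags, so the cache simply grows
        * to {offset=0, phys=1M, len=12K}.  A new extent at offset=4K but
        * phys=2M fails the physical continuity check, so the cached entry is
        * emitted via fiemap_fill_next_extent() and the new one is cached.
        */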
   5551
   5552/*
   5553 * Emit last fiemap cache
   5554 *
   5555 * The last fiemap cache may still be cached in the following case:
   5556 * 0		      4k		    8k
   5557 * |<- Fiemap range ->|
   5558 * |<------------  First extent ----------->|
   5559 *
   5560 * In this case, the first extent range will be cached but not emitted.
   5561 * So we must emit it before ending extent_fiemap().
   5562 */
   5563static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
   5564				  struct fiemap_cache *cache)
   5565{
   5566	int ret;
   5567
   5568	if (!cache->cached)
   5569		return 0;
   5570
   5571	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
   5572				      cache->len, cache->flags);
   5573	cache->cached = false;
   5574	if (ret > 0)
   5575		ret = 0;
   5576	return ret;
   5577}
   5578
   5579int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
   5580		  u64 start, u64 len)
   5581{
   5582	int ret = 0;
   5583	u64 off;
   5584	u64 max = start + len;
   5585	u32 flags = 0;
   5586	u32 found_type;
   5587	u64 last;
   5588	u64 last_for_get_extent = 0;
   5589	u64 disko = 0;
   5590	u64 isize = i_size_read(&inode->vfs_inode);
   5591	struct btrfs_key found_key;
   5592	struct extent_map *em = NULL;
   5593	struct extent_state *cached_state = NULL;
   5594	struct btrfs_path *path;
   5595	struct btrfs_root *root = inode->root;
   5596	struct fiemap_cache cache = { 0 };
   5597	struct ulist *roots;
   5598	struct ulist *tmp_ulist;
   5599	int end = 0;
   5600	u64 em_start = 0;
   5601	u64 em_len = 0;
   5602	u64 em_end = 0;
   5603
   5604	if (len == 0)
   5605		return -EINVAL;
   5606
   5607	path = btrfs_alloc_path();
   5608	if (!path)
   5609		return -ENOMEM;
   5610
   5611	roots = ulist_alloc(GFP_KERNEL);
   5612	tmp_ulist = ulist_alloc(GFP_KERNEL);
   5613	if (!roots || !tmp_ulist) {
   5614		ret = -ENOMEM;
   5615		goto out_free_ulist;
   5616	}
   5617
   5618	/*
   5619	 * We can't initialize that to 'start' as this could miss extents due
   5620	 * to extent item merging
   5621	 */
   5622	off = 0;
   5623	start = round_down(start, btrfs_inode_sectorsize(inode));
   5624	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
   5625
   5626	/*
   5627	 * lookup the last file extent.  We're not using i_size here
   5628	 * because there might be preallocation past i_size
   5629	 */
   5630	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
   5631				       0);
   5632	if (ret < 0) {
   5633		goto out_free_ulist;
   5634	} else {
   5635		WARN_ON(!ret);
   5636		if (ret == 1)
   5637			ret = 0;
   5638	}
   5639
   5640	path->slots[0]--;
   5641	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
   5642	found_type = found_key.type;
   5643
   5644	/* No extents, but there might be delalloc bits */
   5645	if (found_key.objectid != btrfs_ino(inode) ||
   5646	    found_type != BTRFS_EXTENT_DATA_KEY) {
   5647		/* have to trust i_size as the end */
   5648		last = (u64)-1;
   5649		last_for_get_extent = isize;
   5650	} else {
   5651		/*
   5652		 * remember the start of the last extent.  There are a
   5653		 * bunch of different factors that go into the length of the
   5654		 * extent, so it's much less complex to remember where it started
   5655		 */
   5656		last = found_key.offset;
   5657		last_for_get_extent = last + 1;
   5658	}
   5659	btrfs_release_path(path);
   5660
   5661	/*
   5662	 * we might have some extents allocated but more delalloc past those
   5663	 * extents.  so, we trust isize unless the start of the last extent is
   5664	 * beyond isize
   5665	 */
   5666	if (last < isize) {
   5667		last = (u64)-1;
   5668		last_for_get_extent = isize;
   5669	}
   5670
   5671	lock_extent_bits(&inode->io_tree, start, start + len - 1,
   5672			 &cached_state);
   5673
   5674	em = get_extent_skip_holes(inode, start, last_for_get_extent);
   5675	if (!em)
   5676		goto out;
   5677	if (IS_ERR(em)) {
   5678		ret = PTR_ERR(em);
   5679		goto out;
   5680	}
   5681
   5682	while (!end) {
   5683		u64 offset_in_extent = 0;
   5684
   5685		/* break if the extent we found is outside the range */
   5686		if (em->start >= max || extent_map_end(em) < off)
   5687			break;
   5688
   5689		/*
   5690		 * get_extent may return an extent that starts before our
   5691		 * requested range.  We have to make sure the ranges
   5692		 * we return to fiemap always move forward and don't
   5693		 * overlap, so adjust the offsets here
   5694		 */
   5695		em_start = max(em->start, off);
   5696
   5697		/*
   5698		 * record the offset from the start of the extent
   5699		 * for adjusting the disk offset below.  Only do this if the
   5700		 * extent isn't compressed since our in ram offset may be past
   5701		 * what we have actually allocated on disk.
   5702		 */
   5703		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
   5704			offset_in_extent = em_start - em->start;
   5705		em_end = extent_map_end(em);
   5706		em_len = em_end - em_start;
   5707		flags = 0;
   5708		if (em->block_start < EXTENT_MAP_LAST_BYTE)
   5709			disko = em->block_start + offset_in_extent;
   5710		else
   5711			disko = 0;
   5712
   5713		/*
   5714		 * bump off for our next call to get_extent
   5715		 */
   5716		off = extent_map_end(em);
   5717		if (off >= max)
   5718			end = 1;
   5719
   5720		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
   5721			end = 1;
   5722			flags |= FIEMAP_EXTENT_LAST;
   5723		} else if (em->block_start == EXTENT_MAP_INLINE) {
   5724			flags |= (FIEMAP_EXTENT_DATA_INLINE |
   5725				  FIEMAP_EXTENT_NOT_ALIGNED);
   5726		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
   5727			flags |= (FIEMAP_EXTENT_DELALLOC |
   5728				  FIEMAP_EXTENT_UNKNOWN);
   5729		} else if (fieinfo->fi_extents_max) {
   5730			u64 bytenr = em->block_start -
   5731				(em->start - em->orig_start);
   5732
   5733			/*
   5734			 * As btrfs supports shared space, this information
   5735			 * can be exported to userspace tools via
   5736			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
   5737			 * then we're just getting a count and we can skip the
   5738			 * lookup stuff.
   5739			 */
   5740			ret = btrfs_check_shared(root, btrfs_ino(inode),
   5741						 bytenr, roots, tmp_ulist);
   5742			if (ret < 0)
   5743				goto out_free;
   5744			if (ret)
   5745				flags |= FIEMAP_EXTENT_SHARED;
   5746			ret = 0;
   5747		}
   5748		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
   5749			flags |= FIEMAP_EXTENT_ENCODED;
   5750		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
   5751			flags |= FIEMAP_EXTENT_UNWRITTEN;
   5752
   5753		free_extent_map(em);
   5754		em = NULL;
   5755		if ((em_start >= last) || em_len == (u64)-1 ||
   5756		   (last == (u64)-1 && isize <= em_end)) {
   5757			flags |= FIEMAP_EXTENT_LAST;
   5758			end = 1;
   5759		}
   5760
   5761		/* now scan forward to see if this is really the last extent. */
   5762		em = get_extent_skip_holes(inode, off, last_for_get_extent);
   5763		if (IS_ERR(em)) {
   5764			ret = PTR_ERR(em);
   5765			goto out;
   5766		}
   5767		if (!em) {
   5768			flags |= FIEMAP_EXTENT_LAST;
   5769			end = 1;
   5770		}
   5771		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
   5772					   em_len, flags);
   5773		if (ret) {
   5774			if (ret == 1)
   5775				ret = 0;
   5776			goto out_free;
   5777		}
   5778	}
   5779out_free:
   5780	if (!ret)
   5781		ret = emit_last_fiemap_cache(fieinfo, &cache);
   5782	free_extent_map(em);
   5783out:
   5784	unlock_extent_cached(&inode->io_tree, start, start + len - 1,
   5785			     &cached_state);
   5786
   5787out_free_ulist:
   5788	btrfs_free_path(path);
   5789	ulist_free(roots);
   5790	ulist_free(tmp_ulist);
   5791	return ret;
   5792}
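
       /*
        * For reference, a minimal user space consumer of this path could look
        * roughly like the sketch below (assuming an open file descriptor fd on
        * a btrfs file; error handling omitted).  The fiemap ioctl reaches
        * extent_fiemap() via the inode's ->fiemap operation.
        *
        *	#include <linux/fiemap.h>
        *	#include <linux/fs.h>
        *	#include <sys/ioctl.h>
        *	#include <stdio.h>
        *	#include <stdlib.h>
        *
        *	struct fiemap *fm = calloc(1, sizeof(*fm) +
        *				   32 * sizeof(struct fiemap_extent));
        *	fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
        *	fm->fm_extent_count = 32;
        *	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
        *		for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
        *			printf("logical %llu len %llu flags 0x%x\n",
        *			       (unsigned long long)fm->fm_extents[i].fe_logical,
        *			       (unsigned long long)fm->fm_extents[i].fe_length,
        *			       fm->fm_extents[i].fe_flags);
        *
        * FIEMAP_EXTENT_SHARED and FIEMAP_EXTENT_ENCODED in fe_flags correspond
        * to the shared and compressed checks in the loop above.
        */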
   5793
   5794static void __free_extent_buffer(struct extent_buffer *eb)
   5795{
   5796	kmem_cache_free(extent_buffer_cache, eb);
   5797}
   5798
   5799int extent_buffer_under_io(const struct extent_buffer *eb)
   5800{
   5801	return (atomic_read(&eb->io_pages) ||
   5802		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
   5803		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
   5804}
   5805
   5806static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
   5807{
   5808	struct btrfs_subpage *subpage;
   5809
   5810	lockdep_assert_held(&page->mapping->private_lock);
   5811
   5812	if (PagePrivate(page)) {
   5813		subpage = (struct btrfs_subpage *)page->private;
   5814		if (atomic_read(&subpage->eb_refs))
   5815			return true;
   5816		/*
   5817		 * Even if there are no eb refs here, we may still have an
   5818		 * end_page_read() call relying on page::private.
   5819		 */
   5820		if (atomic_read(&subpage->readers))
   5821			return true;
   5822	}
   5823	return false;
   5824}
   5825
   5826static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
   5827{
   5828	struct btrfs_fs_info *fs_info = eb->fs_info;
   5829	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
   5830
   5831	/*
   5832	 * For mapped eb, we're going to change the page private, which should
   5833	 * be done under the private_lock.
   5834	 */
   5835	if (mapped)
   5836		spin_lock(&page->mapping->private_lock);
   5837
   5838	if (!PagePrivate(page)) {
   5839		if (mapped)
   5840			spin_unlock(&page->mapping->private_lock);
   5841		return;
   5842	}
   5843
   5844	if (fs_info->nodesize >= PAGE_SIZE) {
   5845		/*
   5846		 * We do this since we'll remove the pages after we've
   5847		 * removed the eb from the radix tree, so we could race
   5848		 * and have this page now attached to the new eb.  So
   5849		 * only clear page_private if it's still connected to
   5850		 * this eb.
   5851		 */
   5852		if (PagePrivate(page) &&
   5853		    page->private == (unsigned long)eb) {
   5854			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
   5855			BUG_ON(PageDirty(page));
   5856			BUG_ON(PageWriteback(page));
   5857			/*
   5858			 * We need to make sure we haven't been attached
   5859			 * to a new eb.
   5860			 */
   5861			detach_page_private(page);
   5862		}
   5863		if (mapped)
   5864			spin_unlock(&page->mapping->private_lock);
   5865		return;
   5866	}
   5867
   5868	/*
   5869	 * For subpage, we can have a dummy eb with page private.  In this case,
   5870	 * we can directly detach the private as such a page is only attached to
   5871	 * one dummy eb, no sharing.
   5872	 */
   5873	if (!mapped) {
   5874		btrfs_detach_subpage(fs_info, page);
   5875		return;
   5876	}
   5877
   5878	btrfs_page_dec_eb_refs(fs_info, page);
   5879
   5880	/*
   5881	 * We can only detach the page private if there are no other ebs in the
   5882	 * page range and no unfinished IO.
   5883	 */
   5884	if (!page_range_has_eb(fs_info, page))
   5885		btrfs_detach_subpage(fs_info, page);
   5886
   5887	spin_unlock(&page->mapping->private_lock);
   5888}
   5889
   5890/* Release all pages attached to the extent buffer */
   5891static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
   5892{
   5893	int i;
   5894	int num_pages;
   5895
   5896	ASSERT(!extent_buffer_under_io(eb));
   5897
   5898	num_pages = num_extent_pages(eb);
   5899	for (i = 0; i < num_pages; i++) {
   5900		struct page *page = eb->pages[i];
   5901
   5902		if (!page)
   5903			continue;
   5904
   5905		detach_extent_buffer_page(eb, page);
   5906
   5907		/* One for when we allocated the page */
   5908		put_page(page);
   5909	}
   5910}
   5911
   5912/*
   5913 * Helper for releasing the extent buffer.
   5914 */
   5915static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
   5916{
   5917	btrfs_release_extent_buffer_pages(eb);
   5918	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
   5919	__free_extent_buffer(eb);
   5920}
   5921
   5922static struct extent_buffer *
   5923__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
   5924		      unsigned long len)
   5925{
   5926	struct extent_buffer *eb = NULL;
   5927
   5928	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
   5929	eb->start = start;
   5930	eb->len = len;
   5931	eb->fs_info = fs_info;
   5932	eb->bflags = 0;
   5933	init_rwsem(&eb->lock);
   5934
   5935	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
   5936			     &fs_info->allocated_ebs);
   5937	INIT_LIST_HEAD(&eb->release_list);
   5938
   5939	spin_lock_init(&eb->refs_lock);
   5940	atomic_set(&eb->refs, 1);
   5941	atomic_set(&eb->io_pages, 0);
   5942
   5943	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
   5944
   5945	return eb;
   5946}
   5947
   5948struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
   5949{
   5950	int i;
   5951	struct extent_buffer *new;
   5952	int num_pages = num_extent_pages(src);
   5953	int ret;
   5954
   5955	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
   5956	if (new == NULL)
   5957		return NULL;
   5958
   5959	/*
   5960	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
   5961	 * btrfs_release_extent_buffer() has different behavior for
   5962	 * an UNMAPPED subpage extent buffer.
   5963	 */
   5964	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
   5965
   5966	memset(new->pages, 0, sizeof(*new->pages) * num_pages);
   5967	ret = btrfs_alloc_page_array(num_pages, new->pages);
   5968	if (ret) {
   5969		btrfs_release_extent_buffer(new);
   5970		return NULL;
   5971	}
   5972
   5973	for (i = 0; i < num_pages; i++) {
   5974		int ret;
   5975		struct page *p = new->pages[i];
   5976
   5977		ret = attach_extent_buffer_page(new, p, NULL);
   5978		if (ret < 0) {
   5979			btrfs_release_extent_buffer(new);
   5980			return NULL;
   5981		}
   5982		WARN_ON(PageDirty(p));
   5983		copy_page(page_address(p), page_address(src->pages[i]));
   5984	}
   5985	set_extent_buffer_uptodate(new);
   5986
   5987	return new;
   5988}
   5989
   5990struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
   5991						  u64 start, unsigned long len)
   5992{
   5993	struct extent_buffer *eb;
   5994	int num_pages;
   5995	int i;
   5996	int ret;
   5997
   5998	eb = __alloc_extent_buffer(fs_info, start, len);
   5999	if (!eb)
   6000		return NULL;
   6001
   6002	num_pages = num_extent_pages(eb);
   6003	ret = btrfs_alloc_page_array(num_pages, eb->pages);
   6004	if (ret)
   6005		goto err;
   6006
   6007	for (i = 0; i < num_pages; i++) {
   6008		struct page *p = eb->pages[i];
   6009
   6010		ret = attach_extent_buffer_page(eb, p, NULL);
   6011		if (ret < 0)
   6012			goto err;
   6013	}
   6014
   6015	set_extent_buffer_uptodate(eb);
   6016	btrfs_set_header_nritems(eb, 0);
   6017	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
   6018
   6019	return eb;
   6020err:
   6021	for (i = 0; i < num_pages; i++) {
   6022		if (eb->pages[i]) {
   6023			detach_extent_buffer_page(eb, eb->pages[i]);
   6024			__free_page(eb->pages[i]);
   6025		}
   6026	}
   6027	__free_extent_buffer(eb);
   6028	return NULL;
   6029}
   6030
   6031struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
   6032						u64 start)
   6033{
   6034	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
   6035}
   6036
   6037static void check_buffer_tree_ref(struct extent_buffer *eb)
   6038{
   6039	int refs;
   6040	/*
   6041	 * The TREE_REF bit is first set when the extent_buffer is added
   6042	 * to the radix tree. It is also set again, if it was cleared, when a
   6043	 * new reference is created by find_extent_buffer.
   6044	 *
   6045	 * It is only cleared in two cases: freeing the last non-tree
   6046	 * reference to the extent_buffer when its STALE bit is set or
   6047	 * calling release_folio when the tree reference is the only reference.
   6048	 *
   6049	 * In both cases, care is taken to ensure that the extent_buffer's
   6050	 * pages are not under io. However, release_folio can be concurrently
   6051	 * called with creating new references, which is prone to race
   6052	 * conditions between the calls to check_buffer_tree_ref in those
   6053	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
   6054	 *
   6055	 * The actual lifetime of the extent_buffer in the radix tree is
   6056	 * adequately protected by the refcount, but the TREE_REF bit and
   6057	 * its corresponding reference are not. To protect against this
   6058	 * class of races, we call check_buffer_tree_ref from the codepaths
   6059	 * which trigger io after they set eb->io_pages. Note that once io is
   6060	 * initiated, TREE_REF can no longer be cleared, so that is the
   6061	 * moment at which any such race is best fixed.
   6062	 */
   6063	refs = atomic_read(&eb->refs);
   6064	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
   6065		return;
   6066
   6067	spin_lock(&eb->refs_lock);
   6068	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
   6069		atomic_inc(&eb->refs);
   6070	spin_unlock(&eb->refs_lock);
   6071}
   6072
   6073static void mark_extent_buffer_accessed(struct extent_buffer *eb,
   6074		struct page *accessed)
   6075{
   6076	int num_pages, i;
   6077
   6078	check_buffer_tree_ref(eb);
   6079
   6080	num_pages = num_extent_pages(eb);
   6081	for (i = 0; i < num_pages; i++) {
   6082		struct page *p = eb->pages[i];
   6083
   6084		if (p != accessed)
   6085			mark_page_accessed(p);
   6086	}
   6087}
   6088
   6089struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
   6090					 u64 start)
   6091{
   6092	struct extent_buffer *eb;
   6093
   6094	eb = find_extent_buffer_nolock(fs_info, start);
   6095	if (!eb)
   6096		return NULL;
   6097	/*
   6098	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
   6099	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
   6100	 * another task running free_extent_buffer() might have seen that flag
   6101	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
   6102	 * writeback flags not set) and it's still in the tree (flag
   6103	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
   6104	 * decrementing the extent buffer's reference count twice.  So here we
   6105	 * could race and increment the eb's reference count, clear its stale
   6106	 * flag, mark it as dirty and drop our reference before the other task
   6107	 * finishes executing free_extent_buffer, which would later result in
   6108	 * an attempt to free an extent buffer that is dirty.
   6109	 */
   6110	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
   6111		spin_lock(&eb->refs_lock);
   6112		spin_unlock(&eb->refs_lock);
   6113	}
   6114	mark_extent_buffer_accessed(eb, NULL);
   6115	return eb;
   6116}
   6117
   6118#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
   6119struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
   6120					u64 start)
   6121{
   6122	struct extent_buffer *eb, *exists = NULL;
   6123	int ret;
   6124
   6125	eb = find_extent_buffer(fs_info, start);
   6126	if (eb)
   6127		return eb;
   6128	eb = alloc_dummy_extent_buffer(fs_info, start);
   6129	if (!eb)
   6130		return ERR_PTR(-ENOMEM);
   6131	eb->fs_info = fs_info;
   6132
   6133	do {
   6134		ret = xa_insert(&fs_info->extent_buffers,
   6135				start >> fs_info->sectorsize_bits,
   6136				eb, GFP_NOFS);
   6137		if (ret == -ENOMEM) {
   6138			exists = ERR_PTR(ret);
   6139			goto free_eb;
   6140		}
   6141		if (ret == -EBUSY) {
   6142			exists = find_extent_buffer(fs_info, start);
   6143			if (exists)
   6144				goto free_eb;
   6145		}
   6146	} while (ret);
   6147
   6148	check_buffer_tree_ref(eb);
   6149	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
   6150
   6151	return eb;
   6152free_eb:
   6153	btrfs_release_extent_buffer(eb);
   6154	return exists;
   6155}
   6156#endif
   6157
   6158static struct extent_buffer *grab_extent_buffer(
   6159		struct btrfs_fs_info *fs_info, struct page *page)
   6160{
   6161	struct extent_buffer *exists;
   6162
   6163	/*
   6164	 * For subpage case, we completely rely on radix tree to ensure we
   6165	 * don't try to insert two ebs for the same bytenr.  So here we always
   6166	 * return NULL and just continue.
   6167	 */
   6168	if (fs_info->nodesize < PAGE_SIZE)
   6169		return NULL;
   6170
   6171	/* Page not yet attached to an extent buffer */
   6172	if (!PagePrivate(page))
   6173		return NULL;
   6174
   6175	/*
   6176	 * We could have already allocated an eb for this page and attached one
   6177	 * so let's see if we can get a ref on the existing eb, and if we can we
   6178	 * know it's good and we can just return that one, else we know we can
   6179	 * just overwrite page->private.
   6180	 */
   6181	exists = (struct extent_buffer *)page->private;
   6182	if (atomic_inc_not_zero(&exists->refs))
   6183		return exists;
   6184
   6185	WARN_ON(PageDirty(page));
   6186	detach_page_private(page);
   6187	return NULL;
   6188}
   6189
   6190static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
   6191{
   6192	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
   6193		btrfs_err(fs_info, "bad tree block start %llu", start);
   6194		return -EINVAL;
   6195	}
   6196
   6197	if (fs_info->nodesize < PAGE_SIZE &&
   6198	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
   6199		btrfs_err(fs_info,
   6200		"tree block crosses page boundary, start %llu nodesize %u",
   6201			  start, fs_info->nodesize);
   6202		return -EINVAL;
   6203	}
   6204	if (fs_info->nodesize >= PAGE_SIZE &&
   6205	    !IS_ALIGNED(start, PAGE_SIZE)) {
   6206		btrfs_err(fs_info,
   6207		"tree block is not page aligned, start %llu nodesize %u",
   6208			  start, fs_info->nodesize);
   6209		return -EINVAL;
   6210	}
   6211	return 0;
   6212}
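
       /*
        * Example of the subpage constraint above, using illustrative numbers:
        * with a 16K nodesize on a 64K page, tree blocks starting at offsets 0,
        * 16K, 32K or 48K within a page are accepted, while start = 56K gives
        * offset_in_page(start) + nodesize = 72K > 64K and is rejected for
        * crossing a page boundary.
        */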
   6213
   6214struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
   6215					  u64 start, u64 owner_root, int level)
   6216{
   6217	unsigned long len = fs_info->nodesize;
   6218	int num_pages;
   6219	int i;
   6220	unsigned long index = start >> PAGE_SHIFT;
   6221	struct extent_buffer *eb;
   6222	struct extent_buffer *exists = NULL;
   6223	struct page *p;
   6224	struct address_space *mapping = fs_info->btree_inode->i_mapping;
   6225	int uptodate = 1;
   6226	int ret;
   6227
   6228	if (check_eb_alignment(fs_info, start))
   6229		return ERR_PTR(-EINVAL);
   6230
   6231#if BITS_PER_LONG == 32
   6232	if (start >= MAX_LFS_FILESIZE) {
   6233		btrfs_err_rl(fs_info,
   6234		"extent buffer %llu is beyond 32bit page cache limit", start);
   6235		btrfs_err_32bit_limit(fs_info);
   6236		return ERR_PTR(-EOVERFLOW);
   6237	}
   6238	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
   6239		btrfs_warn_32bit_limit(fs_info);
   6240#endif
   6241
   6242	eb = find_extent_buffer(fs_info, start);
   6243	if (eb)
   6244		return eb;
   6245
   6246	eb = __alloc_extent_buffer(fs_info, start, len);
   6247	if (!eb)
   6248		return ERR_PTR(-ENOMEM);
   6249	btrfs_set_buffer_lockdep_class(owner_root, eb, level);
   6250
   6251	num_pages = num_extent_pages(eb);
   6252	for (i = 0; i < num_pages; i++, index++) {
   6253		struct btrfs_subpage *prealloc = NULL;
   6254
   6255		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
   6256		if (!p) {
   6257			exists = ERR_PTR(-ENOMEM);
   6258			goto free_eb;
   6259		}
   6260
   6261		/*
   6262		 * Preallocate page->private for the subpage case, so that we won't
   6263		 * allocate memory with private_lock held.  The memory will be
   6264		 * freed by attach_extent_buffer_page() or freed manually if
   6265		 * we exit earlier.
   6266		 *
   6267		 * Although we have ensured one subpage eb can only have one
   6268		 * page, it may change in the future for 16K page size
   6269		 * support, so we still preallocate the memory in the loop.
   6270		 */
   6271		if (fs_info->nodesize < PAGE_SIZE) {
   6272			prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
   6273			if (IS_ERR(prealloc)) {
   6274				ret = PTR_ERR(prealloc);
   6275				unlock_page(p);
   6276				put_page(p);
   6277				exists = ERR_PTR(ret);
   6278				goto free_eb;
   6279			}
   6280		}
   6281
   6282		spin_lock(&mapping->private_lock);
   6283		exists = grab_extent_buffer(fs_info, p);
   6284		if (exists) {
   6285			spin_unlock(&mapping->private_lock);
   6286			unlock_page(p);
   6287			put_page(p);
   6288			mark_extent_buffer_accessed(exists, p);
   6289			btrfs_free_subpage(prealloc);
   6290			goto free_eb;
   6291		}
   6292		/* Should not fail, as we have preallocated the memory */
   6293		ret = attach_extent_buffer_page(eb, p, prealloc);
   6294		ASSERT(!ret);
   6295		/*
   6296		 * To inform that we have an extra eb under allocation, so that
   6297		 * detach_extent_buffer_page() won't release the page private
   6298		 * when the eb hasn't yet been inserted into the radix tree.
   6299		 *
   6300		 * The ref will be decreased when the eb releases the page, in
   6301		 * detach_extent_buffer_page().
   6302		 * Thus it needs no special handling in the error path.
   6303		 */
   6304		btrfs_page_inc_eb_refs(fs_info, p);
   6305		spin_unlock(&mapping->private_lock);
   6306
   6307		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
   6308		eb->pages[i] = p;
   6309		if (!PageUptodate(p))
   6310			uptodate = 0;
   6311
   6312		/*
   6313		 * We can't unlock the pages just yet since the extent buffer
   6314		 * hasn't been properly inserted into the radix tree; this
   6315		 * opens a race with btree_release_folio which can free a page
   6316		 * while we are still filling in all pages for the buffer and
   6317		 * we could crash.
   6318		 */
   6319	}
   6320	if (uptodate)
   6321		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
   6322
   6323	do {
   6324		ret = xa_insert(&fs_info->extent_buffers,
   6325				start >> fs_info->sectorsize_bits,
   6326				eb, GFP_NOFS);
   6327		if (ret == -ENOMEM) {
   6328			exists = ERR_PTR(ret);
   6329			goto free_eb;
   6330		}
   6331		if (ret == -EBUSY) {
   6332			exists = find_extent_buffer(fs_info, start);
   6333			if (exists)
   6334				goto free_eb;
   6335		}
   6336	} while (ret);
   6337
   6338	/* add one reference for the tree */
   6339	check_buffer_tree_ref(eb);
   6340	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
   6341
   6342	/*
   6343	 * Now it's safe to unlock the pages because any calls to
   6344	 * btree_release_folio will correctly detect that a page belongs to a
   6345	 * live buffer and won't free them prematurely.
   6346	 */
   6347	for (i = 0; i < num_pages; i++)
   6348		unlock_page(eb->pages[i]);
   6349	return eb;
   6350
   6351free_eb:
   6352	WARN_ON(!atomic_dec_and_test(&eb->refs));
   6353	for (i = 0; i < num_pages; i++) {
   6354		if (eb->pages[i])
   6355			unlock_page(eb->pages[i]);
   6356	}
   6357
   6358	btrfs_release_extent_buffer(eb);
   6359	return exists;
   6360}
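
       /*
        * The insertion loop above resolves the race between two tasks creating
        * an eb for the same bytenr (simplified sketch):
        *
        *	task A: xa_insert(..., start >> sectorsize_bits, ...) returns 0
        *	task B: xa_insert(...) returns -EBUSY
        *	task B: find_extent_buffer() returns task A's eb with a reference
        *	task B: jumps to free_eb and releases its own, never-published eb
        *
        * If the existing eb disappears before task B can grab it, the loop
        * simply retries the insertion.  Only the published eb gets
        * EXTENT_BUFFER_IN_TREE and the extra tree reference from
        * check_buffer_tree_ref().
        */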
   6361
   6362static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
   6363{
   6364	struct extent_buffer *eb =
   6365			container_of(head, struct extent_buffer, rcu_head);
   6366
   6367	__free_extent_buffer(eb);
   6368}
   6369
   6370static int release_extent_buffer(struct extent_buffer *eb)
   6371	__releases(&eb->refs_lock)
   6372{
   6373	lockdep_assert_held(&eb->refs_lock);
   6374
   6375	WARN_ON(atomic_read(&eb->refs) == 0);
   6376	if (atomic_dec_and_test(&eb->refs)) {
   6377		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
   6378			struct btrfs_fs_info *fs_info = eb->fs_info;
   6379
   6380			spin_unlock(&eb->refs_lock);
   6381
   6382			xa_erase(&fs_info->extent_buffers,
   6383				 eb->start >> fs_info->sectorsize_bits);
   6384		} else {
   6385			spin_unlock(&eb->refs_lock);
   6386		}
   6387
   6388		btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
   6389		/* Should be safe to release our pages at this point */
   6390		btrfs_release_extent_buffer_pages(eb);
   6391#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
   6392		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
   6393			__free_extent_buffer(eb);
   6394			return 1;
   6395		}
   6396#endif
   6397		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
   6398		return 1;
   6399	}
   6400	spin_unlock(&eb->refs_lock);
   6401
   6402	return 0;
   6403}
   6404
   6405void free_extent_buffer(struct extent_buffer *eb)
   6406{
   6407	int refs;
   6408	int old;
   6409	if (!eb)
   6410		return;
   6411
   6412	while (1) {
   6413		refs = atomic_read(&eb->refs);
   6414		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
   6415		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
   6416			refs == 1))
   6417			break;
   6418		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
   6419		if (old == refs)
   6420			return;
   6421	}
   6422
   6423	spin_lock(&eb->refs_lock);
   6424	if (atomic_read(&eb->refs) == 2 &&
   6425	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
   6426	    !extent_buffer_under_io(eb) &&
   6427	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
   6428		atomic_dec(&eb->refs);
   6429
   6430	/*
   6431	 * I know this is terrible, but it's temporary until we stop tracking
   6432	 * the uptodate bits and such for the extent buffers.
   6433	 */
   6434	release_extent_buffer(eb);
   6435}
   6436
   6437void free_extent_buffer_stale(struct extent_buffer *eb)
   6438{
   6439	if (!eb)
   6440		return;
   6441
   6442	spin_lock(&eb->refs_lock);
   6443	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
   6444
   6445	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
   6446	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
   6447		atomic_dec(&eb->refs);
   6448	release_extent_buffer(eb);
   6449}
   6450
   6451static void btree_clear_page_dirty(struct page *page)
   6452{
   6453	ASSERT(PageDirty(page));
   6454	ASSERT(PageLocked(page));
   6455	clear_page_dirty_for_io(page);
   6456	xa_lock_irq(&page->mapping->i_pages);
   6457	if (!PageDirty(page))
   6458		__xa_clear_mark(&page->mapping->i_pages,
   6459				page_index(page), PAGECACHE_TAG_DIRTY);
   6460	xa_unlock_irq(&page->mapping->i_pages);
   6461}
   6462
   6463static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
   6464{
   6465	struct btrfs_fs_info *fs_info = eb->fs_info;
   6466	struct page *page = eb->pages[0];
   6467	bool last;
   6468
   6469	/* btree_clear_page_dirty() needs page locked */
   6470	lock_page(page);
   6471	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
   6472						  eb->len);
   6473	if (last)
   6474		btree_clear_page_dirty(page);
   6475	unlock_page(page);
   6476	WARN_ON(atomic_read(&eb->refs) == 0);
   6477}
   6478
   6479void clear_extent_buffer_dirty(const struct extent_buffer *eb)
   6480{
   6481	int i;
   6482	int num_pages;
   6483	struct page *page;
   6484
   6485	if (eb->fs_info->nodesize < PAGE_SIZE)
   6486		return clear_subpage_extent_buffer_dirty(eb);
   6487
   6488	num_pages = num_extent_pages(eb);
   6489
   6490	for (i = 0; i < num_pages; i++) {
   6491		page = eb->pages[i];
   6492		if (!PageDirty(page))
   6493			continue;
   6494		lock_page(page);
   6495		btree_clear_page_dirty(page);
   6496		ClearPageError(page);
   6497		unlock_page(page);
   6498	}
   6499	WARN_ON(atomic_read(&eb->refs) == 0);
   6500}
   6501
   6502bool set_extent_buffer_dirty(struct extent_buffer *eb)
   6503{
   6504	int i;
   6505	int num_pages;
   6506	bool was_dirty;
   6507
   6508	check_buffer_tree_ref(eb);
   6509
   6510	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
   6511
   6512	num_pages = num_extent_pages(eb);
   6513	WARN_ON(atomic_read(&eb->refs) == 0);
   6514	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
   6515
   6516	if (!was_dirty) {
   6517		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
   6518
   6519		/*
   6520		 * For the subpage case, we can have other extent buffers in the
   6521		 * same page, and in clear_subpage_extent_buffer_dirty() we
   6522		 * have to clear the page dirty bit without the subpage lock held.
   6523		 * This can cause a race where our page gets its dirty bit cleared
   6524		 * right after we set it.
   6525		 *
   6526		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
   6527		 * its page for other reasons, so we can use the page lock to
   6528		 * prevent the above race.
   6529		 */
   6530		if (subpage)
   6531			lock_page(eb->pages[0]);
   6532		for (i = 0; i < num_pages; i++)
   6533			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
   6534					     eb->start, eb->len);
   6535		if (subpage)
   6536			unlock_page(eb->pages[0]);
   6537	}
   6538#ifdef CONFIG_BTRFS_DEBUG
   6539	for (i = 0; i < num_pages; i++)
   6540		ASSERT(PageDirty(eb->pages[i]));
   6541#endif
   6542
   6543	return was_dirty;
   6544}
   6545
   6546void clear_extent_buffer_uptodate(struct extent_buffer *eb)
   6547{
   6548	struct btrfs_fs_info *fs_info = eb->fs_info;
   6549	struct page *page;
   6550	int num_pages;
   6551	int i;
   6552
   6553	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
   6554	num_pages = num_extent_pages(eb);
   6555	for (i = 0; i < num_pages; i++) {
   6556		page = eb->pages[i];
   6557		if (!page)
   6558			continue;
   6559
   6560		/*
   6561		 * This is special handling for metadata subpage, as regular
   6562		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
   6563		 */
   6564		if (fs_info->nodesize >= PAGE_SIZE)
   6565			ClearPageUptodate(page);
   6566		else
   6567			btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
   6568						     eb->len);
   6569	}
   6570}
   6571
   6572void set_extent_buffer_uptodate(struct extent_buffer *eb)
   6573{
   6574	struct btrfs_fs_info *fs_info = eb->fs_info;
   6575	struct page *page;
   6576	int num_pages;
   6577	int i;
   6578
   6579	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
   6580	num_pages = num_extent_pages(eb);
   6581	for (i = 0; i < num_pages; i++) {
   6582		page = eb->pages[i];
   6583
   6584		/*
   6585		 * This is special handling for metadata subpage, as regular
   6586		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
   6587		 */
   6588		if (fs_info->nodesize >= PAGE_SIZE)
   6589			SetPageUptodate(page);
   6590		else
   6591			btrfs_subpage_set_uptodate(fs_info, page, eb->start,
   6592						   eb->len);
   6593	}
   6594}
   6595
   6596static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
   6597				      int mirror_num)
   6598{
   6599	struct btrfs_fs_info *fs_info = eb->fs_info;
   6600	struct extent_io_tree *io_tree;
   6601	struct page *page = eb->pages[0];
   6602	struct btrfs_bio_ctrl bio_ctrl = { 0 };
   6603	int ret = 0;
   6604
   6605	ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
   6606	ASSERT(PagePrivate(page));
   6607	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
   6608
   6609	if (wait == WAIT_NONE) {
   6610		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
   6611			return -EAGAIN;
   6612	} else {
   6613		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
   6614		if (ret < 0)
   6615			return ret;
   6616	}
   6617
   6618	ret = 0;
   6619	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
   6620	    PageUptodate(page) ||
   6621	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
   6622		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
   6623		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
   6624		return ret;
   6625	}
   6626
   6627	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
   6628	eb->read_mirror = 0;
   6629	atomic_set(&eb->io_pages, 1);
   6630	check_buffer_tree_ref(eb);
   6631	btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
   6632
   6633	btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
   6634	ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
   6635				 page, eb->start, eb->len,
   6636				 eb->start - page_offset(page),
   6637				 end_bio_extent_readpage, mirror_num, 0,
   6638				 true);
   6639	if (ret) {
   6640		/*
   6641		 * In the endio function, if we hit something wrong we will
   6642		 * increase the io_pages, so here we need to decrease it for
   6643		 * error path.
   6644		 */
   6645		atomic_dec(&eb->io_pages);
   6646	}
   6647	if (bio_ctrl.bio) {
   6648		submit_one_bio(bio_ctrl.bio, mirror_num, 0);
   6649		bio_ctrl.bio = NULL;
   6650	}
   6651	if (ret || wait != WAIT_COMPLETE)
   6652		return ret;
   6653
   6654	wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
   6655	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
   6656		ret = -EIO;
   6657	return ret;
   6658}
   6659
   6660int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
   6661{
   6662	int i;
   6663	struct page *page;
   6664	int err;
   6665	int ret = 0;
   6666	int locked_pages = 0;
   6667	int all_uptodate = 1;
   6668	int num_pages;
   6669	unsigned long num_reads = 0;
   6670	struct btrfs_bio_ctrl bio_ctrl = { 0 };
   6671
   6672	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
   6673		return 0;
   6674
   6675	/*
   6676	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
   6677	 * operation, which could potentially still be in flight.  In this case
   6678	 * we simply want to return an error.
   6679	 */
   6680	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
   6681		return -EIO;
   6682
   6683	if (eb->fs_info->nodesize < PAGE_SIZE)
   6684		return read_extent_buffer_subpage(eb, wait, mirror_num);
   6685
   6686	num_pages = num_extent_pages(eb);
   6687	for (i = 0; i < num_pages; i++) {
   6688		page = eb->pages[i];
   6689		if (wait == WAIT_NONE) {
   6690			/*
   6691			 * WAIT_NONE is only utilized by readahead. If we can't
   6692			 * acquire the lock atomically it means either the eb
   6693			 * is being read out or under modification.
    6694			 * Either way the eb will be or has been cached, so
    6695			 * readahead can exit safely.
   6696			 */
   6697			if (!trylock_page(page))
   6698				goto unlock_exit;
   6699		} else {
   6700			lock_page(page);
   6701		}
   6702		locked_pages++;
   6703	}
   6704	/*
    6705	 * We need to lock all pages first to make sure that
   6706	 * the uptodate bit of our pages won't be affected by
   6707	 * clear_extent_buffer_uptodate().
   6708	 */
   6709	for (i = 0; i < num_pages; i++) {
   6710		page = eb->pages[i];
   6711		if (!PageUptodate(page)) {
   6712			num_reads++;
   6713			all_uptodate = 0;
   6714		}
   6715	}
   6716
   6717	if (all_uptodate) {
   6718		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
   6719		goto unlock_exit;
   6720	}
   6721
   6722	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
   6723	eb->read_mirror = 0;
   6724	atomic_set(&eb->io_pages, num_reads);
   6725	/*
   6726	 * It is possible for release_folio to clear the TREE_REF bit before we
   6727	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
   6728	 */
   6729	check_buffer_tree_ref(eb);
   6730	for (i = 0; i < num_pages; i++) {
   6731		page = eb->pages[i];
   6732
   6733		if (!PageUptodate(page)) {
   6734			if (ret) {
   6735				atomic_dec(&eb->io_pages);
   6736				unlock_page(page);
   6737				continue;
   6738			}
   6739
   6740			ClearPageError(page);
   6741			err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
   6742					 &bio_ctrl, page, page_offset(page),
   6743					 PAGE_SIZE, 0, end_bio_extent_readpage,
   6744					 mirror_num, 0, false);
   6745			if (err) {
   6746				/*
   6747				 * We failed to submit the bio so it's the
   6748				 * caller's responsibility to perform cleanup
    6749				 * i.e. unlock the page and set the error bit.
   6750				 */
   6751				ret = err;
   6752				SetPageError(page);
   6753				unlock_page(page);
   6754				atomic_dec(&eb->io_pages);
   6755			}
   6756		} else {
   6757			unlock_page(page);
   6758		}
   6759	}
   6760
   6761	if (bio_ctrl.bio) {
   6762		submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.compress_type);
   6763		bio_ctrl.bio = NULL;
   6764	}
   6765
   6766	if (ret || wait != WAIT_COMPLETE)
   6767		return ret;
   6768
   6769	for (i = 0; i < num_pages; i++) {
   6770		page = eb->pages[i];
   6771		wait_on_page_locked(page);
   6772		if (!PageUptodate(page))
   6773			ret = -EIO;
   6774	}
   6775
   6776	return ret;
   6777
   6778unlock_exit:
   6779	while (locked_pages > 0) {
   6780		locked_pages--;
   6781		page = eb->pages[locked_pages];
   6782		unlock_page(page);
   6783	}
   6784	return ret;
   6785}
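
/*
 * Editorial sketch, not part of the original file: how a hypothetical caller
 * would drive read_extent_buffer_pages().  The real blocking callers are in
 * disk-io.c; btrfs_readahead_tree_block() below uses WAIT_NONE.
 */
static inline int example_read_eb(struct extent_buffer *eb, int mirror_num)
{
	/*
	 * WAIT_COMPLETE: submit reads for the pages that are not yet uptodate
	 * and wait for them; 0 means every page ended up uptodate, -EIO
	 * otherwise.  WAIT_NONE would return right after bio submission
	 * (readahead) without waiting for the IO to finish.
	 */
	return read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
}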
   6786
   6787static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
   6788			    unsigned long len)
   6789{
   6790	btrfs_warn(eb->fs_info,
   6791		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
   6792		eb->start, eb->len, start, len);
   6793	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
   6794
   6795	return true;
   6796}
   6797
   6798/*
   6799 * Check if the [start, start + len) range is valid before reading/writing
   6800 * the eb.
    6801 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
    6802 *
    6803 * The caller should not touch the dst/src memory if this function returns an error.
   6804 */
   6805static inline int check_eb_range(const struct extent_buffer *eb,
   6806				 unsigned long start, unsigned long len)
   6807{
   6808	unsigned long offset;
   6809
   6810	/* start, start + len should not go beyond eb->len nor overflow */
   6811	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
   6812		return report_eb_range(eb, start, len);
   6813
   6814	return false;
   6815}
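
/*
 * Editorial examples, not part of the original file: with eb->len == 16384,
 * check_eb_range(eb, 16000, 384) passes (16000 + 384 == eb->len), while
 * check_eb_range(eb, 16000, 400) is rejected (16400 > 16384) and
 * check_eb_range(eb, ULONG_MAX - 8, 32) is rejected because the addition
 * wraps, which is exactly what check_add_overflow() guards against.
 */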
   6816
   6817void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
   6818			unsigned long start, unsigned long len)
   6819{
   6820	size_t cur;
   6821	size_t offset;
   6822	struct page *page;
   6823	char *kaddr;
   6824	char *dst = (char *)dstv;
   6825	unsigned long i = get_eb_page_index(start);
   6826
   6827	if (check_eb_range(eb, start, len))
   6828		return;
   6829
   6830	offset = get_eb_offset_in_page(eb, start);
   6831
   6832	while (len > 0) {
   6833		page = eb->pages[i];
   6834
   6835		cur = min(len, (PAGE_SIZE - offset));
   6836		kaddr = page_address(page);
   6837		memcpy(dst, kaddr + offset, cur);
   6838
   6839		dst += cur;
   6840		len -= cur;
   6841		offset = 0;
   6842		i++;
   6843	}
   6844}
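
/*
 * Editorial sketch, not part of the original file: the common pattern for
 * pulling a fixed-size on-disk structure out of an eb.  The helper name and
 * the @offset parameter are hypothetical; read_extent_buffer() transparently
 * handles ranges that straddle a page boundary.
 */
static inline void example_read_disk_key(const struct extent_buffer *eb,
					 unsigned long offset,
					 struct btrfs_disk_key *key)
{
	read_extent_buffer(eb, key, offset, sizeof(*key));
}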
   6845
   6846int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
   6847				       void __user *dstv,
   6848				       unsigned long start, unsigned long len)
   6849{
   6850	size_t cur;
   6851	size_t offset;
   6852	struct page *page;
   6853	char *kaddr;
   6854	char __user *dst = (char __user *)dstv;
   6855	unsigned long i = get_eb_page_index(start);
   6856	int ret = 0;
   6857
   6858	WARN_ON(start > eb->len);
    6859	WARN_ON(start + len > eb->len);
   6860
   6861	offset = get_eb_offset_in_page(eb, start);
   6862
   6863	while (len > 0) {
   6864		page = eb->pages[i];
   6865
   6866		cur = min(len, (PAGE_SIZE - offset));
   6867		kaddr = page_address(page);
   6868		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
   6869			ret = -EFAULT;
   6870			break;
   6871		}
   6872
   6873		dst += cur;
   6874		len -= cur;
   6875		offset = 0;
   6876		i++;
   6877	}
   6878
   6879	return ret;
   6880}
   6881
   6882int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
   6883			 unsigned long start, unsigned long len)
   6884{
   6885	size_t cur;
   6886	size_t offset;
   6887	struct page *page;
   6888	char *kaddr;
   6889	char *ptr = (char *)ptrv;
   6890	unsigned long i = get_eb_page_index(start);
   6891	int ret = 0;
   6892
   6893	if (check_eb_range(eb, start, len))
   6894		return -EINVAL;
   6895
   6896	offset = get_eb_offset_in_page(eb, start);
   6897
   6898	while (len > 0) {
   6899		page = eb->pages[i];
   6900
   6901		cur = min(len, (PAGE_SIZE - offset));
   6902
   6903		kaddr = page_address(page);
   6904		ret = memcmp(ptr, kaddr + offset, cur);
   6905		if (ret)
   6906			break;
   6907
   6908		ptr += cur;
   6909		len -= cur;
   6910		offset = 0;
   6911		i++;
   6912	}
   6913	return ret;
   6914}
   6915
   6916/*
   6917 * Check that the extent buffer is uptodate.
   6918 *
    6919 * For the regular sector size == PAGE_SIZE case, check if @page is uptodate.
    6920 * For the subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
   6921 */
   6922static void assert_eb_page_uptodate(const struct extent_buffer *eb,
   6923				    struct page *page)
   6924{
   6925	struct btrfs_fs_info *fs_info = eb->fs_info;
   6926
   6927	/*
    6928	 * If we are using the commit root we could potentially clear a page's
    6929	 * uptodate bit while we're using the extent buffer that we've previously
    6930	 * looked up.  We don't want to complain in this case, as the page was
    6931	 * valid before; we just didn't write it out.  Instead we want to catch
   6932	 * the case where we didn't actually read the block properly, which
   6933	 * would have !PageUptodate && !PageError, as we clear PageError before
   6934	 * reading.
   6935	 */
   6936	if (fs_info->nodesize < PAGE_SIZE) {
   6937		bool uptodate, error;
   6938
   6939		uptodate = btrfs_subpage_test_uptodate(fs_info, page,
   6940						       eb->start, eb->len);
   6941		error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
   6942		WARN_ON(!uptodate && !error);
   6943	} else {
   6944		WARN_ON(!PageUptodate(page) && !PageError(page));
   6945	}
   6946}
   6947
   6948void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
   6949		const void *srcv)
   6950{
   6951	char *kaddr;
   6952
   6953	assert_eb_page_uptodate(eb, eb->pages[0]);
   6954	kaddr = page_address(eb->pages[0]) +
   6955		get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
   6956						   chunk_tree_uuid));
   6957	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
   6958}
   6959
   6960void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
   6961{
   6962	char *kaddr;
   6963
   6964	assert_eb_page_uptodate(eb, eb->pages[0]);
   6965	kaddr = page_address(eb->pages[0]) +
   6966		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
   6967	memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
   6968}
   6969
   6970void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
   6971			 unsigned long start, unsigned long len)
   6972{
   6973	size_t cur;
   6974	size_t offset;
   6975	struct page *page;
   6976	char *kaddr;
   6977	char *src = (char *)srcv;
   6978	unsigned long i = get_eb_page_index(start);
   6979
   6980	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
   6981
   6982	if (check_eb_range(eb, start, len))
   6983		return;
   6984
   6985	offset = get_eb_offset_in_page(eb, start);
   6986
   6987	while (len > 0) {
   6988		page = eb->pages[i];
   6989		assert_eb_page_uptodate(eb, page);
   6990
   6991		cur = min(len, PAGE_SIZE - offset);
   6992		kaddr = page_address(page);
   6993		memcpy(kaddr + offset, src, cur);
   6994
   6995		src += cur;
   6996		len -= cur;
   6997		offset = 0;
   6998		i++;
   6999	}
   7000}
   7001
   7002void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
   7003		unsigned long len)
   7004{
   7005	size_t cur;
   7006	size_t offset;
   7007	struct page *page;
   7008	char *kaddr;
   7009	unsigned long i = get_eb_page_index(start);
   7010
   7011	if (check_eb_range(eb, start, len))
   7012		return;
   7013
   7014	offset = get_eb_offset_in_page(eb, start);
   7015
   7016	while (len > 0) {
   7017		page = eb->pages[i];
   7018		assert_eb_page_uptodate(eb, page);
   7019
   7020		cur = min(len, PAGE_SIZE - offset);
   7021		kaddr = page_address(page);
   7022		memset(kaddr + offset, 0, cur);
   7023
   7024		len -= cur;
   7025		offset = 0;
   7026		i++;
   7027	}
   7028}
   7029
   7030void copy_extent_buffer_full(const struct extent_buffer *dst,
   7031			     const struct extent_buffer *src)
   7032{
   7033	int i;
   7034	int num_pages;
   7035
   7036	ASSERT(dst->len == src->len);
   7037
   7038	if (dst->fs_info->nodesize >= PAGE_SIZE) {
   7039		num_pages = num_extent_pages(dst);
   7040		for (i = 0; i < num_pages; i++)
   7041			copy_page(page_address(dst->pages[i]),
   7042				  page_address(src->pages[i]));
   7043	} else {
   7044		size_t src_offset = get_eb_offset_in_page(src, 0);
   7045		size_t dst_offset = get_eb_offset_in_page(dst, 0);
   7046
   7047		ASSERT(src->fs_info->nodesize < PAGE_SIZE);
   7048		memcpy(page_address(dst->pages[0]) + dst_offset,
   7049		       page_address(src->pages[0]) + src_offset,
   7050		       src->len);
   7051	}
   7052}
   7053
   7054void copy_extent_buffer(const struct extent_buffer *dst,
   7055			const struct extent_buffer *src,
   7056			unsigned long dst_offset, unsigned long src_offset,
   7057			unsigned long len)
   7058{
   7059	u64 dst_len = dst->len;
   7060	size_t cur;
   7061	size_t offset;
   7062	struct page *page;
   7063	char *kaddr;
   7064	unsigned long i = get_eb_page_index(dst_offset);
   7065
   7066	if (check_eb_range(dst, dst_offset, len) ||
   7067	    check_eb_range(src, src_offset, len))
   7068		return;
   7069
   7070	WARN_ON(src->len != dst_len);
   7071
   7072	offset = get_eb_offset_in_page(dst, dst_offset);
   7073
   7074	while (len > 0) {
   7075		page = dst->pages[i];
   7076		assert_eb_page_uptodate(dst, page);
   7077
   7078		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
   7079
   7080		kaddr = page_address(page);
   7081		read_extent_buffer(src, kaddr + offset, src_offset, cur);
   7082
   7083		src_offset += cur;
   7084		len -= cur;
   7085		offset = 0;
   7086		i++;
   7087	}
   7088}
   7089
   7090/*
   7091 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
   7092 * given bit number
   7093 * @eb: the extent buffer
   7094 * @start: offset of the bitmap item in the extent buffer
   7095 * @nr: bit number
   7096 * @page_index: return index of the page in the extent buffer that contains the
   7097 * given bit number
   7098 * @page_offset: return offset into the page given by page_index
   7099 *
   7100 * This helper hides the ugliness of finding the byte in an extent buffer which
   7101 * contains a given bit.
   7102 */
   7103static inline void eb_bitmap_offset(const struct extent_buffer *eb,
   7104				    unsigned long start, unsigned long nr,
   7105				    unsigned long *page_index,
   7106				    size_t *page_offset)
   7107{
   7108	size_t byte_offset = BIT_BYTE(nr);
   7109	size_t offset;
   7110
   7111	/*
   7112	 * The byte we want is the offset of the extent buffer + the offset of
   7113	 * the bitmap item in the extent buffer + the offset of the byte in the
   7114	 * bitmap item.
   7115	 */
   7116	offset = start + offset_in_page(eb->start) + byte_offset;
   7117
   7118	*page_index = offset >> PAGE_SHIFT;
   7119	*page_offset = offset_in_page(offset);
   7120}
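
/*
 * Editorial example, not part of the original file: with 4K pages, a
 * page-aligned 16K eb, a bitmap item at eb offset 5000 and bit number 100:
 *   byte_offset  = BIT_BYTE(100)        = 12
 *   offset       = 5000 + 0 + 12        = 5012
 *   *page_index  = 5012 >> PAGE_SHIFT   = 1
 *   *page_offset = offset_in_page(5012) = 916
 * i.e. bit 100 of that bitmap lives in byte 916 of eb->pages[1].
 */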
   7121
   7122/**
   7123 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
   7124 * @eb: the extent buffer
   7125 * @start: offset of the bitmap item in the extent buffer
   7126 * @nr: bit number to test
   7127 */
   7128int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
   7129			   unsigned long nr)
   7130{
   7131	u8 *kaddr;
   7132	struct page *page;
   7133	unsigned long i;
   7134	size_t offset;
   7135
   7136	eb_bitmap_offset(eb, start, nr, &i, &offset);
   7137	page = eb->pages[i];
   7138	assert_eb_page_uptodate(eb, page);
   7139	kaddr = page_address(page);
   7140	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
   7141}
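
/*
 * Editorial note, not part of the original file: continuing the example
 * above (nr = 100), nr & (BITS_PER_BYTE - 1) == 4, so the helper returns
 * bit 4 of byte 916 in eb->pages[1].
 */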
   7142
   7143/**
   7144 * extent_buffer_bitmap_set - set an area of a bitmap
   7145 * @eb: the extent buffer
   7146 * @start: offset of the bitmap item in the extent buffer
   7147 * @pos: bit number of the first bit
   7148 * @len: number of bits to set
   7149 */
   7150void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
   7151			      unsigned long pos, unsigned long len)
   7152{
   7153	u8 *kaddr;
   7154	struct page *page;
   7155	unsigned long i;
   7156	size_t offset;
   7157	const unsigned int size = pos + len;
   7158	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
   7159	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
   7160
   7161	eb_bitmap_offset(eb, start, pos, &i, &offset);
   7162	page = eb->pages[i];
   7163	assert_eb_page_uptodate(eb, page);
   7164	kaddr = page_address(page);
   7165
   7166	while (len >= bits_to_set) {
   7167		kaddr[offset] |= mask_to_set;
   7168		len -= bits_to_set;
   7169		bits_to_set = BITS_PER_BYTE;
   7170		mask_to_set = ~0;
   7171		if (++offset >= PAGE_SIZE && len > 0) {
   7172			offset = 0;
   7173			page = eb->pages[++i];
   7174			assert_eb_page_uptodate(eb, page);
   7175			kaddr = page_address(page);
   7176		}
   7177	}
   7178	if (len) {
   7179		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
   7180		kaddr[offset] |= mask_to_set;
   7181	}
   7182}
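
/*
 * Editorial example, not part of the original file: setting pos = 3, len = 7.
 * BITMAP_FIRST_BYTE_MASK(3) is 0xf8, so the first loop iteration sets bits
 * 3..7 of the starting byte (bits_to_set = 5, len drops to 2) and advances
 * to the next byte.  The loop then exits and the tail uses
 * BITMAP_LAST_BYTE_MASK(pos + len) = BITMAP_LAST_BYTE_MASK(10) = 0x03 to set
 * bits 0..1 of that byte, for 7 bits set in total starting at bit 3.
 */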
   7183
   7184
   7185/**
   7186 * extent_buffer_bitmap_clear - clear an area of a bitmap
   7187 * @eb: the extent buffer
   7188 * @start: offset of the bitmap item in the extent buffer
   7189 * @pos: bit number of the first bit
   7190 * @len: number of bits to clear
   7191 */
   7192void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
   7193				unsigned long start, unsigned long pos,
   7194				unsigned long len)
   7195{
   7196	u8 *kaddr;
   7197	struct page *page;
   7198	unsigned long i;
   7199	size_t offset;
   7200	const unsigned int size = pos + len;
   7201	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
   7202	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
   7203
   7204	eb_bitmap_offset(eb, start, pos, &i, &offset);
   7205	page = eb->pages[i];
   7206	assert_eb_page_uptodate(eb, page);
   7207	kaddr = page_address(page);
   7208
   7209	while (len >= bits_to_clear) {
   7210		kaddr[offset] &= ~mask_to_clear;
   7211		len -= bits_to_clear;
   7212		bits_to_clear = BITS_PER_BYTE;
   7213		mask_to_clear = ~0;
   7214		if (++offset >= PAGE_SIZE && len > 0) {
   7215			offset = 0;
   7216			page = eb->pages[++i];
   7217			assert_eb_page_uptodate(eb, page);
   7218			kaddr = page_address(page);
   7219		}
   7220	}
   7221	if (len) {
   7222		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
   7223		kaddr[offset] &= ~mask_to_clear;
   7224	}
   7225}
   7226
   7227static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
   7228{
   7229	unsigned long distance = (src > dst) ? src - dst : dst - src;
   7230	return distance < len;
   7231}
   7232
   7233static void copy_pages(struct page *dst_page, struct page *src_page,
   7234		       unsigned long dst_off, unsigned long src_off,
   7235		       unsigned long len)
   7236{
   7237	char *dst_kaddr = page_address(dst_page);
   7238	char *src_kaddr;
   7239	int must_memmove = 0;
   7240
   7241	if (dst_page != src_page) {
   7242		src_kaddr = page_address(src_page);
   7243	} else {
   7244		src_kaddr = dst_kaddr;
   7245		if (areas_overlap(src_off, dst_off, len))
   7246			must_memmove = 1;
   7247	}
   7248
   7249	if (must_memmove)
   7250		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
   7251	else
   7252		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
   7253}
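
/*
 * Editorial example, not part of the original file: within a single page,
 * areas_overlap(100, 104, 8) is true (|104 - 100| = 4 < 8), so copy_pages()
 * must fall back to memmove(); areas_overlap(100, 120, 8) is false (20 >= 8),
 * so the cheaper memcpy() is safe.  Copies between two different pages can
 * never overlap and always take the memcpy() path.
 */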
   7254
   7255void memcpy_extent_buffer(const struct extent_buffer *dst,
   7256			  unsigned long dst_offset, unsigned long src_offset,
   7257			  unsigned long len)
   7258{
   7259	size_t cur;
   7260	size_t dst_off_in_page;
   7261	size_t src_off_in_page;
   7262	unsigned long dst_i;
   7263	unsigned long src_i;
   7264
   7265	if (check_eb_range(dst, dst_offset, len) ||
   7266	    check_eb_range(dst, src_offset, len))
   7267		return;
   7268
   7269	while (len > 0) {
   7270		dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
   7271		src_off_in_page = get_eb_offset_in_page(dst, src_offset);
   7272
   7273		dst_i = get_eb_page_index(dst_offset);
   7274		src_i = get_eb_page_index(src_offset);
   7275
   7276		cur = min(len, (unsigned long)(PAGE_SIZE -
   7277					       src_off_in_page));
   7278		cur = min_t(unsigned long, cur,
   7279			(unsigned long)(PAGE_SIZE - dst_off_in_page));
   7280
   7281		copy_pages(dst->pages[dst_i], dst->pages[src_i],
   7282			   dst_off_in_page, src_off_in_page, cur);
   7283
   7284		src_offset += cur;
   7285		dst_offset += cur;
   7286		len -= cur;
   7287	}
   7288}
   7289
   7290void memmove_extent_buffer(const struct extent_buffer *dst,
   7291			   unsigned long dst_offset, unsigned long src_offset,
   7292			   unsigned long len)
   7293{
   7294	size_t cur;
   7295	size_t dst_off_in_page;
   7296	size_t src_off_in_page;
   7297	unsigned long dst_end = dst_offset + len - 1;
   7298	unsigned long src_end = src_offset + len - 1;
   7299	unsigned long dst_i;
   7300	unsigned long src_i;
   7301
   7302	if (check_eb_range(dst, dst_offset, len) ||
   7303	    check_eb_range(dst, src_offset, len))
   7304		return;
   7305	if (dst_offset < src_offset) {
   7306		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
   7307		return;
   7308	}
   7309	while (len > 0) {
   7310		dst_i = get_eb_page_index(dst_end);
   7311		src_i = get_eb_page_index(src_end);
   7312
   7313		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
   7314		src_off_in_page = get_eb_offset_in_page(dst, src_end);
   7315
   7316		cur = min_t(unsigned long, len, src_off_in_page + 1);
   7317		cur = min(cur, dst_off_in_page + 1);
   7318		copy_pages(dst->pages[dst_i], dst->pages[src_i],
   7319			   dst_off_in_page - cur + 1,
   7320			   src_off_in_page - cur + 1, cur);
   7321
   7322		dst_end -= cur;
   7323		src_end -= cur;
   7324		len -= cur;
   7325	}
   7326}
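
/*
 * Editorial example, not part of the original file: moving len = 100 bytes
 * from eb offset 0 to eb offset 10 overlaps with dst > src, so
 * memmove_extent_buffer() copies backwards, starting with the chunk that
 * ends at dst_end = 109 / src_end = 99; that way no source byte is
 * overwritten before it has been read.  For dst < src it simply defers to
 * memcpy_extent_buffer(), which walks forwards.
 */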
   7327
   7328static struct extent_buffer *get_next_extent_buffer(
   7329		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
   7330{
   7331	struct extent_buffer *eb;
   7332	unsigned long index;
   7333	u64 page_start = page_offset(page);
   7334
   7335	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
   7336	lockdep_assert_held(&fs_info->buffer_lock);
   7337
   7338	xa_for_each_start(&fs_info->extent_buffers, index, eb,
   7339			  page_start >> fs_info->sectorsize_bits) {
   7340		if (in_range(eb->start, page_start, PAGE_SIZE))
   7341			return eb;
   7342		else if (eb->start >= page_start + PAGE_SIZE)
    7343			/* Already beyond page end */
   7344			return NULL;
   7345	}
   7346	return NULL;
   7347}
   7348
   7349static int try_release_subpage_extent_buffer(struct page *page)
   7350{
   7351	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
   7352	u64 cur = page_offset(page);
   7353	const u64 end = page_offset(page) + PAGE_SIZE;
   7354	int ret;
   7355
   7356	while (cur < end) {
   7357		struct extent_buffer *eb = NULL;
   7358
   7359		/*
    7360		 * Unlike try_release_extent_buffer() which uses page->private
    7361		 * to grab the buffer, for the subpage case we rely on the
    7362		 * extent buffer xarray, thus we need to ensure its consistency.
    7363		 *
    7364		 * We also want an atomic snapshot of it, thus go with a
    7365		 * spinlock rather than RCU.
   7366		 */
   7367		spin_lock(&fs_info->buffer_lock);
   7368		eb = get_next_extent_buffer(fs_info, page, cur);
   7369		if (!eb) {
   7370			/* No more eb in the page range after or at cur */
   7371			spin_unlock(&fs_info->buffer_lock);
   7372			break;
   7373		}
   7374		cur = eb->start + eb->len;
   7375
   7376		/*
   7377		 * The same as try_release_extent_buffer(), to ensure the eb
   7378		 * won't disappear out from under us.
   7379		 */
   7380		spin_lock(&eb->refs_lock);
   7381		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
   7382			spin_unlock(&eb->refs_lock);
   7383			spin_unlock(&fs_info->buffer_lock);
   7384			break;
   7385		}
   7386		spin_unlock(&fs_info->buffer_lock);
   7387
   7388		/*
   7389		 * If tree ref isn't set then we know the ref on this eb is a
    7390		 * real ref, so just return; this eb will likely be freed soon
   7391		 * anyway.
   7392		 */
   7393		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
   7394			spin_unlock(&eb->refs_lock);
   7395			break;
   7396		}
   7397
   7398		/*
    7399		 * Here we don't care about the return value; we will always
    7400		 * check the page private at the end, and
    7401		 * release_extent_buffer() will release the refs_lock.
   7402		 */
   7403		release_extent_buffer(eb);
   7404	}
   7405	/*
    7406	 * Finally, check if we have cleared the page private; if we have
    7407	 * released all ebs in the page, the page private should be cleared by now.
   7408	 */
   7409	spin_lock(&page->mapping->private_lock);
   7410	if (!PagePrivate(page))
   7411		ret = 1;
   7412	else
   7413		ret = 0;
   7414	spin_unlock(&page->mapping->private_lock);
   7415	return ret;
   7416
   7417}
   7418
   7419int try_release_extent_buffer(struct page *page)
   7420{
   7421	struct extent_buffer *eb;
   7422
   7423	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
   7424		return try_release_subpage_extent_buffer(page);
   7425
   7426	/*
   7427	 * We need to make sure nobody is changing page->private, as we rely on
    7428	 * page->private as the pointer to the extent buffer.
   7429	 */
   7430	spin_lock(&page->mapping->private_lock);
   7431	if (!PagePrivate(page)) {
   7432		spin_unlock(&page->mapping->private_lock);
   7433		return 1;
   7434	}
   7435
   7436	eb = (struct extent_buffer *)page->private;
   7437	BUG_ON(!eb);
   7438
   7439	/*
    7440	 * This is a little awful but should be OK; we need to make sure that
   7441	 * the eb doesn't disappear out from under us while we're looking at
   7442	 * this page.
   7443	 */
   7444	spin_lock(&eb->refs_lock);
   7445	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
   7446		spin_unlock(&eb->refs_lock);
   7447		spin_unlock(&page->mapping->private_lock);
   7448		return 0;
   7449	}
   7450	spin_unlock(&page->mapping->private_lock);
   7451
   7452	/*
   7453	 * If tree ref isn't set then we know the ref on this eb is a real ref,
    7454	 * so just return; this page will likely be freed soon anyway.
   7455	 */
   7456	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
   7457		spin_unlock(&eb->refs_lock);
   7458		return 0;
   7459	}
   7460
   7461	return release_extent_buffer(eb);
   7462}
   7463
   7464/*
   7465 * btrfs_readahead_tree_block - attempt to readahead a child block
   7466 * @fs_info:	the fs_info
   7467 * @bytenr:	bytenr to read
   7468 * @owner_root: objectid of the root that owns this eb
   7469 * @gen:	generation for the uptodate check, can be 0
   7470 * @level:	level for the eb
   7471 *
   7472 * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
   7473 * normal uptodate check of the eb, without checking the generation.  If we have
   7474 * to read the block we will not block on anything.
   7475 */
   7476void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
   7477				u64 bytenr, u64 owner_root, u64 gen, int level)
   7478{
   7479	struct extent_buffer *eb;
   7480	int ret;
   7481
   7482	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
   7483	if (IS_ERR(eb))
   7484		return;
   7485
   7486	if (btrfs_buffer_uptodate(eb, gen, 1)) {
   7487		free_extent_buffer(eb);
   7488		return;
   7489	}
   7490
   7491	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
   7492	if (ret < 0)
   7493		free_extent_buffer_stale(eb);
   7494	else
   7495		free_extent_buffer(eb);
   7496}
   7497
   7498/*
   7499 * btrfs_readahead_node_child - readahead a node's child block
   7500 * @node:	parent node we're reading from
   7501 * @slot:	slot in the parent node for the child we want to read
   7502 *
    7503 * A helper for btrfs_readahead_tree_block(); we simply read ahead the block
    7504 * whose bytenr is stored at @slot in the node provided.
   7505 */
   7506void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
   7507{
   7508	btrfs_readahead_tree_block(node->fs_info,
   7509				   btrfs_node_blockptr(node, slot),
   7510				   btrfs_header_owner(node),
   7511				   btrfs_node_ptr_generation(node, slot),
   7512				   btrfs_header_level(node) - 1);
   7513}