cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

reflink.c (28648B)


      1// SPDX-License-Identifier: GPL-2.0
      2
      3#include <linux/blkdev.h>
      4#include <linux/iversion.h>
      5#include "compression.h"
      6#include "ctree.h"
      7#include "delalloc-space.h"
      8#include "reflink.h"
      9#include "transaction.h"
     10#include "subpage.h"
     11
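       /*
        * Deduplication requests longer than this are split into chunks of at
        * most this size; see btrfs_extent_same() below.
        */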
     12#define BTRFS_MAX_DEDUPE_LEN	SZ_16M
     13
     14static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
     15				     struct inode *inode,
     16				     u64 endoff,
     17				     const u64 destoff,
     18				     const u64 olen,
     19				     int no_time_update)
     20{
     21	struct btrfs_root *root = BTRFS_I(inode)->root;
     22	int ret;
     23
     24	inode_inc_iversion(inode);
     25	if (!no_time_update)
     26		inode->i_mtime = inode->i_ctime = current_time(inode);
     27	/*
     28	 * We round up to the block size at eof when determining which
     29	 * extents to clone above, but shouldn't round up the file size.
     30	 */
     31	if (endoff > destoff + olen)
     32		endoff = destoff + olen;
     33	if (endoff > inode->i_size) {
     34		i_size_write(inode, endoff);
     35		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
     36	}
     37
     38	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
     39	if (ret) {
     40		btrfs_abort_transaction(trans, ret);
     41		btrfs_end_transaction(trans);
     42		goto out;
     43	}
     44	ret = btrfs_end_transaction(trans);
     45out:
     46	return ret;
     47}
     48
     49static int copy_inline_to_page(struct btrfs_inode *inode,
     50			       const u64 file_offset,
     51			       char *inline_data,
     52			       const u64 size,
     53			       const u64 datal,
     54			       const u8 comp_type)
     55{
     56	struct btrfs_fs_info *fs_info = inode->root->fs_info;
     57	const u32 block_size = fs_info->sectorsize;
     58	const u64 range_end = file_offset + block_size - 1;
     59	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
     60	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
     61	struct extent_changeset *data_reserved = NULL;
     62	struct page *page = NULL;
     63	struct address_space *mapping = inode->vfs_inode.i_mapping;
     64	int ret;
     65
     66	ASSERT(IS_ALIGNED(file_offset, block_size));
     67
     68	/*
     69	 * We have flushed and locked the ranges of the source and destination
     70	 * inodes, and we have also locked the inodes, so we are safe to do a
     71	 * reservation here. Also we must not do the reservation while holding
     72	 * a transaction open, otherwise we would deadlock.
     73	 */
     74	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
     75					   block_size);
     76	if (ret)
     77		goto out;
     78
     79	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
     80				   btrfs_alloc_write_mask(mapping));
     81	if (!page) {
     82		ret = -ENOMEM;
     83		goto out_unlock;
     84	}
     85
     86	ret = set_page_extent_mapped(page);
     87	if (ret < 0)
     88		goto out_unlock;
     89
     90	clear_extent_bit(&inode->io_tree, file_offset, range_end,
     91			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
     92			 0, 0, NULL);
     93	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
     94	if (ret)
     95		goto out_unlock;
     96
     97	/*
     98	 * After dirtying the page our caller will need to start a transaction,
     99	 * and if we are low on metadata free space, that can cause flushing of
    100	 * delalloc for all inodes in order to get metadata space released.
    101	 * However we are holding the range locked for the whole duration of
    102	 * the clone/dedupe operation, so we may deadlock if that happens and no
    103	 * other task releases enough space. So mark this inode as not being
    104	 * flushable, to avoid such a deadlock. We will clear that flag
    105	 * when we finish cloning all extents, since a transaction is started
    106	 * after finding each extent to clone.
    107	 */
    108	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
    109
    110	if (comp_type == BTRFS_COMPRESS_NONE) {
    111		memcpy_to_page(page, offset_in_page(file_offset), data_start,
    112			       datal);
    113		flush_dcache_page(page);
    114	} else {
    115		ret = btrfs_decompress(comp_type, data_start, page,
    116				       offset_in_page(file_offset),
    117				       inline_size, datal);
    118		if (ret)
    119			goto out_unlock;
    120		flush_dcache_page(page);
    121	}
    122
    123	/*
    124	 * If our inline data is smaller than the block/page size, then the
    125	 * remainder of the block/page is equivalent to zeroes. We had something
    126	 * like the following done:
    127	 *
    128	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
    129	 * $ sync  # (or fsync)
    130	 * $ xfs_io -c "falloc 0 4K" file
    131	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
    132	 *
    133	 * So what's in the range [500, 4095] corresponds to zeroes.
    134	 */
    135	if (datal < block_size) {
    136		memzero_page(page, datal, block_size - datal);
    137		flush_dcache_page(page);
    138	}
    139
    140	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
    141	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
    142	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
    143out_unlock:
    144	if (page) {
    145		unlock_page(page);
    146		put_page(page);
    147	}
    148	if (ret)
    149		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
    150					     block_size, true);
    151	btrfs_delalloc_release_extents(inode, block_size);
    152out:
    153	extent_changeset_free(data_reserved);
    154
    155	return ret;
    156}
    157
    158/*
    159 * Deal with cloning of inline extents. We try to copy the inline extent from
    160 * the source inode to the destination inode when possible. When not possible we
    161 * copy the inline extent's data into the respective page of the destination inode.
    162 */
    163static int clone_copy_inline_extent(struct inode *dst,
    164				    struct btrfs_path *path,
    165				    struct btrfs_key *new_key,
    166				    const u64 drop_start,
    167				    const u64 datal,
    168				    const u64 size,
    169				    const u8 comp_type,
    170				    char *inline_data,
    171				    struct btrfs_trans_handle **trans_out)
    172{
    173	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
    174	struct btrfs_root *root = BTRFS_I(dst)->root;
    175	const u64 aligned_end = ALIGN(new_key->offset + datal,
    176				      fs_info->sectorsize);
    177	struct btrfs_trans_handle *trans = NULL;
    178	struct btrfs_drop_extents_args drop_args = { 0 };
    179	int ret;
    180	struct btrfs_key key;
    181
    182	if (new_key->offset > 0) {
    183		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
    184					  inline_data, size, datal, comp_type);
    185		goto out;
    186	}
    187
    188	key.objectid = btrfs_ino(BTRFS_I(dst));
    189	key.type = BTRFS_EXTENT_DATA_KEY;
    190	key.offset = 0;
    191	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
    192	if (ret < 0) {
    193		return ret;
    194	} else if (ret > 0) {
    195		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
    196			ret = btrfs_next_leaf(root, path);
    197			if (ret < 0)
    198				return ret;
    199			else if (ret > 0)
    200				goto copy_inline_extent;
    201		}
    202		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
    203		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
    204		    key.type == BTRFS_EXTENT_DATA_KEY) {
    205			/*
    206			 * There's an implicit hole at file offset 0, so copy the
    207			 * inline extent's data to the page.
    208			 */
    209			ASSERT(key.offset > 0);
    210			goto copy_to_page;
    211		}
    212	} else if (i_size_read(dst) <= datal) {
    213		struct btrfs_file_extent_item *ei;
    214
    215		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
    216				    struct btrfs_file_extent_item);
    217		/*
    218		 * If it's an inline extent replace it with the source inline
    219		 * extent, otherwise copy the source inline extent data into
    220		 * the respective page at the destination inode.
    221		 */
    222		if (btrfs_file_extent_type(path->nodes[0], ei) ==
    223		    BTRFS_FILE_EXTENT_INLINE)
    224			goto copy_inline_extent;
    225
    226		goto copy_to_page;
    227	}
    228
    229copy_inline_extent:
    230	/*
    231	 * We have no extent items, or we have an extent at offset 0 which may
    232	 * or may not be inlined. All these cases are dealt with in the same way.
    233	 */
    234	if (i_size_read(dst) > datal) {
    235		/*
    236		 * At the destination offset 0 we have either a hole, a regular
    237	 * extent or an inline extent larger than the one we want to
    238		 * clone. Deal with all these cases by copying the inline extent
    239		 * data into the respective page at the destination inode.
    240		 */
    241		goto copy_to_page;
    242	}
    243
    244	/*
    245	 * Release path before starting a new transaction so we don't hold locks
    246	 * that would confuse lockdep.
    247	 */
    248	btrfs_release_path(path);
    249	/*
    250	 * If we end up here it means we copy the inline extent into a leaf
    251	 * of the destination inode. We know we will drop or adjust at most one
    252	 * extent item in the destination root.
    253	 *
    254	 * 1 unit - adjusting old extent (we may have to split it)
    255	 * 1 unit - add new extent
    256	 * 1 unit - inode update
    257	 */
    258	trans = btrfs_start_transaction(root, 3);
    259	if (IS_ERR(trans)) {
    260		ret = PTR_ERR(trans);
    261		trans = NULL;
    262		goto out;
    263	}
    264	drop_args.path = path;
    265	drop_args.start = drop_start;
    266	drop_args.end = aligned_end;
    267	drop_args.drop_cache = true;
    268	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
    269	if (ret)
    270		goto out;
    271	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
    272	if (ret)
    273		goto out;
    274
    275	write_extent_buffer(path->nodes[0], inline_data,
    276			    btrfs_item_ptr_offset(path->nodes[0],
    277						  path->slots[0]),
    278			    size);
    279	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
    280	btrfs_set_inode_full_sync(BTRFS_I(dst));
    281	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
    282out:
    283	if (!ret && !trans) {
    284		/*
    285		 * No transaction here means we copied the inline extent into a
    286		 * page of the destination inode.
    287		 *
    288		 * 1 unit to update inode item
    289		 */
    290		trans = btrfs_start_transaction(root, 1);
    291		if (IS_ERR(trans)) {
    292			ret = PTR_ERR(trans);
    293			trans = NULL;
    294		}
    295	}
    296	if (ret && trans) {
    297		btrfs_abort_transaction(trans, ret);
    298		btrfs_end_transaction(trans);
    299	}
    300	if (!ret)
    301		*trans_out = trans;
    302
    303	return ret;
    304
    305copy_to_page:
    306	/*
    307	 * Release our path because we don't need it anymore and also because
    308	 * copy_inline_to_page() needs to reserve data and metadata, which may
    309	 * need to flush delalloc when we are low on available space and
    310	 * therefore cause a deadlock if writeback of an inline extent needs to
    311	 * write to the same leaf or an ordered extent completion needs to write
    312	 * to the same leaf.
    313	 */
    314	btrfs_release_path(path);
    315
    316	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
    317				  inline_data, size, datal, comp_type);
    318	goto out;
    319}
    320
    321/**
    322 * btrfs_clone() - clone a range from one file's inode to another
    323 *
    324 * @src: Inode to clone from
    325 * @inode: Inode to clone to
    326 * @off: Offset within source to start clone from
    327 * @olen: Original length, passed by user, of range to clone
    328 * @olen_aligned: Block-aligned value of olen
    329 * @destoff: Offset within @inode to start clone
    330 * @no_time_update: Whether to update mtime/ctime on the target inode
    331 */
    332static int btrfs_clone(struct inode *src, struct inode *inode,
    333		       const u64 off, const u64 olen, const u64 olen_aligned,
    334		       const u64 destoff, int no_time_update)
    335{
    336	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
    337	struct btrfs_path *path = NULL;
    338	struct extent_buffer *leaf;
    339	struct btrfs_trans_handle *trans;
    340	char *buf = NULL;
    341	struct btrfs_key key;
    342	u32 nritems;
    343	int slot;
    344	int ret;
    345	const u64 len = olen_aligned;
    346	u64 last_dest_end = destoff;
    347	u64 prev_extent_end = off;
    348
    349	ret = -ENOMEM;
    350	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
    351	if (!buf)
    352		return ret;
    353
    354	path = btrfs_alloc_path();
    355	if (!path) {
    356		kvfree(buf);
    357		return ret;
    358	}
    359
    360	path->reada = READA_FORWARD;
    361	/* Clone data */
    362	key.objectid = btrfs_ino(BTRFS_I(src));
    363	key.type = BTRFS_EXTENT_DATA_KEY;
    364	key.offset = off;
    365
    366	while (1) {
    367		struct btrfs_file_extent_item *extent;
    368		u64 extent_gen;
    369		int type;
    370		u32 size;
    371		struct btrfs_key new_key;
    372		u64 disko = 0, diskl = 0;
    373		u64 datao = 0, datal = 0;
    374		u8 comp;
    375		u64 drop_start;
    376
    377		/* Note the key will change type as we walk through the tree */
    378		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
    379				0, 0);
    380		if (ret < 0)
    381			goto out;
    382		/*
    383		 * On the first search, if no extent item that starts at offset off
    384		 * was found but the previous item is an extent item, it may overlap
    385		 * our target range, so process it.
    386		 */
    387		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
    388			btrfs_item_key_to_cpu(path->nodes[0], &key,
    389					      path->slots[0] - 1);
    390			if (key.type == BTRFS_EXTENT_DATA_KEY)
    391				path->slots[0]--;
    392		}
    393
    394		nritems = btrfs_header_nritems(path->nodes[0]);
    395process_slot:
    396		if (path->slots[0] >= nritems) {
    397			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
    398			if (ret < 0)
    399				goto out;
    400			if (ret > 0)
    401				break;
    402			nritems = btrfs_header_nritems(path->nodes[0]);
    403		}
    404		leaf = path->nodes[0];
    405		slot = path->slots[0];
    406
    407		btrfs_item_key_to_cpu(leaf, &key, slot);
    408		if (key.type > BTRFS_EXTENT_DATA_KEY ||
    409		    key.objectid != btrfs_ino(BTRFS_I(src)))
    410			break;
    411
    412		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
    413
    414		extent = btrfs_item_ptr(leaf, slot,
    415					struct btrfs_file_extent_item);
    416		extent_gen = btrfs_file_extent_generation(leaf, extent);
    417		comp = btrfs_file_extent_compression(leaf, extent);
    418		type = btrfs_file_extent_type(leaf, extent);
    419		if (type == BTRFS_FILE_EXTENT_REG ||
    420		    type == BTRFS_FILE_EXTENT_PREALLOC) {
    421			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
    422			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
    423			datao = btrfs_file_extent_offset(leaf, extent);
    424			datal = btrfs_file_extent_num_bytes(leaf, extent);
    425		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
    426			/* Take upper bound, may be compressed */
    427			datal = btrfs_file_extent_ram_bytes(leaf, extent);
    428		}
    429
    430		/*
    431		 * The first search might have left us at an extent item that
    432		 * ends before our target range's start, which can happen if we have
    433		 * holes and the NO_HOLES feature enabled.
    434		 *
    435		 * Subsequent searches may leave us on a file range we have
    436		 * processed before - this happens due to a race with ordered
    437		 * extent completion for a file range that is outside our source
    438		 * range, but that range was part of a file extent item that
    439		 * also covered a leading part of our source range.
    440		 */
    441		if (key.offset + datal <= prev_extent_end) {
    442			path->slots[0]++;
    443			goto process_slot;
    444		} else if (key.offset >= off + len) {
    445			break;
    446		}
    447
    448		prev_extent_end = key.offset + datal;
    449		size = btrfs_item_size(leaf, slot);
    450		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
    451				   size);
    452
    453		btrfs_release_path(path);
    454
    455		memcpy(&new_key, &key, sizeof(new_key));
    456		new_key.objectid = btrfs_ino(BTRFS_I(inode));
    457		if (off <= key.offset)
    458			new_key.offset = key.offset + destoff - off;
    459		else
    460			new_key.offset = destoff;
    461
    462		/*
    463		 * Deal with a hole that doesn't have an extent item that
    464		 * represents it (NO_HOLES feature enabled).
    465		 * This hole is either in the middle of the cloning range or at
    466		 * the beginning (fully overlaps it or partially overlaps it).
    467		 */
    468		if (new_key.offset != last_dest_end)
    469			drop_start = last_dest_end;
    470		else
    471			drop_start = new_key.offset;
    472
    473		if (type == BTRFS_FILE_EXTENT_REG ||
    474		    type == BTRFS_FILE_EXTENT_PREALLOC) {
    475			struct btrfs_replace_extent_info clone_info;
    476
    477			/*
    478			 *    a  | --- range to clone ---|  b
    479			 * | ------------- extent ------------- |
    480			 */
    481
    482			/* Subtract range b */
    483			if (key.offset + datal > off + len)
    484				datal = off + len - key.offset;
    485
    486			/* Subtract range a */
    487			if (off > key.offset) {
    488				datao += off - key.offset;
    489				datal -= off - key.offset;
    490			}
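       			/*
       			 * Illustrative example (values are hypothetical): if the
       			 * extent item at key.offset 0 covers 128K and the clone
       			 * range is [16K, 80K), subtracting range b trims datal to
       			 * 80K, and subtracting range a adds 16K to datao and trims
       			 * datal to 64K.
       			 */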
    491
    492			clone_info.disk_offset = disko;
    493			clone_info.disk_len = diskl;
    494			clone_info.data_offset = datao;
    495			clone_info.data_len = datal;
    496			clone_info.file_offset = new_key.offset;
    497			clone_info.extent_buf = buf;
    498			clone_info.is_new_extent = false;
    499			clone_info.update_times = !no_time_update;
    500			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
    501					drop_start, new_key.offset + datal - 1,
    502					&clone_info, &trans);
    503			if (ret)
    504				goto out;
    505		} else {
    506			ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
    507			/*
    508			 * Inline extents always have to start at file offset 0
    509			 * and can never be bigger than the sector size. We can
    510			 * never clone only parts of an inline extent, since all
    511			 * reflink operations must start at a sector size aligned
    512			 * offset, and the length must be aligned too or end at
    513			 * the i_size (which implies the whole inlined data).
    514			 */
    515			ASSERT(key.offset == 0);
    516			ASSERT(datal <= fs_info->sectorsize);
    517			if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
    518			    WARN_ON(key.offset != 0) ||
    519			    WARN_ON(datal > fs_info->sectorsize)) {
    520				ret = -EUCLEAN;
    521				goto out;
    522			}
    523
    524			ret = clone_copy_inline_extent(inode, path, &new_key,
    525						       drop_start, datal, size,
    526						       comp, buf, &trans);
    527			if (ret)
    528				goto out;
    529		}
    530
    531		btrfs_release_path(path);
    532
    533		/*
    534		 * Whenever we share an extent we update the last_reflink_trans
    535		 * of each inode to the current transaction. This is needed to
    536		 * make sure fsync does not log multiple checksum items with
    537		 * overlapping ranges (because some extent items might refer
    538		 * only to sections of the original extent). For the destination
    539		 * inode we do this regardless of the generation of the extents
    540		 * or even if they are inline extents or explicit holes, to make
    541		 * sure a full fsync does not skip them. For the source inode,
    542		 * we only need to update last_reflink_trans in case it's a new
    543		 * extent that is not a hole or an inline extent, to deal with
    544		 * the checksums problem on fsync.
    545		 */
    546		if (extent_gen == trans->transid && disko > 0)
    547			BTRFS_I(src)->last_reflink_trans = trans->transid;
    548
    549		BTRFS_I(inode)->last_reflink_trans = trans->transid;
    550
    551		last_dest_end = ALIGN(new_key.offset + datal,
    552				      fs_info->sectorsize);
    553		ret = clone_finish_inode_update(trans, inode, last_dest_end,
    554						destoff, olen, no_time_update);
    555		if (ret)
    556			goto out;
    557		if (new_key.offset + datal >= destoff + len)
    558			break;
    559
    560		btrfs_release_path(path);
    561		key.offset = prev_extent_end;
    562
    563		if (fatal_signal_pending(current)) {
    564			ret = -EINTR;
    565			goto out;
    566		}
    567
    568		cond_resched();
    569	}
    570	ret = 0;
    571
    572	if (last_dest_end < destoff + len) {
    573		/*
    574		 * We have an implicit hole that fully or partially overlaps our
    575		 * cloning range at its end. This means that we either have the
    576		 * NO_HOLES feature enabled or the implicit hole happened due to
    577		 * mixing buffered and direct IO writes against this file.
    578		 */
    579		btrfs_release_path(path);
    580
    581		/*
    582		 * When using NO_HOLES and we are cloning a range that covers
    583		 * only a hole (no extents) into a range beyond the current
    584		 * i_size, punching a hole in the target range will not create
    585		 * an extent map defining a hole, because the range starts at or
    586		 * beyond current i_size. If the file previously had an i_size
    587		 * greater than the new i_size set by this clone operation, we
    588		 * need to make sure the next fsync is a full fsync, so that it
    589		 * detects and logs a hole covering a range from the current
    590		 * i_size to the new i_size. If the clone range covers extents,
    591		 * besides a hole, then we know the full sync flag was already
    592		 * set by previous calls to btrfs_replace_file_extents() that
    593		 * replaced file extent items.
    594		 */
    595		if (last_dest_end >= i_size_read(inode))
    596			btrfs_set_inode_full_sync(BTRFS_I(inode));
    597
    598		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
    599				last_dest_end, destoff + len - 1, NULL, &trans);
    600		if (ret)
    601			goto out;
    602
    603		ret = clone_finish_inode_update(trans, inode, destoff + len,
    604						destoff, olen, no_time_update);
    605	}
    606
    607out:
    608	btrfs_free_path(path);
    609	kvfree(buf);
    610	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
    611
    612	return ret;
    613}
    614
    615static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
    616				       struct inode *inode2, u64 loff2, u64 len)
    617{
    618	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
    619	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
    620}
    621
    622static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
    623				     struct inode *inode2, u64 loff2, u64 len)
    624{
    625	u64 range1_end = loff1 + len - 1;
    626	u64 range2_end = loff2 + len - 1;
    627
    628	if (inode1 < inode2) {
    629		swap(inode1, inode2);
    630		swap(loff1, loff2);
    631		swap(range1_end, range2_end);
    632	} else if (inode1 == inode2 && loff2 < loff1) {
    633		swap(loff1, loff2);
    634		swap(range1_end, range2_end);
    635	}
    636
    637	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end);
    638	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end);
    639
    640	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
    641	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
    642}
    643
    644static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
    645{
    646	if (inode1 < inode2)
    647		swap(inode1, inode2);
    648	down_write(&BTRFS_I(inode1)->i_mmap_lock);
    649	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
    650}
    651
    652static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
    653{
    654	up_write(&BTRFS_I(inode1)->i_mmap_lock);
    655	up_write(&BTRFS_I(inode2)->i_mmap_lock);
    656}
    657
    658static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
    659				   struct inode *dst, u64 dst_loff)
    660{
    661	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
    662	int ret;
    663
    664	/*
    665	 * Lock destination range to serialize with concurrent readahead() and
    666	 * source range to serialize with relocation.
    667	 */
    668	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
    669	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
    670	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
    671
    672	return ret;
    673}
    674
    675static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
    676			     struct inode *dst, u64 dst_loff)
    677{
    678	int ret = 0;
    679	u64 i, tail_len, chunk_count;
    680	struct btrfs_root *root_dst = BTRFS_I(dst)->root;
    681
    682	spin_lock(&root_dst->root_item_lock);
    683	if (root_dst->send_in_progress) {
    684		btrfs_warn_rl(root_dst->fs_info,
    685"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
    686			      root_dst->root_key.objectid,
    687			      root_dst->send_in_progress);
    688		spin_unlock(&root_dst->root_item_lock);
    689		return -EAGAIN;
    690	}
    691	root_dst->dedupe_in_progress++;
    692	spin_unlock(&root_dst->root_item_lock);
    693
    694	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
    695	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
    696
    697	for (i = 0; i < chunk_count; i++) {
    698		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
    699					      dst, dst_loff);
    700		if (ret)
    701			goto out;
    702
    703		loff += BTRFS_MAX_DEDUPE_LEN;
    704		dst_loff += BTRFS_MAX_DEDUPE_LEN;
    705	}
    706
    707	if (tail_len > 0)
    708		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
    709out:
    710	spin_lock(&root_dst->root_item_lock);
    711	root_dst->dedupe_in_progress--;
    712	spin_unlock(&root_dst->root_item_lock);
    713
    714	return ret;
    715}
    716
    717static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
    718					u64 off, u64 olen, u64 destoff)
    719{
    720	struct inode *inode = file_inode(file);
    721	struct inode *src = file_inode(file_src);
    722	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
    723	int ret;
    724	int wb_ret;
    725	u64 len = olen;
    726	u64 bs = fs_info->sb->s_blocksize;
    727
    728	/*
    729	 * VFS's generic_remap_file_range_prep() protects us from cloning the
    730	 * eof block into the middle of a file, which would result in corruption
    731	 * if the file size is not blocksize aligned. So we don't need to check
    732	 * for that case here.
    733	 */
    734	if (off + len == src->i_size)
    735		len = ALIGN(src->i_size, bs) - off;
    736
    737	if (destoff > inode->i_size) {
    738		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
    739
    740		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
    741		if (ret)
    742			return ret;
    743		/*
    744		 * We may have truncated the last block if the inode's size is
    745		 * not sector size aligned, so we need to wait for writeback to
    746		 * complete before proceeding further, otherwise we can race
    747		 * with cloning and attempt to increment a reference to an
    748		 * extent that no longer exists (writeback completed right after
    749		 * we found the previous extent covering eof and before we
    750		 * attempted to increment its reference count).
    751		 */
    752		ret = btrfs_wait_ordered_range(inode, wb_start,
    753					       destoff - wb_start);
    754		if (ret)
    755			return ret;
    756	}
    757
    758	/*
    759	 * Lock destination range to serialize with concurrent readahead() and
    760	 * source range to serialize with relocation.
    761	 */
    762	btrfs_double_extent_lock(src, off, inode, destoff, len);
    763	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
    764	btrfs_double_extent_unlock(src, off, inode, destoff, len);
    765
    766	/*
    767	 * We may have copied an inline extent into a page of the destination
    768	 * range, so wait for writeback to complete before truncating pages
    769	 * from the page cache. This is a rare case.
    770	 */
    771	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
    772	ret = ret ? ret : wb_ret;
    773	/*
    774	 * Truncate page cache pages so that future reads will see the cloned
    775	 * data immediately and not the previous data.
    776	 */
    777	truncate_inode_pages_range(&inode->i_data,
    778				round_down(destoff, PAGE_SIZE),
    779				round_up(destoff + len, PAGE_SIZE) - 1);
    780
    781	return ret;
    782}
    783
    784static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
    785				       struct file *file_out, loff_t pos_out,
    786				       loff_t *len, unsigned int remap_flags)
    787{
    788	struct inode *inode_in = file_inode(file_in);
    789	struct inode *inode_out = file_inode(file_out);
    790	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
    791	u64 wb_len;
    792	int ret;
    793
    794	if (!(remap_flags & REMAP_FILE_DEDUP)) {
    795		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;
    796
    797		if (btrfs_root_readonly(root_out))
    798			return -EROFS;
    799
    800		ASSERT(inode_in->i_sb == inode_out->i_sb);
    801	}
    802
    803	/* Don't make the dst file partly checksummed */
    804	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
    805	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
    806		return -EINVAL;
    807	}
    808
    809	/*
    810	 * Now that the inodes are locked, we need to start writeback ourselves
    811	 * and can not rely on the writeback from the VFS's generic helper
    812	 * generic_remap_file_range_prep() because:
    813	 *
    814	 * 1) For compression we must call filemap_fdatawrite_range() on the
    815	 *    range twice (btrfs_fdatawrite_range() does it for us), and the generic
    816	 *    helper only calls it once;
    817	 *
    818	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
    819	 *    waits for the writeback to complete, i.e. for IO to be done, and
    820	 *    not for the ordered extents to complete. We need to wait for them
    821	 *    to complete so that new file extent items are in the fs tree.
    822	 */
    823	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
    824		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
    825	else
    826		wb_len = ALIGN(*len, bs);
    827
    828	/*
    829	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
    830	 *
    831	 * Btrfs' back references do not have block level granularity; they
    832	 * work at the whole extent level.
    833	 * A NOCOW buffered write without data space reserved may not be able
    834	 * to fall back to CoW due to lack of data space, and thus could cause
    835	 * data loss.
    836	 *
    837	 * Here we take a shortcut by flushing the whole inode, so that all
    838	 * NOCOW writes reach disk as NOCOW before we increase the
    839	 * reference count of the extent. We could do better by only flushing NOCOW
    840	 * data, but that needs extra accounting.
    841	 *
    842	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
    843	 * CoWed anyway, so they do not affect the NOCOW part.
    844	 */
    845	ret = filemap_flush(inode_in->i_mapping);
    846	if (ret < 0)
    847		return ret;
    848
    849	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
    850				       wb_len);
    851	if (ret < 0)
    852		return ret;
    853	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
    854				       wb_len);
    855	if (ret < 0)
    856		return ret;
    857
    858	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
    859					    len, remap_flags);
    860}
    861
    862static bool file_sync_write(const struct file *file)
    863{
    864	if (file->f_flags & (__O_SYNC | O_DSYNC))
    865		return true;
    866	if (IS_SYNC(file_inode(file)))
    867		return true;
    868
    869	return false;
    870}
    871
    872loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
    873		struct file *dst_file, loff_t destoff, loff_t len,
    874		unsigned int remap_flags)
    875{
    876	struct inode *src_inode = file_inode(src_file);
    877	struct inode *dst_inode = file_inode(dst_file);
    878	bool same_inode = dst_inode == src_inode;
    879	int ret;
    880
    881	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
    882		return -EINVAL;
    883
    884	if (same_inode) {
    885		btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
    886	} else {
    887		lock_two_nondirectories(src_inode, dst_inode);
    888		btrfs_double_mmap_lock(src_inode, dst_inode);
    889	}
    890
    891	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
    892					  &len, remap_flags);
    893	if (ret < 0 || len == 0)
    894		goto out_unlock;
    895
    896	if (remap_flags & REMAP_FILE_DEDUP)
    897		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
    898	else
    899		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
    900
    901out_unlock:
    902	if (same_inode) {
    903		btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
    904	} else {
    905		btrfs_double_mmap_unlock(src_inode, dst_inode);
    906		unlock_two_nondirectories(src_inode, dst_inode);
    907	}
    908
    909	/*
    910	 * If either the source or the destination file was opened with O_SYNC,
    911	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
    912	 * source files/ranges, so that a successful return (0) followed by a
    913	 * power failure still results in the reflinked data being readable
    914	 * from both files/ranges.
    915	 */
    916	if (ret == 0 && len > 0 &&
    917	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
    918		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
    919		if (ret == 0)
    920			ret = btrfs_sync_file(dst_file, destoff,
    921					      destoff + len - 1, 0);
    922	}
    923
    924	return ret < 0 ? ret : len;
    925}
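
The entry point above, btrfs_remap_file_range(), is what the VFS invokes for the FICLONE/FICLONERANGE and FIDEDUPERANGE ioctls (and for copy_file_range() when it can reflink). As a minimal userspace sketch, assuming both files live on the same btrfs filesystem and using hypothetical file names with minimal error handling, a range clone that exercises this code could look like this:

/* Sketch only: clone src.dat into dst.dat via FICLONERANGE. */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int src = open("src.dat", O_RDONLY);
	int dst = open("dst.dat", O_WRONLY | O_CREAT, 0644);
	struct file_clone_range fcr = {
		.src_fd = src,
		.src_offset = 0,
		.src_length = 0,	/* 0 means "clone up to the source EOF" */
		.dest_offset = 0,
	};

	if (src < 0 || dst < 0 || ioctl(dst, FICLONERANGE, &fcr) < 0) {
		perror("reflink");
		return 1;
	}
	close(src);
	close(dst);
	return 0;
}

Deduplication takes the analogous FIDEDUPERANGE path, which ends up in btrfs_extent_same() above and is processed internally in chunks of at most BTRFS_MAX_DEDUPE_LEN (16 MiB).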