// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks.  This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism.  At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible.  This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite).  If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock.  (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.)  delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing out of
 * order nearby CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar.  The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded.  IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork.  This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents.  This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written.  Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd.  For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork.  Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out -- we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written.  This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type.  This is required for direct io
 * because we only have ioend for the whole dio, and we have to be able to
 * remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure.  Better yet, the more ground we can cover with one
 * ioend, the better.
 */
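/*
 * To make the lifecycle above concrete, here is a minimal userspace
 * sketch (for illustration only, not part of this file) that drives the
 * CoW path: clone a file with the FICLONE ioctl so every block is
 * shared, then overwrite part of the clone.  The overwrite is what
 * lands in the CoW fork and is remapped into the data fork at I/O
 * completion.  File names are hypothetical and error handling is
 * abbreviated.
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		int src = open("data.orig", O_RDONLY);
 *		int dst = open("data.clone", O_RDWR | O_CREAT, 0644);
 *		char buf[4096] = { 0 };
 *
 *		if (src < 0 || dst < 0)
 *			err(1, "open");
 *		if (ioctl(dst, FICLONE, src))
 *			err(1, "FICLONE");
 *		if (pwrite(dst, buf, sizeof(buf), 0) != sizeof(buf))
 *			err(1, "pwrite");
 *		if (fsync(dst))
 *			err(1, "fsync");
 *		return 0;
 *	}
 */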
/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen.  If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks.  If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
int
xfs_reflink_find_shared(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;

	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agbp->b_pag);

	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
			find_end_of_shared);

	xfs_btree_del_cursor(cur, error);

	xfs_trans_brelse(tp, agbp);
	return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status.  More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping.  If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent.  If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent.  If there are no shared regions that
 * overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	bool			*shared)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	/* Holes, unwritten, and delalloc extents cannot be shared */
	if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) {
		*shared = false;
		return 0;
	}

	trace_xfs_reflink_trim_around_shared(ip, irec);

	agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
	aglen = irec->br_blockcount;

	error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
			aglen, &fbno, &flen, true);
	if (error)
		return error;

	*shared = false;
	if (fbno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (fbno == agbno) {
		/*
		 * The start of this extent is shared.  Truncate the
		 * mapping at the end of the shared region so that a
		 * subsequent iteration starts at the start of the
		 * unshared region.
		 */
		irec->br_blockcount = flen;
		*shared = true;
		return 0;
	} else {
		/*
		 * There's a shared extent midway through this extent.
		 * Truncate the mapping at the start of the shared
		 * extent so that a subsequent iteration starts at the
		 * start of the shared region.
		 */
		irec->br_blockcount = fbno - agbno;
		return 0;
	}
}

int
xfs_bmap_trim_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	/* We can't update any real extents in always COW mode. */
	if (xfs_is_always_cow_inode(ip) &&
	    !isnullstartblock(imap->br_startblock)) {
		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	return xfs_reflink_trim_around_shared(ip, imap, shared);
}

static int
xfs_reflink_convert_cow_locked(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_btree_cur	*dummy_cur = NULL;
	int			dummy_logflags;
	int			error = 0;

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		return 0;

	do {
		if (got.br_startoff >= offset_fsb + count_fsb)
			break;
		if (got.br_state == XFS_EXT_NORM)
			continue;
		if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
			return -EIO;

		xfs_trim_extent(&got, offset_fsb, count_fsb);
		if (!got.br_blockcount)
			continue;

		got.br_state = XFS_EXT_NORM;
		error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
				XFS_COW_FORK, &icur, &dummy_cur, &got,
				&dummy_logflags);
		if (error)
			return error;
	} while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

	return error;
}
/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			error;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Find the extent that maps the given range in the COW fork.  Even if the
 * extent is not shared we might have a preallocation for it in the COW fork.
 * If so, use that rather than triggering a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	bool			*found)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor	icur;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
		cmap->br_startoff = offset_fsb + count_fsb;
	if (cmap->br_startoff > offset_fsb) {
		xfs_trim_extent(imap, imap->br_startoff,
				cmap->br_startoff - imap->br_startoff);
		return xfs_bmap_trim_cow(ip, imap, shared);
	}

	*shared = true;
	if (isnullstartblock(cmap->br_startblock)) {
		xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	*found = true;
	return 0;
}

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_trans	*tp;
	int			nimaps, error = 0;
	bool			found;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	*lockmode = 0;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	*lockmode = XFS_ILOCK_EXCL;

	/*
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
			&nimaps);
	if (error)
		goto out_trans_cancel;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied?  Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write.  For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || cmap->br_state == XFS_EXT_NORM)
		return 0;
	trace_xfs_reflink_convert_cow(ip, cmap);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	if (!error)
		cmap->br_state = XFS_EXT_NORM;
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	if (!xfs_inode_has_cow_data(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);

			/* Free the CoW orphan record. */
			xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
					del.br_blockcount);

			xfs_free_extent_later(*tpp, del.br_startblock,
					del.br_blockcount, NULL);

			/* Roll the transaction */
			error = xfs_defer_finish(tpp);
			if (error)
				break;

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			error = xfs_quota_unreserve_blkres(ip,
					del.br_blockcount);
			if (error)
				break;
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);
	return error;
}
/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
	ASSERT(ip->i_cowfp);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error)
		goto out_cancel;

	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the start of
 * the range) and update @offset_fsb appropriately.  Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got, del, data;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	unsigned int		resblks;
	int			nmaps;
	int			error;

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0) {
		*offset_fsb = end_fsb;
		return 0;
	}

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode.  We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip,
				XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		goto out_cancel;

	/*
	 * In case of racing, overlapping AIO writes no COW extents might be
	 * left by the time I/O completes for the loser of the race.  In that
	 * case we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, *offset_fsb, &icur, &got) ||
	    got.br_startoff >= end_fsb) {
		*offset_fsb = end_fsb;
		goto out_cancel;
	}
	/*
	 * Only remap real extents that contain data.  With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.  Preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	while (!xfs_bmap_is_written_extent(&got)) {
		if (!xfs_iext_next_extent(ifp, &icur, &got) ||
		    got.br_startoff >= end_fsb) {
			*offset_fsb = end_fsb;
			goto out_cancel;
		}
	}
	del = got;

	/* Grab the corresponding mapping in the data fork. */
	nmaps = 1;
	error = xfs_bmapi_read(ip, del.br_startoff, del.br_blockcount, &data,
			&nmaps, 0);
	if (error)
		goto out_cancel;

	/* We can only remap the smaller of the two extent sizes. */
	data.br_blockcount = min(data.br_blockcount, del.br_blockcount);
	del.br_blockcount = data.br_blockcount;

	trace_xfs_reflink_cow_remap_from(ip, &del);
	trace_xfs_reflink_cow_remap_to(ip, &data);

	if (xfs_bmap_is_real_extent(&data)) {
		/*
		 * If the extent we're remapping is backed by storage (written
		 * or not), unmap the extent and drop its refcount.
		 */
		xfs_bmap_unmap_extent(tp, ip, &data);
		xfs_refcount_decrease_extent(tp, &data);
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
				-data.br_blockcount);
	} else if (data.br_startblock == DELAYSTARTBLOCK) {
		int		done;

		/*
		 * If the extent we're remapping is a delalloc reservation,
		 * we can use the regular bunmapi function to release the
		 * incore state.  Dropping the delalloc reservation takes care
		 * of the quota reservation for us.
		 */
		error = xfs_bunmapi(NULL, ip, data.br_startoff,
				data.br_blockcount, 0, 1, &done);
		if (error)
			goto out_cancel;
		ASSERT(done);
	}

	/* Free the CoW orphan record. */
	xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, &del);

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*offset_fsb = del.br_startoff + del.br_blockcount;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	xfs_off_t			count)
{
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	int				error = 0;

	trace_xfs_reflink_end_cow(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Walk forwards until we've remapped the I/O range.  The loop function
	 * repeatedly cycles the ILOCK to allocate one transaction per remapped
	 * extent.
	 *
	 * If we're being called by writeback then the pages will still
	 * have PageWriteback set, which prevents races with reflink remapping
	 * and truncate.  Reflink remapping prevents races with writeback by
	 * taking the iolock and mmaplock before flushing the pages and
	 * remapping, which means there won't be any further writeback or page
	 * cache dirtying until the reflink completes.
	 *
	 * We should never have two threads issuing writeback for the same file
	 * region.  There are also post-eof checks in the writeback
	 * preparation code so that we don't bother writing out pages that are
	 * about to be truncated.
	 *
	 * If we're being called as part of directio write completion, the dio
	 * count is still elevated, which reflink and truncate will wait for.
	 * Reflink remapping takes the iolock and mmaplock and waits for
	 * pending dio to finish, which should prevent any directio until the
	 * remap completes.  Multiple concurrent directio writes to the same
	 * region are handled by end_cow processing only occurring for the
	 * threads which succeed; the outcome of multiple overlapping direct
	 * writes is not well defined anyway.
	 *
	 * It's possible that a buffered write and a direct write could collide
	 * here (the buffered write stumbles in after the dio flushes and
	 * invalidates the page cache and immediately queues writeback), but we
	 * have never supported this 100%.  If either disk write succeeds the
	 * blocks will be remapped.
	 */
	while (end_fsb > offset_fsb && !error)
		error = xfs_reflink_end_cow_extent(ip, &offset_fsb, end_fsb);

	if (error)
		trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Free all CoW staging blocks that are still referenced by the ondisk refcount
 * metadata.  The ondisk metadata does not track which inode created the
 * staging extent, so callers must ensure that there are no cached inodes with
 * live CoW staging extents.
 */
int
xfs_reflink_recover_cow(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error = 0;

	if (!xfs_has_reflink(mp))
		return 0;

	for_each_perag(mp, agno, pag) {
		error = xfs_refcount_recover_cow_leftovers(mp, pag);
		if (error) {
			xfs_perag_put(pag);
			break;
		}
	}

	return error;
}

/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes.  The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest.  Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 *    - Read src's bmbt at the start of srange ("imap")
 *    - If imap doesn't exist, make imap appear to start at the end of srange
 *      with zero length.
 *    - If imap starts before srange, advance imap to start at srange.
 *    - If imap goes beyond srange, truncate imap to end at the end of srange.
 *    - Punch (imap start - srange start + imap len) blocks from dest at
 *      offset (drange start).
 *    - If imap points to a real range of pblks,
 *         > Increase the refcount of the imap's pblks
 *         > Map imap's pblks into dest at the offset
 *           (drange start + imap start - srange start)
 *    - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 *   <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 *        <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range.  Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD---------DDDDD--DDD
 *        <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 *        <------->
 *
 * Do likewise with the second hole and extent in our range.  Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *            <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 *                 <---->
 *
 * Finally, unmap and remap part of the third extent.  This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *                  <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 *                       <----->
 *
 * Once we update the destination file's i_size, we're done.
 */
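/*
 * The remap loop described above is driven from userspace through the
 * FICLONERANGE ioctl (or copy_file_range() on a reflink-capable
 * filesystem).  A hedged sketch, with hypothetical file names and
 * offsets, that reflinks one megabyte of one file into another at a
 * different logical offset; all three byte values must be multiples of
 * the filesystem block size or the clone is rejected with -EINVAL (see
 * xfs_reflink_remap_prep() below):
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		int src = open("a.img", O_RDONLY);
 *		int dst = open("b.img", O_RDWR);
 *		struct file_clone_range fcr = {
 *			.src_fd		= src,
 *			.src_offset	= 0,
 *			.src_length	= 1024 * 1024,
 *			.dest_offset	= 4 * 1024 * 1024,
 *		};
 *
 *		if (src < 0 || dst < 0)
 *			err(1, "open");
 *		if (ioctl(dst, FICLONERANGE, &fcr))
 *			err(1, "FICLONERANGE");
 *		return 0;
 *	}
 */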
/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	struct xfs_mount	*mp = src->i_mount;
	int			error;
	struct xfs_trans	*tp;

	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	/* Lock both files against IO */
	if (src->i_ino == dest->i_ino)
		xfs_ilock(src, XFS_ILOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

	if (!xfs_is_reflink_inode(src)) {
		trace_xfs_reflink_set_inode_flag(src);
		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
		src->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
		xfs_ifork_init_cow(src);
	} else
		xfs_iunlock(src, XFS_ILOCK_EXCL);

	if (src->i_ino == dest->i_ino)
		goto commit_flags;

	if (!xfs_is_reflink_inode(dest)) {
		trace_xfs_reflink_set_inode_flag(dest);
		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
		dest->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
		xfs_ifork_init_cow(dest);
	} else
		xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
	return error;
}
/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
int
xfs_reflink_update_dest(
	struct xfs_inode	*dest,
	xfs_off_t		newlen,
	xfs_extlen_t		cowextsize,
	unsigned int		remap_flags)
{
	struct xfs_mount	*mp = dest->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	xfs_ilock(dest, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

	if (newlen > i_size_read(VFS_I(dest))) {
		trace_xfs_reflink_update_inode_size(dest, newlen);
		i_size_write(VFS_I(dest), newlen);
		dest->i_disk_size = newlen;
	}

	if (cowextsize) {
		dest->i_cowextsize = cowextsize;
		dest->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
	}

	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Do we have enough reserve in this AG to handle a reflink?  The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error = 0;

	if (!xfs_has_rmapbt(mp))
		return 0;

	pag = xfs_perag_get(mp, agno);
	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
		error = -ENOSPC;
	xfs_perag_put(pag);
	return error;
}

/*
 * Remap the given extent into the file.  The dmap blockcount will be set to
 * the number of blocks that were actually remapped.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*dmap,
	xfs_off_t		new_isize)
{
	struct xfs_bmbt_irec	smap;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_off_t		newlen;
	int64_t			qdelta = 0;
	unsigned int		resblks;
	bool			quota_reserved = true;
	bool			smap_real;
	bool			dmap_written = xfs_bmap_is_written_extent(dmap);
	int			iext_delta = 0;
	int			nimaps;
	int			error;

	/*
	 * Start a rolling transaction to switch the mappings.
	 *
	 * Adding a written extent to the extent map can cause a bmbt split,
	 * and removing a mapped extent from the extent map can cause a bmbt
	 * split.  The two operations cannot both cause a split since they
	 * operate on the same index in the bmap btree, so we only need a
	 * reservation for one bmbt split if either thing is happening.
	 * However, we haven't locked the inode yet, so we reserve assuming
	 * this is the case.
	 *
	 * The first allocation call tries to reserve enough space to handle
	 * mapping dmap into a sparse part of the file plus the bmbt split.  We
	 * haven't locked the inode or read the existing mapping yet, so we do
	 * not know for sure that we need the space.  This should succeed most
	 * of the time.
	 *
	 * If the first attempt fails, try again but reserving only enough
	 * space to handle a bmbt split.  This is the hard minimum requirement,
	 * and we revisit quota reservations later when we know more about what
	 * we're remapping.
	 */
	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
			resblks + dmap->br_blockcount, 0, false, &tp);
	if (error == -EDQUOT || error == -ENOSPC) {
		quota_reserved = false;
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				resblks, 0, false, &tp);
	}
	if (error)
		goto out;

	/*
	 * Read what's currently mapped in the destination file into smap.
	 * If smap isn't a hole, we will have to remove it before we can add
	 * dmap to the destination file.
	 */
	nimaps = 1;
	error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount,
			&smap, &nimaps, 0);
	if (error)
		goto out_cancel;
	ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);
	smap_real = xfs_bmap_is_real_extent(&smap);

	/*
	 * We can only remap as many blocks as the smaller of the two extent
	 * maps, because we can only remap one extent at a time.
	 */
	dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);
	ASSERT(dmap->br_blockcount == smap.br_blockcount);

	trace_xfs_reflink_remap_extent_dest(ip, &smap);

	/*
	 * Two extents mapped to the same physical block must not have
	 * different states; that's filesystem corruption.  Move on to the next
	 * extent if they're both holes or both the same physical extent.
	 */
	if (dmap->br_startblock == smap.br_startblock) {
		if (dmap->br_state != smap.br_state)
			error = -EFSCORRUPTED;
		goto out_cancel;
	}

	/* If both extents are unwritten, leave them alone. */
	if (dmap->br_state == XFS_EXT_UNWRITTEN &&
	    smap.br_state == XFS_EXT_UNWRITTEN)
		goto out_cancel;

	/* No reflinking if the AG of the dest mapping is low on space. */
	if (dmap_written) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
		if (error)
			goto out_cancel;
	}

	/*
	 * Increase quota reservation if we think the quota block counter for
	 * this file could increase.
	 *
	 * If we are mapping a written extent into the file, we need to have
	 * enough quota block count reservation to handle the blocks in that
	 * extent.  We log only the delta to the quota block counts, so if the
	 * extent we're unmapping also has blocks allocated to it, we don't
	 * need a quota reservation for the extent itself.
	 *
	 * Note that if we're replacing a delalloc reservation with a written
	 * extent, we have to take the full quota reservation because removing
	 * the delalloc reservation gives the block count back to the quota
	 * count.  This is suboptimal, but the VFS flushed the dest range
	 * before we started.  That should have removed all the delalloc
	 * reservations, but we code defensively.
	 *
	 * xfs_trans_alloc_inode above already tried to grab an even larger
	 * quota reservation, and kicked off a blockgc scan if it couldn't.
	 * If we can't get a potentially smaller quota reservation now, we're
	 * done.
	 */
	if (!quota_reserved && !smap_real && dmap_written) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				dmap->br_blockcount, 0, false);
		if (error)
			goto out_cancel;
	}

	if (smap_real)
		++iext_delta;

	if (dmap_written)
		++iext_delta;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, iext_delta);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip, iext_delta);
	if (error)
		goto out_cancel;

	if (smap_real) {
		/*
		 * If the extent we're unmapping is backed by storage (written
		 * or not), unmap the extent and drop its refcount.
		 */
		xfs_bmap_unmap_extent(tp, ip, &smap);
		xfs_refcount_decrease_extent(tp, &smap);
		qdelta -= smap.br_blockcount;
	} else if (smap.br_startblock == DELAYSTARTBLOCK) {
		int		done;

		/*
		 * If the extent we're unmapping is a delalloc reservation,
		 * we can use the regular bunmapi function to release the
		 * incore state.  Dropping the delalloc reservation takes care
		 * of the quota reservation for us.
		 */
		error = xfs_bunmapi(NULL, ip, smap.br_startoff,
				smap.br_blockcount, 0, 1, &done);
		if (error)
			goto out_cancel;
		ASSERT(done);
	}

	/*
	 * If the extent we're sharing is backed by written storage, increase
	 * its refcount and map it into the file.
	 */
	if (dmap_written) {
		xfs_refcount_increase_extent(tp, dmap);
		xfs_bmap_map_extent(tp, ip, dmap);
		qdelta += dmap->br_blockcount;
	}

	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);

	/* Update dest isize if needed. */
	newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
	newlen = min_t(xfs_off_t, newlen, new_isize);
	if (newlen > i_size_read(VFS_I(ip))) {
		trace_xfs_reflink_update_inode_size(ip, newlen);
		i_size_write(VFS_I(ip), newlen);
		ip->i_disk_size = newlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	}

	/* Commit everything and unlock. */
	error = xfs_trans_commit(tp);
	goto out_unlock;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	if (error)
		trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}
/* Remap a range of one file to the other. */
int
xfs_reflink_remap_blocks(
	struct xfs_inode	*src,
	loff_t			pos_in,
	struct xfs_inode	*dest,
	loff_t			pos_out,
	loff_t			remap_len,
	loff_t			*remapped)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = src->i_mount;
	xfs_fileoff_t		srcoff = XFS_B_TO_FSBT(mp, pos_in);
	xfs_fileoff_t		destoff = XFS_B_TO_FSBT(mp, pos_out);
	xfs_filblks_t		len;
	xfs_filblks_t		remapped_len = 0;
	xfs_off_t		new_isize = pos_out + remap_len;
	int			nimaps;
	int			error = 0;

	len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len),
			XFS_MAX_FILEOFF);

	trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff);

	while (len > 0) {
		unsigned int	lock_mode;

		/* Read extent from the source file */
		nimaps = 1;
		lock_mode = xfs_ilock_data_map_shared(src);
		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
		xfs_iunlock(src, lock_mode);
		if (error)
			break;
		/*
		 * The caller supposedly flushed all dirty pages in the source
		 * file range, which means that writeback should have allocated
		 * or deleted all delalloc reservations in that range.  If we
		 * find one, that's a good sign that something is seriously
		 * wrong here.
		 */
		ASSERT(nimaps == 1 && imap.br_startoff == srcoff);
		if (imap.br_startblock == DELAYSTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			error = -EFSCORRUPTED;
			break;
		}

		trace_xfs_reflink_remap_extent_src(src, &imap);

		/* Remap into the destination file at the given offset. */
		imap.br_startoff = destoff;
		error = xfs_reflink_remap_extent(dest, &imap, new_isize);
		if (error)
			break;

		if (fatal_signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Advance drange/srange */
		srcoff += imap.br_blockcount;
		destoff += imap.br_blockcount;
		len -= imap.br_blockcount;
		remapped_len += imap.br_blockcount;
	}

	if (error)
		trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
	*remapped = min_t(loff_t, remap_len,
			XFS_FSB_TO_B(src->i_mount, remapped_len));
	return error;
}

/*
 * If we're reflinking to a point past the destination file's EOF, we must
 * zero any speculative post-EOF preallocations that sit between the old EOF
 * and the destination file offset.
 */
static int
xfs_reflink_zero_posteof(
	struct xfs_inode	*ip,
	loff_t			pos)
{
	loff_t			isize = i_size_read(VFS_I(ip));

	if (pos <= isize)
		return 0;

	trace_xfs_zero_eof(ip, isize, pos - isize);
	return xfs_zero_range(ip, isize, pos - isize, NULL);
}
/*
 * Prepare two files for range cloning.  Upon a successful return both inodes
 * will have the iolock and mmaplock held, the page cache of the out file will
 * be truncated, and any leases on the out file will have been broken.  This
 * function borrows heavily from xfs_file_aio_write_checks.
 *
 * The VFS allows partial EOF blocks to "match" for dedupe even though it
 * hasn't checked that the bytes beyond EOF physically match.  Hence we cannot
 * use the EOF block in the source dedupe range because it's not a complete
 * block match, and hence can introduce corruption into the file that has its
 * block replaced.
 *
 * In similar fashion, the VFS file cloning also allows partial EOF blocks to
 * be "block aligned" for the purposes of cloning entire files.  However, if
 * the source file range includes the EOF block and it lands within the
 * existing EOF of the destination file, then we can expose stale data from
 * beyond the source file EOF in the destination file.
 *
 * XFS doesn't support partial block sharing, so in both cases we have to
 * check these cases ourselves.  For dedupe, we can simply round the dedupe
 * length down to the previous whole block and ignore the partial EOF block.
 * While this means we can't dedupe the last block of a file, this is an
 * acceptable tradeoff for implementation simplicity.
 *
 * For cloning, we want to share the partial EOF block if it is also the new
 * EOF block of the destination file.  If the partial EOF block lies inside
 * the existing destination EOF, then we have to abort the clone to avoid
 * exposing stale data in the destination file.  Hence we reject these clone
 * attempts with -EINVAL in this case.
 */
int
xfs_reflink_remap_prep(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			*len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	int			ret;

	/* Lock both files against IO */
	ret = xfs_ilock2_io_mmap(src, dest);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data for now. */
	if (IS_DAX(inode_in) || IS_DAX(inode_out))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
			len, remap_flags);
	if (ret || *len == 0)
		goto out_unlock;

	/* Attach dquots to dest inode before changing block map */
	ret = xfs_qm_dqattach(dest);
	if (ret)
		goto out_unlock;

	/*
	 * Zero existing post-eof speculative preallocations in the destination
	 * file.
	 */
	ret = xfs_reflink_zero_posteof(dest, pos_out);
	if (ret)
		goto out_unlock;

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	/*
	 * If pos_out > EOF, we may have dirtied blocks between EOF and
	 * pos_out.  In that case, we need to extend the flush and unmap to
	 * cover from EOF to the end of the copy length.
	 */
	if (pos_out > XFS_ISIZE(dest)) {
		loff_t	flen = *len + (pos_out - XFS_ISIZE(dest));
		ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
	} else {
		ret = xfs_flush_unmap_range(dest, pos_out, *len);
	}
	if (ret)
		goto out_unlock;

	return 0;
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	return ret;
}
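/*
 * For illustration, the dedupe variant of the partial-EOF policy above
 * can be observed from userspace with the FIDEDUPERANGE ioctl: a
 * request covering a partial EOF block simply dedupes fewer bytes, and
 * bytes_deduped reports the actual progress.  A hedged sketch with
 * hypothetical file names and length:
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		int src = open("a.img", O_RDONLY);
 *		int dst = open("b.img", O_RDWR);
 *		struct file_dedupe_range *fdr;
 *
 *		fdr = calloc(1, sizeof(*fdr) + sizeof(fdr->info[0]));
 *		if (src < 0 || dst < 0 || !fdr)
 *			err(1, "setup");
 *		fdr->src_offset = 0;
 *		fdr->src_length = 1024 * 1024;
 *		fdr->dest_count = 1;
 *		fdr->info[0].dest_fd = dst;
 *		fdr->info[0].dest_offset = 0;
 *		if (ioctl(src, FIDEDUPERANGE, fdr))
 *			err(1, "FIDEDUPERANGE");
 *		if (fdr->info[0].status == FILE_DEDUPE_RANGE_DIFFERS)
 *			warnx("contents differ, nothing shared");
 *		return 0;
 *	}
 */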
/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	bool				*has_shared)
{
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_extlen_t			aglen;
	xfs_agblock_t			rbno;
	xfs_extlen_t			rlen;
	struct xfs_iext_cursor		icur;
	bool				found;
	int				error;

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
	if (error)
		return error;

	*has_shared = false;
	found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
	while (found) {
		if (isnullstartblock(got.br_startblock) ||
		    got.br_state != XFS_EXT_NORM)
			goto next;
		agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
		aglen = got.br_blockcount;

		error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
				&rbno, &rlen, false);
		if (error)
			return error;
		/* Is there still a shared block here? */
		if (rbno != NULLAGBLOCK) {
			*has_shared = true;
			return 0;
		}
next:
		found = xfs_iext_next_extent(ifp, &icur, &got);
	}

	return 0;
}

/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed
 * in.  The inode will be joined to the transaction that is returned to the
 * caller.
 */
int
xfs_reflink_clear_inode_flag(
	struct xfs_inode	*ip,
	struct xfs_trans	**tpp)
{
	bool			needs_flag;
	int			error = 0;

	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
	if (error || needs_flag)
		return error;

	/*
	 * We didn't find any shared blocks so turn off the reflink flag.
	 * First, get rid of any leftover CoW mappings.
	 */
	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
			true);
	if (error)
		return error;

	/* Clear the inode flag. */
	trace_xfs_reflink_unset_inode_flag(ip);
	ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	xfs_inode_clear_cowblocks_tag(ip);
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	return error;
}

/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_reflink_clear_inode_flag(ip, &tp);
	if (error)
		goto cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
cancel:
	xfs_trans_cancel(tp);
out:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
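/*
 * The question xfs_reflink_inode_has_shared_extents() answers in-kernel
 * can be approximated from userspace with FIEMAP, which sets
 * FIEMAP_EXTENT_SHARED on extents that are multiply referenced.  A
 * hedged sketch, assuming a hypothetical file small enough for 32
 * extent records:
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("a.img", O_RDONLY);
 *		unsigned int i;
 *		struct fiemap *fm;
 *
 *		fm = calloc(1, sizeof(*fm) + 32 * sizeof(fm->fm_extents[0]));
 *		if (fd < 0 || !fm)
 *			err(1, "setup");
 *		fm->fm_length = FIEMAP_MAX_OFFSET;
 *		fm->fm_extent_count = 32;
 *		if (ioctl(fd, FS_IOC_FIEMAP, fm))
 *			err(1, "FIEMAP");
 *		for (i = 0; i < fm->fm_mapped_extents; i++)
 *			if (fm->fm_extents[i].fe_flags & FIEMAP_EXTENT_SHARED)
 *				printf("shared extent at %llu\n",
 *				       (unsigned long long)
 *				       fm->fm_extents[i].fe_logical);
 *		return 0;
 *	}
 */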
/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct inode		*inode = VFS_I(ip);
	int			error;

	if (!xfs_is_reflink_inode(ip))
		return 0;

	trace_xfs_reflink_unshare(ip, offset, len);

	inode_dio_wait(inode);

	error = iomap_file_unshare(inode, offset, len,
			&xfs_buffered_write_iomap_ops);
	if (error)
		goto out;

	error = filemap_write_and_wait_range(inode->i_mapping, offset,
			offset + len - 1);
	if (error)
		goto out;

	/* Turn off the reflink flag if possible. */
	error = xfs_reflink_try_clear_inode_flag(ip);
	if (error)
		goto out;
	return 0;

out:
	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
	return error;
}
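/*
 * xfs_reflink_unshare() is reached from userspace via fallocate(2)'s
 * FALLOC_FL_UNSHARE_RANGE mode.  A hedged sketch (hypothetical file
 * name and length) that forces a private copy of the first megabyte of
 * a previously cloned file:
 *
 *	#define _GNU_SOURCE
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("data.clone", O_RDWR);
 *
 *		if (fd < 0)
 *			err(1, "open");
 *		if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 0, 1024 * 1024))
 *			err(1, "unshare");
 *		return 0;
 *	}
 */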