migrate_device.c (22269B)
// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entry. Other special swap entries are not
			 * migratable, and we ignore regular swapped page.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the LRU and
		 * thus can't be dropped from it).
		 */
		get_page(page);

		/*
		 * Optimize for the common case where page is only mapped once
		 * in one process. If we can lock the page, then we can safely
		 * set up a special migration page table entry now.
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				flush_cache_page(vma, addr, pte_pfn(*ptep));
				ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop page refcount. Page won't be freed, as we took
			 * a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because caller holds an extra reference, either from
	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
	 * a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages are
 * restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page)
			continue;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				migrate->cpages--;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) || !migrate_vma_check_page(page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			migrate->cpages--;
			restore++;
			continue;
		}
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		migrate->src[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range of memory by collecting all the
 * pages backing each virtual address in the range, saving them inside the src
 * array. Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Any pages that are pinned are then restored
 * by remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
 * device memory to system memory. If the caller cannot migrate a device page
 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
 * consequences for the userspace process, so it must be avoided if at all
 * possible.
 *
 * For empty entries in the CPU page table (pte_none() or pmd_none() is true)
 * we set the MIGRATE_PFN_MIGRATE flag in the corresponding src array entry,
 * thus allowing the caller to allocate device memory for those unbacked
 * virtual addresses. For this the caller simply has to allocate device
 * memory and properly set the destination entry like for regular migration.
 * Note that this can still fail, and thus inside the device driver you must
 * check if the migration was successful for those entries after calling
 * migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has MIGRATE_PFN_VALID set,
 * then migrate_vma_pages() migrates the struct page information from the
 * source struct page to the destination struct page. If it fails to migrate
 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
 * the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both the destination and source pages are still locked, and the mmap_lock is
 * held in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages. (An illustrative usage sketch follows
 * migrate_vma_finalize() at the end of this file.)
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;

}
EXPORT_SYMBOL(migrate_vma_setup);

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		/*
		 * For now we only support migrating to un-addressable device
		 * memory.
		 */
		if (is_zone_device_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage)) {
			/*
			 * For now we only support private anonymous memory
			 * when migrating to un-addressable device memory.
			 */
			if (mapping) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the extra
 * refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);
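
/*
 * Illustrative usage sketch (not part of the original file): one way a device
 * driver might drive the setup/pages/finalize API documented above, for a
 * single page migrating from system memory to device memory. The my_drv_*()
 * helpers are hypothetical placeholders for driver-specific code; everything
 * else uses interfaces defined or documented in this file. Migrating in the
 * other direction looks the same, except the driver selects
 * MIGRATE_VMA_SELECT_DEVICE_PRIVATE and allocates a system page as the
 * destination. The block is guarded by "#if 0" since the helpers do not exist.
 */
#if 0
static int my_drv_migrate_one_page(struct vm_area_struct *vma,
				   unsigned long addr, void *pgmap_owner)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr & PAGE_MASK,
		.end		= (addr & PAGE_MASK) + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	struct page *spage, *dpage;
	int ret;

	/* Collect, lock and unmap the source page; this fills src_pfn. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/*
	 * Allocate destination memory and copy the data for every src entry
	 * that has MIGRATE_PFN_MIGRATE set. The destination page must be
	 * locked and published in the dst array; migrate_pfn() also sets
	 * MIGRATE_PFN_VALID.
	 */
	if (src_pfn & MIGRATE_PFN_MIGRATE) {
		dpage = my_drv_alloc_device_page();	/* hypothetical */
		if (dpage) {
			lock_page(dpage);
			spage = migrate_pfn_to_page(src_pfn);
			if (spage)
				my_drv_copy_to_device(dpage, spage);
			else
				/* Empty or zero pte: nothing to copy. */
				my_drv_clear_device_page(dpage);
			dst_pfn = migrate_pfn(page_to_pfn(dpage));
		}
	}

	/* Move struct page metadata over to the destination page. */
	migrate_vma_pages(&args);

	/* The core clears MIGRATE_PFN_MIGRATE in src_pfn on failure. */
	ret = (src_pfn & MIGRATE_PFN_MIGRATE) ? 0 : -EAGAIN;

	/* Update the CPU page table, then unlock and release the pages. */
	migrate_vma_finalize(&args);
	return ret;
}
#endif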