ioremap.c (25113B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

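/*
 * Illustrative sketch (not part of the original file): a caller that wants to
 * know up front whether a physical range overlaps System RAM -- and would
 * therefore be rejected by ioremap() below -- can ask the resource tree
 * directly. The helper name is made up; region_intersects() is the real API.
 */
#if 0	/* example only */
static bool example_range_is_ram(resource_size_t start, size_t size)
{
	/*
	 * REGION_INTERSECTS or REGION_MIXED means at least part of the
	 * range is System RAM; REGION_DISJOINT means none of it is.
	 */
	return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) != REGION_DISJOINT;
}
#endif
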
/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
 * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set decrypt
	 * attribute in all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

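/*
 * Worked example (illustrative sketch, not part of the original file): how the
 * alignment handling in __ioremap_caller() turns a non-page-aligned request
 * into a page-aligned mapping plus an offset. The physical address used here
 * is hypothetical.
 */
#if 0	/* example only */
static void example_alignment_math(void)
{
	resource_size_t phys_addr = 0xfebc1234;			/* hypothetical, unaligned */
	unsigned long size = 8;
	resource_size_t last_addr = phys_addr + size - 1;	/* 0xfebc123b */
	unsigned long offset = phys_addr & ~PAGE_MASK;		/* 0x234 */

	phys_addr &= PHYSICAL_PAGE_MASK;			/* 0xfebc1000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;		/* 0x1000, one page */

	/*
	 * One page gets mapped and the caller receives vaddr + 0x234, which
	 * covers exactly the 8 bytes that were asked for.
	 */
}
#endif
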
/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

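/*
 * Illustrative sketch (not part of the original file): the usual driver-side
 * pattern for the API above -- map a device's MMIO window, access it with the
 * readl()/writel() accessors, and unmap it again. The physical address and
 * register offsets are hypothetical.
 */
#if 0	/* example only */
static int example_mmio_probe(void)
{
	void __iomem *regs;

	regs = ioremap(0xd0000000, 0x1000);	/* hypothetical MMIO window */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);		/* hypothetical enable register */
	pr_info("example: status %#x\n", readl(regs + 0x08));

	iounmap(regs);
	return 0;
}
#endif
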
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

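/*
 * Illustrative sketch (not part of the original file): write-combined mappings
 * are typically used for large, write-mostly apertures such as framebuffers,
 * where batching writes is much faster than strict UC ordering. The aperture
 * address and size are hypothetical.
 */
#if 0	/* example only */
static int example_fill_aperture(const void *src, size_t len)
{
	void __iomem *fb = ioremap_wc(0xe0000000, 0x800000);	/* hypothetical aperture */

	if (!fb)
		return -ENOMEM;

	memcpy_toio(fb, src, len);	/* streamed out as write-combined bursts */
	iounmap(fb);
	return 0;
}
#endif
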
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start = phys & PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

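/*
 * Illustrative sketch (not part of the original file): how a memremap() caller
 * interacts with the policy above. With no flag, the decision falls through to
 * the setup_data/EFI/E820 checks; MEMREMAP_DEC or MEMREMAP_ENC force the
 * mapping one way. The helper name and physical range are hypothetical.
 */
#if 0	/* example only */
static void *example_map_firmware_blob(resource_size_t phys, size_t len)
{
	/*
	 * Firmware-written data is not encrypted with the kernel's key, so a
	 * consumer maps it decrypted explicitly; memunmap() releases it.
	 */
	return memremap(phys, len, MEMREMAP_WB | MEMREMAP_DEC);
}
#endif
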
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

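/*
 * Illustrative sketch (not part of the original file): early boot code that
 * needs to read a structure written by the bootloader (and therefore not
 * encrypted with the kernel's key) maps it with the decrypted helper above and
 * releases the fixmap slot with early_memunmap(). The physical address is
 * hypothetical and struct example_boot_info is a made-up placeholder type.
 */
#if 0	/* example only */
static void __init example_read_boot_info(resource_size_t phys)
{
	struct example_boot_info *info;

	info = early_memremap_decrypted(phys, sizeof(*info));
	if (!info)
		return;

	/* ... consume the fields ... */

	early_memunmap(info, sizeof(*info));
}
#endif
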
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}
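
/*
 * Illustrative sketch (not part of the original file): the fixmap machinery
 * above backs the early_ioremap()/early_iounmap() interface, which lets boot
 * code peek at device or firmware memory before the normal ioremap() path and
 * the vmalloc area are available. The physical address is hypothetical.
 */
#if 0	/* example only */
static u32 __init example_early_peek(void)
{
	void __iomem *p = early_ioremap(0xfed40000, PAGE_SIZE);	/* hypothetical */
	u32 val = 0;

	if (p) {
		val = readl(p);			/* peek at a register */
		early_iounmap(p, PAGE_SIZE);	/* release the fixmap slot */
	}

	return val;
}
#endif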