page_64.h (2766B)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLY__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
        unsigned long y = x - __START_KERNEL_map;

        /* use the carry flag to determine if x was < __START_KERNEL_map */
        x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

        return x;
}

#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)          __phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
        ((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

#define __phys_reloc_hide(x)    (x)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)          ((pfn) < max_pfn)
#endif

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
        alternative_call_2(clear_page_orig,
                           clear_page_rep, X86_FEATURE_REP_GOOD,
                           clear_page_erms, X86_FEATURE_ERMS,
                           "=D" (page),
                           "0" (page)
                           : "cc", "memory", "rax", "rcx");
}

void copy_page(void *to, void *from);

#ifdef CONFIG_X86_5LEVEL
/*
 * User space process size. This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen. This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
        unsigned long ret;

        alternative_io("movq %[small],%0","movq %[large],%0",
                       X86_FEATURE_LA57,
                       "=r" (ret),
                       [small] "i" ((1ul << 47)-PAGE_SIZE),
                       [large] "i" ((1ul << 56)-PAGE_SIZE));

        return ret;
}
#endif  /* CONFIG_X86_5LEVEL */

#endif  /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */
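A note on __phys_addr_nodebug(): subtracting __START_KERNEL_map wraps around for direct-map addresses, and the (x > y) comparison detects that wrap (the "carry flag" the comment refers to), so kernel-text addresses get phys_base added while direct-map addresses effectively get PAGE_OFFSET subtracted. Below is a standalone userspace sketch of the same arithmetic; the layout constants match the default 4-level x86-64 configuration, and PHYS_BASE is a made-up relocation offset, so treat this as an illustration rather than kernel code.

/* Userspace sketch of the branch-free trick in __phys_addr_nodebug(). */
#include <stdio.h>

#define START_KERNEL_MAP 0xffffffff80000000UL  /* __START_KERNEL_map */
#define PAGE_OFFSET_4L   0xffff888000000000UL  /* direct-map base, 4-level paging */
#define PHYS_BASE        0x1000000UL           /* hypothetical kernel load offset */

static unsigned long phys_addr_nodebug(unsigned long x)
{
        unsigned long y = x - START_KERNEL_MAP;

        /*
         * If x >= START_KERNEL_MAP the subtraction does not wrap, so x > y
         * holds and x is a kernel-text address: add PHYS_BASE.
         * Otherwise x is a direct-map address: the result must be
         * x - PAGE_OFFSET, which equals y + (START_KERNEL_MAP - PAGE_OFFSET).
         */
        return y + ((x > y) ? PHYS_BASE : (START_KERNEL_MAP - PAGE_OFFSET_4L));
}

int main(void)
{
        unsigned long text_va   = START_KERNEL_MAP + 0x2000;   /* kernel text  */
        unsigned long linear_va = PAGE_OFFSET_4L + 0x12345000; /* direct map   */

        printf("text   va %#lx -> pa %#lx\n", text_va, phys_addr_nodebug(text_va));
        printf("linear va %#lx -> pa %#lx\n", linear_va, phys_addr_nodebug(linear_va));
        return 0;
}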
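clear_page() relies on alternative_call_2() so the call site is patched at boot to the best routine the CPU supports: the open-coded store loop, clear_page_rep when REP string ops are fast (X86_FEATURE_REP_GOOD), or clear_page_erms when ERMS is present. A rough userspace analogue of that selection, with made-up feature flags and memset() standing in for the three assembly routines, might look like this:

/* Illustrative stand-in for the boot-time patching done by alternative_call_2(). */
#include <stdbool.h>
#include <string.h>

#define PAGE_SIZE 4096

static void clear_page_orig(void *page) { memset(page, 0, PAGE_SIZE); } /* plain stores */
static void clear_page_rep(void *page)  { memset(page, 0, PAGE_SIZE); } /* rep stosq in the kernel */
static void clear_page_erms(void *page) { memset(page, 0, PAGE_SIZE); } /* rep stosb in the kernel */

static bool cpu_has_rep_good;  /* stands in for X86_FEATURE_REP_GOOD */
static bool cpu_has_erms;      /* stands in for X86_FEATURE_ERMS */

static void clear_page(void *page)
{
        /* The kernel patches the call target once; here it is a runtime branch. */
        if (cpu_has_erms)
                clear_page_erms(page);
        else if (cpu_has_rep_good)
                clear_page_rep(page);
        else
                clear_page_orig(page);
}

int main(void)
{
        static unsigned char page[PAGE_SIZE];

        cpu_has_erms = true;    /* pretend CPUID reported ERMS */
        clear_page(page);
        return 0;
}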
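Likewise, task_size_max() patches in one of two immediates depending on X86_FEATURE_LA57: the end of the 47-bit user range with 4-level paging, or the 56-bit range with 5-level paging, in both cases leaving the highest canonical page unmapped because of the SYSCALL/SYSRET and Ryzen issues described in the comment above. The equivalent values worked out in plain C (PAGE_SIZE assumed to be 4 KiB) are shown in this sketch:

/* The two limits task_size_max() selects between, computed in plain C. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL

static unsigned long task_size_max(bool la57)
{
        /* 4-level paging: 47-bit user space; 5-level (LA57): 56-bit. */
        return la57 ? ((1UL << 56) - PAGE_SIZE) : ((1UL << 47) - PAGE_SIZE);
}

int main(void)
{
        printf("4-level: %#lx\n", task_size_max(false)); /* 0x7ffffffff000 */
        printf("5-level: %#lx\n", task_size_max(true));  /* 0xfffffffffff000 */
        return 0;
}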