pgtable_32.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <linux/vmalloc.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d)) {
		BUG();
		return;
	}
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}

unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	early_ioremap_init();
	return 0;
}
early_param("reservetop", parse_reservetop);
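
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller that maps the physical page containing 'phys' at the kernel
 * virtual address 'vaddr' with normal kernel RW protections, assuming
 * the pgd/p4d/pud/pmd entries covering 'vaddr' are already populated
 * (set_pte_vaddr() hits BUG() otherwise):
 *
 *	static void map_one_kernel_page(unsigned long vaddr, phys_addr_t phys)
 *	{
 *		set_pte_vaddr(vaddr, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
 *	}
 *
 * Passing an empty pte (e.g. __pte(0)) instead would tear the mapping
 * down, since the !pte_none() check above falls through to pte_clear().
 */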
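
/*
 * Example kernel command line usage of the two early parameters registered
 * above (the sizes are illustrative assumptions, not taken from this file):
 *
 *	vmalloc=256m		request a 256 MiB vmalloc area; VMALLOC_OFFSET
 *				is added on top to account for the guard hole
 *	reservetop=0x1000000	leave a 16 MiB hole at the top of the kernel
 *				address space by lowering __FIXADDR_TOP via
 *				reserve_top_address()
 */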