/* pgtable_64.c (4167B) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains pgtable related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 *
 * These describe the runtime page-table geometry (index bits and table
 * sizes per level).  They are plain definitions here; NOTE(review): they
 * are presumably assigned during MMU setup elsewhere (hash vs. radix pick
 * different values) — confirm against the arch init code.  Each one is
 * exported so that code built as modules can use the inline accessors
 * that reference these symbols.
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
/* Per-level PTE value bits merged into page-table entries at each level. */
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
/* Runtime layout of the kernel virtual address space (set at MMU init). */
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
/* NOTE(review): __kernel_io_end is intentionally not exported, unlike its
 * siblings — no module user, presumably; confirm before "fixing". */
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
/* PTE fragment parameters: fragments-per-page and fragment size shift. */
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
/*
 * Return the struct page backing a P4D entry: the mapped page itself for
 * a leaf (huge) entry, otherwise the page holding the next-level table.
 */
struct page *p4d_page(p4d_t p4d)
{
	if (p4d_is_leaf(p4d)) {
		/* Without huge vmap, a leaf at this level must be a hugepage. */
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!p4d_huge(p4d));
		return pte_page(p4d_pte(p4d));
	}
	return virt_to_page(p4d_pgtable(p4d));
}
#endif

/*
 * Return the struct page backing a PUD entry; same leaf-vs-table split
 * as p4d_page() above.
 */
struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_pgtable(pud));
}

/*
 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		/*
		 * vmalloc_to_page may be called on any vmap address (not only
		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
		 * enabled so these checks can't be used.
		 */
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Make the kernel rodata region read-only, dispatching to the radix or
 * hash MMU implementation.  Bails out with a warning on CPUs lacking
 * MMU_FTR_KERNEL_RO support.
 */
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

/*
 * Mark the init-memory region no-execute, dispatching to the radix or
 * hash MMU implementation.
 */
void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif