page.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#define PAGE_SHIFT      (12)
#define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

#ifdef CONFIG_64BIT
#define HUGE_MAX_HSTATE         2
#else
#define HUGE_MAX_HSTATE         1
#endif
#define HPAGE_SHIFT             PMD_SHIFT
#define HPAGE_SIZE              (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
#define PAGE_OFFSET             kernel_map.page_offset
#else
#define PAGE_OFFSET             _AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
 * By default, CONFIG_PAGE_OFFSET corresponds to the SV57 address space, so
 * define the PAGE_OFFSET values for SV48 and SV39 here.
 */
#define PAGE_OFFSET_L4          _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3          _AC(0xffffffd800000000, UL)
#else
#define PAGE_OFFSET             _AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */

#ifndef __ASSEMBLY__

#define clear_page(pgaddr)                      memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from)                     memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)    memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
                        memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
        unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
        unsigned long pte;
} pte_t;

typedef struct {
        unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)      ((x).pte)
#define pgd_val(x)      ((x).pgd)
#define pgprot_val(x)   ((x).pgprot)

#define __pte(x)        ((pte_t) { (x) })
#define __pgd(x)        ((pgd_t) { (x) })
#define __pgprot(x)     ((pgprot_t) { (x) })

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif

#ifdef CONFIG_MMU
extern unsigned long riscv_pfn_base;
#define ARCH_PFN_OFFSET         (riscv_pfn_base)
#else
#define ARCH_PFN_OFFSET         (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */

struct kernel_mapping {
        unsigned long page_offset;
        unsigned long virt_addr;
        uintptr_t phys_addr;
        uintptr_t size;
        /* Offset between linear mapping virtual address and kernel load address */
        unsigned long va_pa_offset;
        /* Offset between kernel mapping virtual address and kernel load address */
        unsigned long va_kernel_pa_offset;
        unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
        uintptr_t xiprom;
        uintptr_t xiprom_sz;
#endif
};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;

#define is_kernel_mapping(x)    \
        ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x)    \
        ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))

#define linear_mapping_pa_to_va(x)      ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
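/*
 * Physical-to-virtual conversion is split in two: addresses in RAM are
 * translated through the linear mapping above (PA + va_pa_offset), while
 * addresses inside the kernel image use the kernel mapping below. For XIP
 * kernels, code executing in place from flash (below phys_ram_base) is
 * translated with va_kernel_xip_pa_offset instead; in non-XIP builds,
 * XIP_OFFSET (from asm/pgtable.h) is 0 and only the second branch matters.
 */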
#define kernel_mapping_pa_to_va(y)      ({                                      \
        unsigned long _y = y;                                                   \
        (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?                 \
                (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \
                (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
        })
#define __pa_to_va_nodebug(x)           linear_mapping_pa_to_va(x)

#define linear_mapping_va_to_pa(x)      ((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y)      ({                                      \
        unsigned long _y = y;                                                   \
        (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
                ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) :    \
                ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
        })

#define __va_to_pa_nodebug(x)   ({                                              \
        unsigned long _x = x;                                                   \
        is_linear_mapping(_x) ?                                                 \
                linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);      \
        })

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)       __va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)   __va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)  __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)         __virt_to_phys((unsigned long)(x))
#define __va(x)         ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))

#define phys_to_pfn(phys)       (PFN_DOWN(phys))
#define pfn_to_phys(pfn)        (PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)      (phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)        (__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)     (pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)      (pfn_to_virt(page_to_pfn(page)))

#define page_to_phys(page)      (pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page)       (page_to_phys(page))
#define phys_to_page(paddr)     (pfn_to_page(phys_to_pfn(paddr)))

#define sym_to_pfn(x)           __phys_to_pfn(__pa_symbol(x))

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
        (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
#endif

#endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)  ({                                              \
        unsigned long _addr = (unsigned long)vaddr;                             \
        (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})

#define VM_DATA_DEFAULT_FLAGS   VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */
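/*
 * Usage sketch (illustrative, not part of the original header): given a
 * page-aligned linear-mapping address 'vaddr', the conversion macros above
 * compose as follows:
 *
 *      struct page *pg = virt_to_page(vaddr);
 *      phys_addr_t pa  = page_to_phys(pg);     // equals __pa(vaddr)
 *      void *va        = page_to_virt(pg);     // back to 'vaddr'
 *
 * All of these reduce to offset arithmetic on kernel_map via __pa()/__va();
 * only CONFIG_DEBUG_VIRTUAL replaces __pa() with an out-of-line checked call.
 */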