/* proc-fns.h (4754B) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/proc-fns.h
 *
 * Copyright (C) 1997-1999 Russell King
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 *
 * Per-CPU-implementation "processor" operations.  Depending on the build,
 * the cpu_*() entry points are either resolved directly to one processor's
 * functions (!MULTI_CPU) or dispatched through a function-pointer table
 * (MULTI_CPU), optionally one table per CPU for big.LITTLE systems with
 * branch-predictor hardening.
 */
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H

#ifdef __KERNEL__

#include <asm/glue-proc.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__

struct mm_struct;

/*
 * Don't change this structure - ASM code relies on it.
 */
struct processor {
	/* MISC
	 * get data abort address/flags
	 */
	void (*_data_abort)(unsigned long pc);
	/*
	 * Retrieve prefetch fault address
	 */
	unsigned long (*_prefetch_abort)(unsigned long lr);
	/*
	 * Set up any processor specifics
	 */
	void (*_proc_init)(void);
	/*
	 * Check for processor bugs
	 */
	void (*check_bugs)(void);
	/*
	 * Disable any processor specifics
	 */
	void (*_proc_fin)(void);
	/*
	 * Special stuff for a reset
	 */
	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
	/*
	 * Idle the processor
	 */
	int (*_do_idle)(void);
	/*
	 * Processor architecture specific
	 */
	/*
	 * clean a virtual address range from the
	 * D-cache without flushing the cache.
	 */
	void (*dcache_clean_area)(void *addr, int size);

	/*
	 * Set the page table
	 */
	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
	/*
	 * Set a possibly extended PTE.  Non-extended PTEs should
	 * ignore 'ext'.
	 */
#ifdef CONFIG_ARM_LPAE
	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
#else
	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
#endif

	/* Suspend/resume */
	unsigned int suspend_size;
	void (*do_suspend)(void *);
	void (*do_resume)(void *);
};

#ifndef MULTI_CPU
/*
 * Single CPU implementation compiled in: the cpu_*() entry points below
 * are provided directly by the processor-specific code, so there is no
 * vtable to initialise.
 */
static inline void init_proc_vtable(const struct processor *p)
{
}

extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));

/* These three are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else

extern struct processor processor;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
#include <linux/smp.h>
/*
 * This can't be a per-cpu variable because we need to access it before
 * per-cpu has been initialised.  We have a couple of functions that are
 * called in a pre-emptible context, and so can't use smp_processor_id()
 * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
 * function pointers for these are identical across all CPUs.
 */
extern struct processor *cpu_vtable[];
#define PROC_VTABLE(f)	cpu_vtable[smp_processor_id()]->f
#define PROC_TABLE(f)	cpu_vtable[0]->f
/* Copy this CPU's processor ops into its vtable slot. */
static inline void init_proc_vtable(const struct processor *p)
{
	unsigned int cpu = smp_processor_id();
	*cpu_vtable[cpu] = *p;
	/*
	 * The two ops reached via PROC_TABLE() (CPU 0's slot) must be
	 * identical on every CPU, since PROC_TABLE() callers may run
	 * preemptibly on any CPU.
	 */
	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
		     cpu_vtable[0]->dcache_clean_area);
	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
		     cpu_vtable[0]->set_pte_ext);
}
#else
/* Single shared vtable: both macros resolve to the one global. */
#define PROC_VTABLE(f)	processor.f
#define PROC_TABLE(f)	processor.f
static inline void init_proc_vtable(const struct processor *p)
{
	processor = *p;
}
#endif

#define cpu_proc_init		PROC_VTABLE(_proc_init)
#define cpu_check_bugs		PROC_VTABLE(check_bugs)
#define cpu_proc_fin		PROC_VTABLE(_proc_fin)
#define cpu_reset		PROC_VTABLE(reset)
#define cpu_do_idle		PROC_VTABLE(_do_idle)
#define cpu_dcache_clean_area	PROC_TABLE(dcache_clean_area)
#define cpu_set_pte_ext		PROC_TABLE(set_pte_ext)
#define cpu_do_switch_mm	PROC_VTABLE(switch_mm)

/* These two are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend		PROC_VTABLE(do_suspend)
#define cpu_do_resume		PROC_VTABLE(do_resume)
#endif

extern void cpu_resume(void);

#include <asm/memory.h>

#ifdef CONFIG_MMU

#define cpu_switch_mm(pgd,mm)	cpu_do_switch_mm(virt_to_phys(pgd),mm)

#ifdef CONFIG_ARM_LPAE

/* Read the 64-bit Translation Table Base Register <nr> (MRRC, CP15 c2). */
#define cpu_get_ttbr(nr)					\
	({							\
		u64 ttbr;					\
		__asm__("mrrc p15, " #nr ", %Q0, %R0, c2"	\
			: "=r" (ttbr));				\
		ttbr;						\
	})

/* Virtual address of the current page table: TTBR0 masked to the
 * PGD's natural alignment, converted back from physical. */
#define cpu_get_pgd()						\
	({							\
		u64 pg = cpu_get_ttbr(0);			\
		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);		\
		(pgd_t *)phys_to_virt(pg);			\
	})
#else
/* Non-LPAE: 32-bit TTBR0 read (MRC, CP15 c2); the low 14 bits are
 * cleared to yield the 16KiB-aligned table base. */
#define cpu_get_pgd()						\
	({							\
		unsigned long pg;				\
		__asm__("mrc p15, 0, %0, c2, c0, 0"		\
			 : "=r" (pg) : : "cc");			\
		pg &= ~0x3fff;					\
		(pgd_t *)phys_to_virt(pg);			\
	})
#endif

#else	/*!CONFIG_MMU */

/* No MMU: switching mm is a no-op. */
#define cpu_switch_mm(pgd,mm)	{ }

#endif

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */