percpu.h (1744B)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

#include <asm/insn.h>

register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
 */
#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
	extern unsigned int smp_on_up;

	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
		return;

	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

static __always_inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("0:	mrc p15, 0, %0, c13, c0, 4			\n\t"
#ifdef CONFIG_CPU_V6
	    "1:							\n\t"
	    "	.subsection 1					\n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
    !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	    "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) "	\n\t"
	    "	b	1b					\n\t"
#else
	    "2: ldr	%0, 3f					\n\t"
	    "	ldr	%0, [%0]				\n\t"
	    "	b	1b					\n\t"
	    "3:	.long	__per_cpu_offset			\n\t"
#endif
	    "	.previous					\n\t"
	    "	.pushsection \".alt.smp.init\", \"a\"		\n\t"
	    "	.long	0b - .					\n\t"
	    "	b	. + (2b - 0b)				\n\t"
	    "	.popsection					\n\t"
#endif
	    : "=r" (off)
	    : "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
#define set_my_cpu_offset(x)	do {} while(0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */
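
For context: the generic percpu layer turns every per-CPU access into "link-time address of the variable plus this CPU's offset", and the header above merely makes that offset come from a single TPIDRPRW read (each CPU writes its offset into the register early in boot via set_my_cpu_offset()). The stand-alone user-space model below illustrates the addressing scheme; all names in it (my_cpu_offset, this_cpu_ptr, current_cpu, the array layout) are invented for illustration and are not the kernel's definitions.

#include <stdio.h>

#define NR_CPUS 4

/*
 * One copy of the "per-CPU" data per CPU; CPU 0's copy plays the role
 * of the link-time instance whose address the code takes.
 */
static long counter[NR_CPUS];

/* Byte distance from CPU 0's copy to CPU n's copy. */
static unsigned long __per_cpu_offset[NR_CPUS];

static int current_cpu;	/* stands in for the TPIDRPRW register */

static unsigned long my_cpu_offset(void)
{
	/* kernel: one mrc from TPIDRPRW; here: a plain array lookup */
	return __per_cpu_offset[current_cpu];
}

/* this_cpu_ptr()-style shift: base address + this CPU's offset */
#define this_cpu_ptr(ptr) \
	((__typeof__(ptr))((char *)(ptr) + my_cpu_offset()))

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		__per_cpu_offset[cpu] =
			(char *)&counter[cpu] - (char *)&counter[0];

	/* Always take the address of CPU 0's copy; the offset redirects
	 * the access to the current CPU's private copy. */
	for (current_cpu = 0; current_cpu < NR_CPUS; current_cpu++)
		*this_cpu_ptr(&counter[0]) += current_cpu;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %ld\n", cpu, counter[cpu]);
	return 0;
}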
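
The comment in __my_cpu_offset() about the "fake stack read" deserves unpacking: the asm is deliberately not volatile, so the compiler may merge repeated reads, yet the dummy memory input (the "Q" reference through the stack pointer) means any "memory" clobber such as barrier() invalidates the cached result and forces a re-read. A minimal model of that trick, assuming GCC/Clang inline asm but no particular target (the "Q" constraint is ARM-specific, so the portable "m" is used instead):

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static unsigned long the_value = 42;
static int hazard;	/* plays the role of the fake stack read */

static unsigned long read_val(void)
{
	unsigned long v;

	/*
	 * Stands in for "mrc p15, 0, %0, c13, c0, 4": NOT volatile, so
	 * back-to-back executions may be CSE'd; the "m"(hazard) input
	 * makes a "memory" clobber (e.g. barrier()) discard the cached
	 * result, exactly like the "Q" stack read in __my_cpu_offset().
	 */
	__asm__("" : "=r" (v) : "0" (the_value), "m" (hazard));
	return v;
}

int main(void)
{
	unsigned long a = read_val();	/* may be computed once...   */
	unsigned long b = read_val();	/* ...and merged with 'a'    */

	barrier();			/* clobbers memory           */
	unsigned long c = read_val();	/* must be executed again    */

	printf("%lu %lu %lu\n", a, b, c);
	return 0;
}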
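
Finally, the ".alt.smp.init" record emitted in the CONFIG_CPU_V6 path is what lets one kernel image run on both V6 (no TPIDRPRW) and V6K/V7 hardware: each record stores the location of the mrc (".long 0b - .") and a replacement branch to the out-of-line __per_cpu_offset load at label 2. The real fixup is ARM assembly in arch/arm/kernel/head.S; the sketch below is a hypothetical C rendering of that loop, with invented struct and function names, and with cache/TLB maintenance omitted.

#include <stdint.h>

struct alt_smp_init {
	int32_t  insn_offset;	/* ".long 0b - .": where the SMP insn is */
	uint32_t up_insn;	/* "b . + (2b - 0b)": UP replacement      */
};

/*
 * On uniprocessor V6 hardware, overwrite each recorded instruction -
 * here, the mrc at 0b - with the stored branch.  The branch word can be
 * copied verbatim: its PC-relative displacement was assembled as
 * (2b - 0b) minus the pipeline offset, so once it lands at 0b it
 * branches to the literal-pool fallback at 2b.
 */
static void fixup_smp_on_up(struct alt_smp_init *begin,
			    struct alt_smp_init *end)
{
	for (struct alt_smp_init *p = begin; p < end; p++) {
		uint32_t *insn = (uint32_t *)((char *)&p->insn_offset +
					      p->insn_offset);
		*insn = p->up_insn;
	}
}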