cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

percpu.h (8199B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <linux/preempt.h>

#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>
#include <asm/sysreg.h>

static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
			:: "r" (off) : "memory");
}

static inline unsigned long __hyp_my_cpu_offset(void)
{
	/*
	 * Non-VHE hyp code runs with preemption disabled. No need to hazard
	 * the register access against barrier() as in __kern_my_cpu_offset.
	 */
	return read_sysreg(tpidr_el2);
}

static inline unsigned long __kern_my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm(ALTERNATIVE("mrs %0, tpidr_el1",
			"mrs %0, tpidr_el2",
			ARM64_HAS_VIRT_HOST_EXTN)
		: "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}

#ifdef __KVM_NVHE_HYPERVISOR__
#define __my_cpu_offset __hyp_my_cpu_offset()
#else
#define __my_cpu_offset __kern_my_cpu_offset()
#endif
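
/*
 * Illustrative sketch (not part of the upstream header): __my_cpu_offset is
 * consumed by the generic per-cpu accessors, which relocate a per-cpu
 * variable's link-time address into the running CPU's copy by adding the
 * offset stashed in tpidr_el1/tpidr_el2. __example_this_cpu_ptr is a
 * hypothetical name; the real plumbing goes through SHIFT_PERCPU_PTR() in
 * <linux/percpu-defs.h>.
 */
static inline void *__example_this_cpu_ptr(void *ptr)
{
	/* caller must keep preemption disabled while using the pointer */
	return (void *)((unsigned long)ptr + __my_cpu_offset);
}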

#define PERCPU_RW_OPS(sz)						\
static inline unsigned long __percpu_read_##sz(void *ptr)		\
{									\
	return READ_ONCE(*(u##sz *)ptr);				\
}									\
									\
static inline void __percpu_write_##sz(void *ptr, unsigned long val)	\
{									\
	WRITE_ONCE(*(u##sz *)ptr, (u##sz)val);				\
}

#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline void							\
__percpu_##name##_case_##sz(void *ptr, unsigned long val)		\
{									\
	unsigned int loop;						\
	u##sz tmp;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n"			\
		#op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %[ptr]\n"			\
		__nops(3))						\
	: [loop] "=&r" (loop), [tmp] "=&r" (tmp),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
}

#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline u##sz							\
__percpu_##name##_return_case_##sz(void *ptr, unsigned long val)	\
{									\
	unsigned int loop;						\
	u##sz ret;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[ret], %[ptr]\n"			\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n"	\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
		__nops(2))						\
	: [loop] "=&r" (loop), [ret] "=&r" (ret),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
									\
	return ret;							\
}

#define PERCPU_OP(name, op_llsc, op_lse)				\
	__PERCPU_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_OP_CASE( ,  , name, 64, op_llsc, op_lse)

#define PERCPU_RET_OP(name, op_llsc, op_lse)				\
	__PERCPU_RET_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE( ,  , name, 64, op_llsc, op_lse)

PERCPU_RW_OPS(8)
PERCPU_RW_OPS(16)
PERCPU_RW_OPS(32)
PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
PERCPU_RET_OP(add, add, ldadd)

#undef PERCPU_RW_OPS
#undef __PERCPU_OP_CASE
#undef __PERCPU_RET_OP_CASE
#undef PERCPU_OP
#undef PERCPU_RET_OP
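
/*
 * Illustrative expansion (example only, not part of the upstream header):
 * PERCPU_OP(add, add, stadd) above generates, among others, roughly the
 * following 64-bit case. With LSE atomics the whole read-modify-write is a
 * single stadd; otherwise it is an ldxr/add/stxr retry loop.
 * __example_percpu_add_case_64 is a hypothetical name.
 */
static inline void __example_percpu_add_case_64(void *ptr, unsigned long val)
{
	unsigned int loop;
	u64 tmp;

	asm volatile (ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%[tmp], %[ptr]\n"
	"	add	%[tmp], %[tmp], %[val]\n"
	"	stxr	%w[loop], %[tmp], %[ptr]\n"
	"	cbnz	%w[loop], 1b",
	/* LSE atomics */
	"	stadd	%[val], %[ptr]\n"
		__nops(3))
	: [loop] "=&r" (loop), [tmp] "=&r" (tmp),
	  [ptr] "+Q"(*(u64 *)ptr)
	: [val] "r" ((u64)(val)));
}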

/*
 * It would be nice to avoid the conditional call into the scheduler when
 * re-enabling preemption for preemptible kernels, but doing that in a way
 * which builds inside a module would mean messing directly with the preempt
 * count. If you do this, peterz and tglx will hunt you down.
 */
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable_notrace();					\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable_notrace();					\
	__ret;								\
})

#define _pcp_protect(op, pcp, ...)					\
({									\
	preempt_disable_notrace();					\
	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
	preempt_enable_notrace();					\
})

#define _pcp_protect_return(op, pcp, args...)				\
({									\
	typeof(pcp) __retval;						\
	preempt_disable_notrace();					\
	__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);	\
	preempt_enable_notrace();					\
	__retval;							\
})
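
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream header):
 * once _pcp_protect_return() and raw_cpu_ptr() are expanded, a 64-bit
 * this_cpu_read() amounts to the following: disable preemption, relocate the
 * per-cpu base address with __my_cpu_offset, READ_ONCE() the value, then
 * re-enable preemption.
 */
static inline u64 __example_this_cpu_read_64(void *pcp_base)
{
	u64 val;

	preempt_disable_notrace();
	val = __percpu_read_64((char *)pcp_base + __my_cpu_offset);
	preempt_enable_notrace();

	return val;
}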

#define this_cpu_read_1(pcp)		\
	_pcp_protect_return(__percpu_read_8, pcp)
#define this_cpu_read_2(pcp)		\
	_pcp_protect_return(__percpu_read_16, pcp)
#define this_cpu_read_4(pcp)		\
	_pcp_protect_return(__percpu_read_32, pcp)
#define this_cpu_read_8(pcp)		\
	_pcp_protect_return(__percpu_read_64, pcp)

#define this_cpu_write_1(pcp, val)	\
	_pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
#define this_cpu_write_2(pcp, val)	\
	_pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
#define this_cpu_write_4(pcp, val)	\
	_pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
#define this_cpu_write_8(pcp, val)	\
	_pcp_protect(__percpu_write_64, pcp, (unsigned long)val)

#define this_cpu_add_1(pcp, val)	\
	_pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val)	\
	_pcp_protect(__percpu_add_case_16, pcp, val)
#define this_cpu_add_4(pcp, val)	\
	_pcp_protect(__percpu_add_case_32, pcp, val)
#define this_cpu_add_8(pcp, val)	\
	_pcp_protect(__percpu_add_case_64, pcp, val)

#define this_cpu_add_return_1(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_8, pcp, val)
#define this_cpu_add_return_2(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_16, pcp, val)
#define this_cpu_add_return_4(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
#define this_cpu_add_return_8(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)

#define this_cpu_and_1(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_8, pcp, ~val)
#define this_cpu_and_2(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_16, pcp, ~val)
#define this_cpu_and_4(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_32, pcp, ~val)
#define this_cpu_and_8(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_64, pcp, ~val)

#define this_cpu_or_1(pcp, val)		\
	_pcp_protect(__percpu_or_case_8, pcp, val)
#define this_cpu_or_2(pcp, val)		\
	_pcp_protect(__percpu_or_case_16, pcp, val)
#define this_cpu_or_4(pcp, val)		\
	_pcp_protect(__percpu_or_case_32, pcp, val)
#define this_cpu_or_8(pcp, val)		\
	_pcp_protect(__percpu_or_case_64, pcp, val)

#define this_cpu_xchg_1(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_2(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_4(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_8(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)

#define this_cpu_cmpxchg_1(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_2(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_4(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_8(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
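
/*
 * Note with sketch (illustrative, not part of the upstream header): no "and"
 * case is generated above because the LSE store forms used here only provide
 * an atomic bit-clear (stclr). this_cpu_and_N(pcp, val) therefore feeds the
 * complement into the andnot helper: pcp &= val becomes "clear the bits in
 * ~val", i.e. bic on the LL/SC path or stclr with LSE. Open-coded for the
 * 32-bit case (hypothetical helper name):
 */
static inline void __example_this_cpu_and_32(void *pcp_base, u32 mask)
{
	preempt_disable_notrace();
	/* clear every bit that is not set in mask */
	__percpu_andnot_case_32((char *)pcp_base + __my_cpu_offset, ~mask);
	preempt_enable_notrace();
}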

#ifdef __KVM_NVHE_HYPERVISOR__
extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
#define __per_cpu_offset
#define per_cpu_offset(cpu)	__hyp_per_cpu_offset((cpu))
#endif

#include <asm-generic/percpu.h>

/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
#undef	this_cpu_ptr
#define	this_cpu_ptr		raw_cpu_ptr
#undef	__this_cpu_read
#define	__this_cpu_read		raw_cpu_read
#undef	__this_cpu_write
#define	__this_cpu_write	raw_cpu_write
#endif

#endif /* __ASM_PERCPU_H */
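
Usage sketch (illustrative; the per-CPU variable and function names below are
hypothetical): kernel code reaches the size-dispatched helpers in this header
through the generic this_cpu_*() macros from <linux/percpu.h>, which select
the _1/_2/_4/_8 variant by operand size.

#include <linux/percpu.h>

/* hypothetical per-CPU counter; one instance exists for every possible CPU */
static DEFINE_PER_CPU(u64, example_hits);

static void example_count_hit(void)
{
	/* on arm64 this resolves to this_cpu_add_8() -> __percpu_add_case_64() */
	this_cpu_add(example_hits, 1);
}

static u64 example_total_hits(void)
{
	u64 sum = 0;
	int cpu;

	/* sum each CPU's copy via per_cpu(), which applies per_cpu_offset(cpu) */
	for_each_possible_cpu(cpu)
		sum += per_cpu(example_hits, cpu);

	return sum;
}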