cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic_lse.h (8599B)
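
arm64's LSE atomics header: the kernel's atomic and cmpxchg operations implemented with ARMv8.1 Large System Extensions (LSE) instructions.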


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

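/*
 * All helpers below are built on the ARMv8.1 LSE atomic instructions
 * (ST<op>/LD<op>, CAS, CASP). The arm64 atomic wrappers select these
 * __lse_*() helpers over the LL/SC fallbacks at runtime when the CPU
 * supports LSE. __LSE_PREAMBLE enables the LSE extension for the inline
 * assembly, and the instruction suffix encodes memory ordering: none for
 * relaxed, "a" for acquire, "l" for release, "al" for fully ordered.
 */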
#define ATOMIC_OP(op, asm_op)						\
static inline void __lse_atomic_##op(int i, atomic_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%w[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

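/*
 * There is no LSE subtract instruction, so sub (and fetch_sub further down)
 * is implemented as an add of the negated operand.
 */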
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}

#undef ATOMIC_OP

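/*
 * The LD<op> forms return the value the location held before the update.
 * "mb" selects the ordering suffix and "cl" the clobber list required by
 * the acquire/release/fully-ordered variants.
 */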
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
{									\
	int old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%w[i], %w[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_FETCH_OP_SUB(name)					\
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_fetch_add##name(-i, v);			\
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB(        )

#undef ATOMIC_FETCH_OP_SUB

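/*
 * add_return/sub_return are derived from the fetch ops: applying the operand
 * to the fetched old value reconstructs the value that was stored.
 */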
#define ATOMIC_OP_ADD_SUB_RETURN(name)					\
static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_fetch_add##name(i, v) + i;			\
}									\
									\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_fetch_sub##name(i, v) - i;			\
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN(        )

#undef ATOMIC_OP_ADD_SUB_RETURN

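/*
 * LSE has no atomic AND, only ANDNOT (LDCLR/STCLR clear the bits set in the
 * source register), so and(i) is expressed as andnot(~i).
 */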
static inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
{									\
	return __lse_atomic_fetch_andnot##name(~i, v);			\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

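/*
 * The atomic64_t variants below mirror the 32-bit helpers above, operating
 * on full 64-bit X registers instead of W registers.
 */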
#define ATOMIC64_OP(op, asm_op)						\
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op "	%[i], %[v]\n"				\
	: [v] "+Q" (v->counter)						\
	: [i] "r" (i));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{									\
	s64 old;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	" #asm_op #mb "	%[i], %[old], %[v]"			\
	: [v] "+Q" (v->counter),					\
	  [old] "=r" (old)						\
	: [i] "r" (i)							\
	: cl);								\
									\
	return old;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)					\
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)\
{									\
	return __lse_atomic64_fetch_add##name(-i, v);			\
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB(        )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{									\
	return __lse_atomic64_fetch_add##name(i, v) + i;		\
}									\
									\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{									\
	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN(        )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)\
{									\
	return __lse_atomic64_fetch_andnot##name(~i, v);		\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

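/*
 * dec_if_positive has no single LSE instruction, so it is a CAS loop: load
 * the counter, bail out if decrementing would make it negative, then try to
 * install the decremented value with CASAL and retry if another CPU raced.
 * The pointer register doubles as the result register, hence the cast on
 * return.
 */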
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

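/*
 * CAS<mb><sfx> compares the value at [v] with the "old" register and stores
 * "new" if they match; either way the register is overwritten with the value
 * that was observed. The helpers return that observed value, so callers
 * detect success by comparing it against "old".
 */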
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr),			\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

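/*
 * CASP<mb> atomically compares and swaps a pair of adjacent 64-bit words.
 * The EOR/ORR sequence folds the two returned old values against the
 * expected ones, so the helper returns zero on success and non-zero on
 * failure.
 */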
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */