cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic-grb.h (2677B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H

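/*
 * These operations rely on the SH gUSA/gRB rollback scheme rather than on
 * disabling interrupts: r0 is loaded with the address of the end of the
 * critical section (label "1:") and r15 with the negated byte size of the
 * section (the LOGIN).  If the task is preempted inside the section, the
 * kernel sees the negative saved r15, rolls the saved PC back to the LOGIN
 * instruction and restores r15 from r1, so the whole load/modify/store
 * sequence is re-executed from scratch.  The LOGOUT at label "1:" restores
 * the real stack pointer on completion, making each sequence atomic on
 * uniprocessor SH.
 */
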
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int tmp;							\
									\
	__asm__ __volatile__ (						\
		"   .align 2              \n\t"				\
		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */	\
		"   mov.l  @%1,   %0      \n\t" /* load  old value */	\
		" " #op "   %2,   %0      \n\t" /* $op */		\
		"   mov.l   %0,   @%1     \n\t" /* store new value */	\
		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
		: "=&r" (tmp),						\
		  "+r"  (v)						\
		: "r"   (i)						\
		: "memory" , "r0", "r1");				\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__ (						\
		"   .align 2              \n\t"				\
		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */	\
		"   mov.l  @%1,   %0      \n\t" /* load  old value */	\
		" " #op "   %2,   %0      \n\t" /* $op */		\
		"   mov.l   %0,   @%1     \n\t" /* store new value */	\
		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
		: "=&r" (tmp),						\
		  "+r"  (v)						\
		: "r"   (i)						\
		: "memory" , "r0", "r1");				\
									\
	return tmp;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int res, tmp;							\
									\
	__asm__ __volatile__ (						\
		"   .align 2              \n\t"				\
		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
		"   mov    #-8,   r15     \n\t" /* LOGIN: r15 = size (4 insns = 8 bytes) */ \
		"   mov.l  @%2,   %0      \n\t" /* load old value */	\
		"   mov     %0,   %1      \n\t" /* save old value */	\
		" " #op "   %3,   %0      \n\t" /* $op */		\
		"   mov.l   %0,   @%2     \n\t" /* store new value */	\
		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
		: "=&r" (tmp), "=&r" (res), "+r"  (v)			\
		: "r"   (i)						\
		: "memory" , "r0", "r1");				\
									\
	return res;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_GRB_H */
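
For reference, ATOMIC_OPS(add) above stamps out arch_atomic_add(),
arch_atomic_add_return() and arch_atomic_fetch_add(): "add" is pasted into
the function name by ##op and into the instruction string by #op. A sketch
of what the arch_atomic_add_return() expansion looks like after
preprocessing (comments kept for readability):

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		" add   %2,   %0      \n\t"     /* pasted op */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp), "+r" (v)
		: "r" (i)
		: "memory", "r0", "r1");

	return tmp;
}

The kernel's generic atomic wrappers then expose these as the usual
atomic_add(), atomic_add_return() and atomic_fetch_add().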