cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic-llsc.h (2186B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}
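
/*
 * For illustration, ATOMIC_OP(add) above expands to roughly the
 * following (modulo whitespace):
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *
 *		__asm__ __volatile__ (
 *	"1:	movli.l @%2, %0		! atomic_add\n"
 *	"	add	%1, %0				\n"
 *	"	movco.l	%0, @%2				\n"
 *	"	bf	1b				\n"
 *		: "=&z" (tmp)
 *		: "r" (i), "r" (&v->counter)
 *		: "t");
 *	}
 *
 * The "=&z" constraint pins tmp to r0, which the movli.l/movco.l
 * encodings require, and "t" clobbers the T flag tested by bf.
 */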

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}
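
/*
 * A usage sketch (assumed, not from this file): the _return variants
 * hand back the post-operation value, with the trailing synco acting
 * as the barrier that makes the operation fully ordered:
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *	int n = arch_atomic_add_return(4, &v);	// n == 5, the new value
 */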

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long res, temp;					\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
"	mov %0, %1					\n"		\
"	" #op "	%2, %0					\n"		\
"	movco.l	%0, @%3					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp), "=&r" (res)					\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return res;							\
}
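
/*
 * The mov %0, %1 above snapshots the value loaded by movli.l into res
 * before the operation runs, so fetch_##op returns the old value,
 * unlike ##op##_return. An assumed example:
 *
 *	atomic_t v = ATOMIC_INIT(5);
 *	int old = arch_atomic_fetch_or(8, &v);	// old == 5, v now holds 13
 */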

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
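
/*
 * Taken together, the ATOMIC_OPS() invocations above generate (names
 * inferred from the macro definitions, listed here as a sketch):
 *
 *	arch_atomic_add,  arch_atomic_add_return,  arch_atomic_fetch_add
 *	arch_atomic_sub,  arch_atomic_sub_return,  arch_atomic_fetch_sub
 *	arch_atomic_and,                           arch_atomic_fetch_and
 *	arch_atomic_or,                            arch_atomic_fetch_or
 *	arch_atomic_xor,                           arch_atomic_fetch_xor
 *
 * and, or and xor deliberately have no _return form here.
 */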

#endif /* __ASM_SH_ATOMIC_LLSC_H */