cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic.h (7301B)


/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a14, TOPLEVEL
 *    <code>
 *    wsr  a14, PS
 *    rsync
 *
 * Note that a14 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a14 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))

#if XCHAL_HAVE_EXCLUSIVE
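/*
 * Exclusive-access variant: l32ex loads the word and arms the exclusive
 * monitor, s32ex stores only if the monitor is still valid, and getex
 * fetches the store's success flag (nonzero on success) into %[result],
 * clobbering it. beqz retries the whole sequence until the store succeeds.
 */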
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
}									\

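/*
 * Same loop as above, but the _return variant must produce the new value.
 * Since getex clobbered %[result] with the success flag, the operation is
 * recomputed from the loaded old value (%[tmp]) and %[i] after the loop.
 */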
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return result;							\
}

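/*
 * fetch_##op returns the value observed before the update: %[tmp] still
 * holds the word as loaded by l32ex on the final, successful iteration.
 */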
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return tmp;							\
}

#elif XCHAL_HAVE_S32C1I
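/*
 * s32c1i variant: a compare-and-swap loop. The loaded value is placed in
 * the SCOMPARE1 special register; s32c1i stores %[result] only if the word
 * still equals SCOMPARE1 and, in either case, leaves the word's previous
 * value in %[result]. bne retries if the word changed under us.
 */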
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
}									\

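/*
 * After a successful s32c1i, %[result] holds the old value, so the
 * _return variant applies the operation once more to yield the new value,
 * while the fetch variant below can return %[result] unchanged.
 */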
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			"       " #op " %[result], %[result], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}

#else /* XCHAL_HAVE_S32C1I */

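/*
 * Fallback for cores with neither exclusive load/store nor s32c1i: mask
 * interrupts up to TOPLEVEL with rsil (saving the old PS in a14, as
 * described at the top of this file), do a plain load/modify/store, then
 * restore PS and rsync. This excludes only local interrupts, not other
 * CPUs, so it is suitable for uniprocessor configurations.
 */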
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a14", "memory"				\
			);						\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a14", "memory"				\
			);						\
									\
	return vval;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned int tmp, vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[tmp], %[result], %[i]\n"	\
			"       s32i    %[tmp], %[mem]\n"		\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "a14", "memory"				\
			);						\
									\
	return vval;							\
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)

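/*
 * add and sub get all three forms; and, or and xor need only the void and
 * fetch forms, as the kernel's atomic API defines no bitwise "_return"
 * operations.
 */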
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

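/*
 * xchg and cmpxchg on an atomic_t delegate to the plain-word arch_xchg()
 * and arch_cmpxchg() from <asm/cmpxchg.h>, applied to the counter field.
 */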
#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#endif /* _XTENSA_ATOMIC_H */
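
/*
 * Usage sketch (illustrative only; these wrappers belong to the generic
 * <linux/atomic.h> layer, not to this header):
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refs);		built on arch_atomic_add(1, &refs)
 *	if (atomic_dec_and_test(&refs))	built on arch_atomic_sub_return()
 *		cleanup();		hypothetical callback
 */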