cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic.h (9484B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/compiler.h>

#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w	"
#define __SC		"sc.w	"
#define __AMADD		"amadd.w	"
#define __AMAND_DB	"amand_db.w	"
#define __AMOR_DB	"amor_db.w	"
#define __AMXOR_DB	"amxor_db.w	"
#elif __SIZEOF_LONG__ == 8
#define __LL		"ll.d	"
#define __SC		"sc.d	"
#define __AMADD		"amadd.d	"
#define __AMAND_DB	"amand_db.d	"
#define __AMOR_DB	"amor_db.d	"
#define __AMXOR_DB	"amxor_db.d	"
#endif
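
/*
 * Illustrative sketch, not part of the original header: how the
 * word-size-selected __LL/__SC mnemonics above compose into a classic
 * load-linked/store-conditional retry loop. The helper name is
 * hypothetical; the operations below use LoongArch's single-instruction
 * AM* atomics instead of open-coded LL/SC where they can.
 */
static inline unsigned long __example_llsc_xchg(volatile unsigned long *p,
						unsigned long val)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"0:	" __LL "%[old], %[mem]		\n"	/* load-linked *p */
	"	or	%[tmp], %[new], $zero	\n"	/* tmp = val */
	"	" __SC "%[tmp], %[mem]		\n"	/* store-conditional; %[tmp] = success */
	"	beqz	%[tmp], 0b		\n"	/* reservation lost: retry */
	: [old] "=&r" (old), [tmp] "=&r" (tmp), [mem] "+ZB" (*p)
	: [new] "r" (val)
	: "memory");

	return old;
}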

#define ATOMIC_INIT(i)	  { (i) }

/*
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)

/*
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
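
/*
 * Illustrative sketch, not part of the original header: basic use of the
 * plain accessors above. They compile to a single READ_ONCE/WRITE_ONCE
 * load or store and imply no ordering; note that the read and the reset
 * below are two independent operations, not one atomic read-modify-write.
 * The helper name is hypothetical.
 */
static inline int example_read_then_clear(atomic_t *v)
{
	int snap = arch_atomic_read(v);	/* one atomic load of v->counter */

	arch_atomic_set(v, 0);		/* one atomic store; no barrier implied */
	return snap;
}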

#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC_FETCH_OP(op, I, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

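/*
 * Illustrative sketch, not part of the original header: ATOMIC_OPS(add, ...)
 * and ATOMIC_OPS(sub, ...) above generate arch_atomic_add(),
 * arch_atomic_add_return_relaxed(), arch_atomic_fetch_add_relaxed() and the
 * sub counterparts; subtraction reuses AMADD.W with a negated operand, which
 * is why the sub instantiation passes "add" as the instruction. The budget
 * counter and helper name below are hypothetical.
 */
static inline bool example_take_budget(atomic_t *budget, int n)
{
	/* one amadd_db.w with -n; returns the value after the update */
	return arch_atomic_sub_return_relaxed(n, budget) >= 0;
}
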
#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

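/*
 * Illustrative sketch, not part of the original header: a typical use of
 * the bitwise fetch variants generated above, setting a flag and learning
 * in the same amor_db.w whether it was already set. The flag value and
 * helper name are hypothetical.
 */
#define EXAMPLE_FLAG_DIRTY	0x1

static inline bool example_test_and_mark_dirty(atomic_t *flags)
{
	/* OR the bit in and return the previous flag word */
	return arch_atomic_fetch_or_relaxed(EXAMPLE_FLAG_DIRTY, flags) & EXAMPLE_FLAG_DIRTY;
}
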
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	ll.w	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.w	%[rc], %[p], %[a]\n"
		"	sc.w	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

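/*
 * Illustrative sketch, not part of the original header: the classic
 * consumer of fetch_add_unless() is a refcount-style "increment unless
 * already zero". The helper name is hypothetical; the generic atomic
 * layer builds the real atomic_inc_not_zero() on top of this routine.
 */
static inline bool example_atomic_inc_not_zero(atomic_t *v)
{
	/* returns the old value; 0 means the add was skipped */
	return arch_atomic_fetch_add_unless(v, 1, 0) != 0;
}
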
/*
 * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;
	int temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	addi.w	%0, %1, %3				\n"
		"	or	%1, %0, $zero				\n"
		"	blt	%0, $zero, 2f				\n"
		"	sc.w	%1, %2					\n"
		"	beq	$zero, %1, 1b				\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	sub.w	%0, %1, %3				\n"
		"	or	%1, %0, $zero				\n"
		"	blt	%0, $zero, 2f				\n"
		"	sc.w	%1, %2					\n"
		"	beq	$zero, %1, 1b				\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

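/*
 * Illustrative sketch, not part of the original header: an open-coded
 * compare-and-swap loop over the cmpxchg wrapper above. The clamped-add
 * semantics and the helper name are hypothetical.
 */
static inline int example_atomic_add_clamp(atomic_t *v, int a, int limit)
{
	int old = arch_atomic_read(v);
	int prev, new;

	do {
		new = old + a;
		if (new > limit)
			new = limit;	/* clamp at the hypothetical ceiling */
		prev = old;
		/* returns the value found in memory; equality means success */
		old = arch_atomic_cmpxchg(v, old, new);
	} while (old != prev);

	return new;
}
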
/*
 * arch_atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

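/*
 * Illustrative sketch, not part of the original header: dec_if_positive()
 * as a "take one slot if any are left" primitive. Since
 * arch_atomic_sub_if_positive() returns old - 1 and only stores when that
 * result is non-negative, a negative return means the counter was left
 * untouched. The helper name is hypothetical.
 */
static inline bool example_try_take_slot(atomic_t *slots)
{
	return arch_atomic_dec_if_positive(slots) >= 0;
}
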
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * arch_atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * arch_atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op)					\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op"_db.d " " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)					\
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)	\
{										\
	long result;								\
	__asm__ __volatile__(							\
	"am"#asm_op"_db.d " " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)					\
	: "r" (I)								\
	: "memory");								\
										\
	return result c_op I;							\
}

#define ATOMIC64_FETCH_OP(op, I, asm_op)					\
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)	\
{										\
	long result;								\
										\
	__asm__ __volatile__(							\
	"am"#asm_op"_db.d " " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)					\
	: "r" (I)								\
	: "memory");								\
										\
	return result;								\
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)				      \
	ATOMIC64_OP(op, I, asm_op)					      \
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				      \
	ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)					      \
	ATOMIC64_OP(op, I, asm_op)					      \
	ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	ll.d	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.d	%[rc], %[p], %[a]\n"
		"	sc.d	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c] "=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
 * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
	long result;
	long temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	addi.d	%0, %1, %3				\n"
		"	or	%1, %0, $zero				\n"
		"	blt	%0, $zero, 2f				\n"
		"	sc.d	%1, %2					\n"
		"	beq	%1, $zero, 1b				\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	sub.d	%0, %1, %3				\n"
		"	or	%1, %0, $zero				\n"
		"	blt	%0, $zero, 2f				\n"
		"	sc.d	%1, %2					\n"
		"	beq	%1, $zero, 1b				\n"
		"2:							\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

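/*
 * Illustrative sketch, not part of the original header: arch_atomic64_xchg()
 * used to atomically hand off a 64-bit value, returning whatever was stored
 * before. The helper name and the use of zero as an "empty" marker are
 * hypothetical.
 */
static inline long example_atomic64_take(atomic64_t *slot)
{
	/* single exchange via arch_xchg() on the 64-bit counter */
	return arch_atomic64_xchg(slot, 0);
}
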
/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */