cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bitops.h (10977B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>

#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long __temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL)	"	%0, %1	\n"	\
	"	" insn		"			\n"	\
	"	" __stringify(LONG_SC)	"	%0, %1	\n"	\
	"	" __stringify(SC_BEQZ)	"	%0, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long __orig, __temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) " "	ll_dst ", %2\n"	\
	"	" insn		"			\n"	\
	"	" __stringify(LONG_SC)	"	%1, %2	\n"	\
	"	" __stringify(SC_BEQZ)	"	%1, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__orig), "=&r"(__temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	__orig;							\
})
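
/*
 * Conceptually, the LL/SC loops above implement an atomic read-modify-write:
 * load-linked (LONG_LL: ll/lld), apply "insn", store-conditional (LONG_SC:
 * sc/scd), and branch back (SC_BEQZ) when the store-conditional failed.  A
 * rough C11-atomics sketch of the same retry pattern (illustrative only,
 * not used by the kernel; "p" and "mask" are hypothetical):
 *
 *	_Atomic unsigned long *p;
 *	unsigned long old, new, mask;
 *
 *	old = atomic_load_explicit(p, memory_order_relaxed);
 *	do {
 *		new = old | mask;	// the "insn" step, e.g. set bits
 *	} while (!atomic_compare_exchange_weak_explicit(p, &old, new,
 *			memory_order_relaxed, memory_order_relaxed));
 */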

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
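
/*
 * Example usage (a minimal sketch; "irq_pending" is a hypothetical caller
 * bitmap, not part of this file):
 *
 *	static DECLARE_BITMAP(irq_pending, 128);
 *
 *	set_bit(42, irq_pending);	// atomically sets bit 42; on a
 *					// 32-bit kernel this lands in
 *					// irq_pending[1], bit 10
 */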

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}
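
/*
 * Example of the barrier pairing the comment above asks for (a sketch;
 * "flags" and DONE_BIT are hypothetical caller state):
 *
 *	smp_mb__before_atomic();	// order earlier stores before the clear
 *	clear_bit(DONE_BIT, &flags);
 *	smp_mb__after_atomic();		// order the clear before later accesses
 */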

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
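
/*
 * Together with clear_bit_unlock() this gives a simple bit spinlock.  A
 * minimal sketch (LOCK_BIT and "word" are hypothetical caller state):
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();		// spin until the old value was 0
 *	// ... critical section, protected by LOCK_BIT ...
 *	clear_bit_unlock(LOCK_BIT, &word);
 */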

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __stringify(LONG_EXT) " %0, %1, %3, 1;"
				    __stringify(LONG_INS) " %1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
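
/*
 * Typical use: consume a pending-work flag exactly once.  A sketch
 * ("pending" and WORK_BIT are hypothetical caller state):
 *
 *	if (test_and_clear_bit(WORK_BIT, &pending))
 *		do_work();	// only one CPU sees the bit as set
 */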

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
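
/*
 * Example: atomically toggle a mode flag and learn the previous state.
 * A sketch ("mode" and FAST_BIT are hypothetical caller state):
 *
 *	if (test_and_change_bit(FAST_BIT, &mode))
 *		pr_debug("was fast, now slow\n");	// old value was 1
 */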

#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant
 * 1 bit in a word.
 * Undefined if no 1 bit exists, so callers should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
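
/*
 * Worked example: __fls(0x90) == 7.  On the clz path, clz(0x90) == 24 for
 * a 32-bit word, and 31 - 24 == 7; the binary-search fallback reaches the
 * same answer by halving the candidate range at each step.
 */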

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..BITS_PER_LONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
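
/*
 * The word & -word trick isolates the lowest set bit: for word == 0x90,
 * word & -word == 0x10, and __fls(0x10) == 4, so __ffs(0x90) == 4.
 */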

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
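
/*
 * Worked example on the clz path: fls(0x90) == 32 - clz(0x90) ==
 * 32 - 24 == 8, i.e. the highest set bit (bit 7) counted 1-based.
 * For any nonzero 32-bit x, fls(x) == __fls(x) + 1.
 */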

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, and therefore differs in spirit from ffz() below
 * (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
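
/*
 * Example: ffs(0x90) == 5 (1-based), while __ffs(0x90) == 4 (0-based);
 * ffs(0) == 0, matching the libc convention.
 */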

#include <asm-generic/bitops/ffz.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */