cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bitops.h (12621B) - arch/m68k/include/asm/bitops.h


#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>
/*
 *	Bit access functions vary across the ColdFire and 68k families.
 *	So we will break them out here, and then macro in the ones we want.
 *
 *	ColdFire - supports standard bset/bclr/bchg with register operand only
 *	68000    - supports standard bset/bclr/bchg with memory operand
 *	>= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 *	Although it is possible to use only the bset/bclr/bchg instructions
 *	with register operands on all platforms, you end up with larger
 *	generated code. So we use the best form possible on a given platform.
 */
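
/*
 * Note on bit numbering (added for clarity): Linux numbers bit 0 as the
 * least significant bit of the first longword, but the m68k is big-endian,
 * so that bit lives in byte 3 of the word. (nr ^ 31) / 8 maps a bit index
 * to its byte offset (nr = 0 -> byte 3, nr = 8 -> byte 2) and nr & 7 picks
 * the bit within that byte. The bitfield (bf*) instructions instead count
 * bit 0 from the MSB, which is why they are passed nr ^ 31 directly.
 */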

static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif

#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
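
/*
 * Illustrative example (not part of the original header): on 68020+ the
 * macro above folds to the single-bit bset form when the bit number is a
 * compile-time constant, and to the bitfield form otherwise, e.g.:
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 *	set_bit(5, map);	// constant nr: compiles to bset
 *	set_bit(n, map);	// runtime nr: compiles to bfset
 */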


static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)


static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)


static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
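
/*
 * Note (added for clarity): test_bit() needs no endian fixup because it
 * loads the whole longword (vaddr[nr >> 5]) and masks it in C, rather
 * than addressing individual bytes the way the asm helpers above do.
 */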


static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}
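
/*
 * Note (added for clarity): bset/bclr/bchg (and the bf* forms) set the Z
 * condition code from the bit's *old* value, so the trailing "sne" turns
 * that into the return value: 0xff if the bit was previously set, 0 if
 * not. The same pattern is used by all the test_and_* helpers below.
 */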

static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)


static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)


static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)


/*
 *	The true 68020 and more advanced processors support the "bfffo"
 *	instruction for finding bits. ColdFire and simple 68000 parts
 *	(including CPU32) do not support this. They simply use the generic
 *	functions.
 */
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/ffz.h>
#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
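
/*
 * Worked example (added for clarity): "num & -num" isolates the lowest
 * set bit of num. bfffo then reports that bit's offset counted from the
 * MSB, and "res ^= 31" converts it back to an LSB-relative bit index.
 * E.g. num = 0x14: num & -num = 0x04 (bit 2), bfffo returns 29, and
 * 29 ^ 31 = 2.
 */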

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full longwords for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
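
/*
 * Illustrative usage (a sketch, not from the original file): walking
 * every clear bit of an nbits-wide bitmap with the two helpers above:
 *
 *	for (i = find_first_zero_bit(map, nbits);
 *	     i < nbits;
 *	     i = find_next_zero_bit(map, nbits, i + 1))
 *		do_something(i);	// hypothetical per-bit handler
 */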

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full longwords for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
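
/*
 * Illustrative usage (not from the original file): since ffz() is
 * undefined when every bit is set, callers guard it first, e.g. a
 * simple free-slot scan over a 32-entry allocation map:
 *
 *	if (map != ~0UL)
 *		slot = ffz(map);	// index of the first clear bit
 */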

#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 *	The newer ColdFire family members support a "bitrev" instruction
 *	and we can use that to implement a fast ffs. Older ColdFire parts,
 *	and normal 68000 parts don't have anything special, so we use the
 *	generic functions for those.
 */
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000)
static inline unsigned long __ffs(unsigned long x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}
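
/*
 * Worked example (added for clarity): ff1 finds the first set bit
 * scanning from the MSB, so reversing the word first with bitrev yields
 * the LSB-relative index. For x = 0x08 (bit 3): bitrev gives 0x10000000,
 * and ff1 returns 3, which is exactly __ffs(0x08).
 */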

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 *	ffs: find first bit set. This is defined the same way as
 *	the libc and compiler builtin ffs routines, therefore
 *	differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}

static inline unsigned long __ffs(unsigned long x)
{
	return ffs(x) - 1;
}

/*
 *	fls: find last bit set.
 */
static inline int fls(unsigned int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}
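
/*
 * Worked examples (added for clarity): bfffo yields 32 when no bit is
 * set in the 32-bit field, so the arithmetic above degrades gracefully:
 *
 *	ffs(0)    == 0		(cnt = 32)
 *	ffs(0x01) == 1		(bit 0 sits at MSB-offset 31, cnt = 31)
 *	fls(0x80) == 8		(bit 7 sits at MSB-offset 24, cnt = 24)
 *	fls(0)    == 0
 */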

#endif

/* Simple test-and-set bit locks */
#define test_and_set_bit_lock	test_and_set_bit
#define clear_bit_unlock	clear_bit
#define __clear_bit_unlock	clear_bit_unlock
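
/*
 * Illustrative sketch (not from the original header): these aliases give
 * the usual bit-lock pattern, with lockword being some unsigned long:
 *
 *	while (test_and_set_bit_lock(0, &lockword))
 *		cpu_relax();		// spin while someone holds the bit
 *	... critical section ...
 *	clear_bit_unlock(0, &lockword);
 */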

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */