cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bitops.h (5736B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>

/* compiler build environment sanity checks: */
#if !defined(CONFIG_64BIT) && defined(__LP64__)
#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
#endif
#if defined(CONFIG_64BIT) && !defined(__LP64__)
#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
#endif
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on the use of volatile and __*_bit() (set/clear/change):
 *	the *_bit() forms are atomic and want use of volatile.
 *	the __*_bit() forms are "relaxed" and use neither spinlock nor volatile.
 */

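/* Illustrative usage sketch (not part of the original header): the locked
 * *_bit() forms below are safe when several CPUs update the same word,
 * while the __*_bit() forms pulled in from asm-generic are only safe when
 * the caller already excludes concurrent writers.
 *
 *	static unsigned long flags_word;	// hypothetical flag word
 *
 *	set_bit(0, &flags_word);	// atomic: may race with other CPUs
 *	__set_bit(1, &flags_word);	// relaxed: caller must serialize
 */
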
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

/* Atomically set bit nr and return its previous value. */
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (!set)
		*addr = old | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

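/* Illustrative sketch (not from the original source): because the
 * read-modify-write above happens under the hashed spinlock, the
 * returned old value can arbitrate a claim-once flag:
 *
 *	static unsigned long init_done;	// hypothetical state word
 *
 *	if (!test_and_set_bit(0, &init_done)) {
 *		// exactly one caller reaches this initialization path
 *	}
 */
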
/* Atomically clear bit nr and return its previous value. */
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (set)
		*addr = old & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

/* Atomically toggle bit nr and return its previous value. */
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

#include <asm-generic/bitops/non-atomic.h>

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of the "extr" instructions is that they set the PSW[N]
 * bit. How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns executes in 2 cycles on PA8x00, versus 16 or so
 * cycles for each mispredicted branch.
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef CONFIG_64BIT
		" ldi       63,%1\n"
		" extrd,u,*<>  %0,63,32,%%r0\n"
		" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
		" addi    -32,%1,%1\n"
#else
		" ldi       31,%1\n"
#endif
		" extru,<>  %0,31,16,%%r0\n"
		" extru,TR  %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi    -16,%1,%1\n"
		" extru,<>  %0,31,8,%%r0\n"
		" extru,TR  %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi    -8,%1,%1\n"
		" extru,<>  %0,31,4,%%r0\n"
		" extru,TR  %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi    -4,%1,%1\n"
		" extru,<>  %0,31,2,%%r0\n"
		" extru,TR  %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi    -2,%1,%1\n"
		" extru,=  %0,31,1,%%r0\n"	/* check last bit */
		" addi    -1,%1,%1\n"
			: "+r" (x), "=r" (ret) );
	return ret;
}

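/* Reference sketch (not from the original source): the 32-bit asm above
 * is equivalent to this branching binary search, with each if/else pair
 * folded into one nullified extru/addi group (the 64-bit build prepends
 * one more stage of the same shape, starting from 63):
 *
 *	ret = 31;
 *	if (x & 0xffff) ret -= 16; else x >>= 16;
 *	if (x & 0x00ff) ret -= 8;  else x >>= 8;
 *	if (x & 0x000f) ret -= 4;  else x >>= 4;
 *	if (x & 0x0003) ret -= 2;  else x >>= 2;
 *	if (x & 0x0001) ret -= 1;
 */
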
#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls(unsigned int x)
{
	int ret;
	if (!x)
		return 0;

	__asm__(
	"	ldi		1,%1\n"
	"	extru,<>	%0,15,16,%%r0\n"
	"	zdep,TR		%0,15,16,%0\n"		/* xxxx0000 */
	"	addi		16,%1,%1\n"
	"	extru,<>	%0,7,8,%%r0\n"
	"	zdep,TR		%0,23,24,%0\n"		/* xx000000 */
	"	addi		8,%1,%1\n"
	"	extru,<>	%0,3,4,%%r0\n"
	"	zdep,TR		%0,27,28,%0\n"		/* x0000000 */
	"	addi		4,%1,%1\n"
	"	extru,<>	%0,1,2,%%r0\n"
	"	zdep,TR		%0,29,30,%0\n"		/* y0000000 (y&3 = 0) */
	"	addi		2,%1,%1\n"
	"	extru,=		%0,0,1,%%r0\n"
	"	addi		1,%1,%1\n"		/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );

	return ret;
}

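/* Reference sketch (not from the original source): the asm above mirrors
 * the __ffs reduction but searches from the most significant end,
 * shifting the value left whenever the upper slice is empty:
 *
 *	ret = 1;
 *	if (x & 0xffff0000u) ret += 16; else x <<= 16;
 *	if (x & 0xff000000u) ret += 8;  else x <<= 8;
 *	if (x & 0xf0000000u) ret += 4;  else x <<= 4;
 *	if (x & 0xc0000000u) ret += 2;  else x <<= 2;
 *	if (x & 0x80000000u) ret += 1;
 */
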
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _PARISC_BITOPS_H */