cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cmpxchg.h (4277B)


/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_IA64_CMPXCHG_H
#define _UAPI_ASM_IA64_CMPXCHG_H

/*
 * Compare/Exchange, forked from asm/intrinsics.h
 * which was:
 *
 *	Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void ia64_xchg_called_with_bad_pointer(void);

#define __xchg(x, ptr, size)						\
({									\
	unsigned long __xchg_result;					\
									\
	switch (size) {							\
	case 1:								\
		__xchg_result = ia64_xchg1((__u8 *)ptr, x);		\
		break;							\
									\
	case 2:								\
		__xchg_result = ia64_xchg2((__u16 *)ptr, x);		\
		break;							\
									\
	case 4:								\
		__xchg_result = ia64_xchg4((__u32 *)ptr, x);		\
		break;							\
									\
	case 8:								\
		__xchg_result = ia64_xchg8((__u64 *)ptr, x);		\
		break;							\
	default:							\
		ia64_xchg_called_with_bad_pointer();			\
	}								\
	__xchg_result;							\
})

#ifndef __KERNEL__
#define xchg(ptr, x)							\
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
#endif
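
/*
 * A minimal usage sketch (not part of the original header; the variable
 * names are illustrative only).  sizeof(*(ptr)) picks the matching
 * ia64_xchgN intrinsic at compile time; any other operand size falls
 * into the default case and leaves an unresolved reference to
 * ia64_xchg_called_with_bad_pointer(), turning the mistake into a link
 * error rather than a silent miscompile:
 *
 *	__u32 lock_word = 0;
 *	__u32 prev;
 *
 *	prev = xchg(&lock_word, 1);	// atomically swap in 1
 *	if (prev == 0) {
 *		// the lock was free and we now hold it
 *	}
 */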

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long ia64_cmpxchg_called_with_bad_pointer(void);

#define ia64_cmpxchg(sem, ptr, old, new, size)				\
({									\
	__u64 _o_, _r_;							\
									\
	switch (size) {							\
	case 1:								\
		_o_ = (__u8) (long) (old);				\
		break;							\
	case 2:								\
		_o_ = (__u16) (long) (old);				\
		break;							\
	case 4:								\
		_o_ = (__u32) (long) (old);				\
		break;							\
	case 8:								\
		_o_ = (__u64) (long) (old);				\
		break;							\
	default:							\
		break;							\
	}								\
	switch (size) {							\
	case 1:								\
		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);	\
		break;							\
									\
	case 2:								\
		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);	\
		break;							\
									\
	case 4:								\
		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);	\
		break;							\
									\
	case 8:								\
		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);	\
		break;							\
									\
	default:							\
		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
		break;							\
	}								\
	(__typeof__(old)) _r_;						\
})

#define cmpxchg_acq(ptr, o, n)	\
	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr, o, n)	\
	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
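
/*
 * A minimal usage sketch (not part of the original header; the variable
 * names are illustrative only).  The classic compare-and-swap retry
 * loop: success is detected by comparing the returned value with the
 * snapshot of the old value, exactly as described above.
 *
 *	__u32 *counter;		// some shared word
 *	__u32 old, new;
 *
 *	do {
 *		old = *counter;
 *		new = old + 1;
 *	} while (cmpxchg_acq(counter, old, new) != old);
 *	// the increment is only published if no other CPU changed
 *	// *counter between the read of `old` and the cmpxchg
 */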

/*
 * Worse still - early processor implementations actually just ignored
 * the acquire/release semantics and did a full fence all the time.
 * Unfortunately, this meant that a lot of badly written code which used
 * .acq when it really wanted .rel became legacy out in the wild - so when
 * we made a CPU that strictly did the .acq or .rel, all that code started
 * breaking, and we had to back-pedal and keep the "legacy" behavior of a
 * full fence :-(
 */

#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))

#define cmpxchg_local		cmpxchg
#define cmpxchg64_local		cmpxchg64
#endif

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)						\
do {									\
	if (_cmpxchg_bugcheck_count-- <= 0) {				\
		void *ip;						\
		extern int _printk(const char *fmt, ...);		\
		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
		_printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
		break;							\
	}								\
} while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
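
/*
 * A minimal usage sketch (not part of the original header; `v` and
 * `mask` are illustrative only).  With CONFIG_IA64_DEBUG_CMPXCHG
 * enabled, the BUGCHECK pair bounds a cmpxchg retry loop: after 128
 * failed attempts it starts printing the current instruction pointer
 * and the address of the contended word, making livelocked loops
 * visible.  With the option disabled, both macros compile away.
 *
 *	__u64 old, new;
 *	CMPXCHG_BUGCHECK_DECL	// declares the retry counter
 *
 *	do {
 *		CMPXCHG_BUGCHECK(v);	// complain once we look stuck
 *		old = *v;
 *		new = old | mask;
 *	} while (cmpxchg_acq(v, old, new) != old);
 */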

#endif /* !__ASSEMBLY__ */

#endif /* _UAPI_ASM_IA64_CMPXCHG_H */