cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

compiler.h (5696B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_

#include <linux/compiler_types.h>

#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

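/*
 * The indirection below exists so that macro arguments such as
 * __COUNTER__ are fully expanded before token pasting takes place.
 */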
#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

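/*
 * Illustrative use (not part of the original header): check a layout
 * assumption at build time, e.g.
 *
 *	compiletime_assert(sizeof(__u64) == 8, "__u64 must be 8 bytes");
 *
 * When __OPTIMIZE__ is set and the condition is false, the call to the
 * undefined __compiletime_assert_N() survives optimization; a compiler
 * that supports __compiletime_error() then fails with the message, and
 * one that does not fails at link time on the undefined symbol.
 */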
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

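/*
 * Minimal sketch (illustrative; assumes a flag set elsewhere, e.g. by a
 * signal handler): the memory clobber forces the compiler to re-read
 * memory, so a polled flag is not hoisted out of the loop:
 *
 *	while (!flag)
 *		barrier();
 */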
#ifndef __always_inline
# define __always_inline	inline __attribute__((always_inline))
#endif

#ifndef noinline
#define noinline
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

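/*
 * Illustrative use (mirrors the kernel's __must_be_array, not part of
 * this header): for a real array, typeof(arr) differs from
 * typeof(&arr[0]), so this rejects plain pointers at build time:
 *
 *	compiletime_assert(!__same_type((arr), &(arr)[0]),
 *			   "expected an array, not a pointer");
 */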
#ifdef __ANDROID__
/*
 * FIXME: Big hammer to get rid of tons of:
 *   "warning: always_inline function might not be inlinable"
 *
 * At least on android-ndk-r12/platforms/android-24/arch-arm
 */
#undef __always_inline
#define __always_inline	inline
#endif

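/* Kernel-space annotations; they carry no meaning in userspace tools. */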
#define __user
#define __rcu
#define __read_mostly

#ifndef __attribute_const__
# define __attribute_const__
#endif

#ifndef __maybe_unused
# define __maybe_unused		__attribute__((unused))
#endif

#ifndef __used
# define __used		__attribute__((__unused__))
#endif

#ifndef __packed
# define __packed		__attribute__((__packed__))
#endif

#ifndef __force
# define __force
#endif

#ifndef __weak
# define __weak			__attribute__((weak))
#endif

#ifndef likely
# define likely(x)		__builtin_expect(!!(x), 1)
#endif

#ifndef unlikely
# define unlikely(x)		__builtin_expect(!!(x), 0)
#endif

#ifndef __init
# define __init
#endif

#include <linux/types.h>

/*
 * The following functions are taken from the kernel sources and
 * break aliasing rules in their original form.
 *
 * While the kernel is compiled with -fno-strict-aliasing,
 * perf uses -Wstrict-aliasing=3, which makes the build fail
 * under gcc 4.4.
 *
 * Extra __may_alias__ types are used to allow aliasing
 * in this case.
 */
typedef __u8  __attribute__((__may_alias__))  __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

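/*
 * Sketch of the problem being avoided (illustrative, not from the
 * original header): dereferencing a cast to an unrelated pointer type
 * trips strict-aliasing diagnostics, while the __may_alias__ typedefs
 * above are exempt from alias analysis:
 *
 *	long v = 0;
 *	__u32 lo = *(__u32 *) &v;		// warns with -Wstrict-aliasing
 *	__u32 ok = *(__u32_alias_t *) &v;	// permitted
 */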
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to a memcpy() bracketed by compiler barriers.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x)					\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__c = { 0 } };			\
	__read_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

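/*
 * Illustrative use (not part of the original header): a flag shared
 * with an interrupt or signal handler, accessed without tearing and
 * without the compiler caching the value in a register:
 *
 *	while (!READ_ONCE(done))
 *		;				// re-reads done on each pass
 *	WRITE_ONCE(counter, counter + 1);	// single, untorn store
 */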
#ifndef __fallthrough
# define __fallthrough
#endif

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)
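
/*
 * Illustrative expansion (not part of the original header): the extra
 * level lets argument macros expand before pasting:
 *
 *	__PASTE(var_, __LINE__)		// -> var_42 on line 42
 *	___PASTE(var_, __LINE__)	// -> var___LINE__ (no expansion)
 */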

#endif /* _TOOLS_LINUX_COMPILER_H_ */