cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

compiler.h (7837B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif
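
/*
 * Illustrative usage sketch (editor's example, not part of the original
 * header): likely()/unlikely() wrap a branch condition so the compiler can
 * lay out the expected path first, e.g.
 *
 *	err = do_something();		// do_something() is hypothetical
 *	if (unlikely(err))
 *		return err;
 *
 * With CONFIG_TRACE_BRANCH_PROFILING enabled, the same call sites go
 * through __branch_check__() so the hit/miss counts of each annotation
 * show up in the ftrace branch-profiling statistics.
 */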

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This version is used, e.g., to prevent dead-store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using a
 * normal barrier(): while gcc gets along with a normal barrier(),
 * llvm needs an explicit input variable to be assumed clobbered.
 * The issue is as follows: while the inline asm might access any
 * memory it wants, the compiler could have fit all of @ptr into
 * registers instead, and since @ptr never escaped from there, it
 * proved that the inline asm wasn't touching any of it. This version
 * works well with both compilers, i.e. we're telling the compiler
 * that the inline asm absolutely may see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
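
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * barrier_data() is what, for instance, memzero_explicit() relies on to
 * keep the wipe of a stack buffer from being optimized away once the
 * buffer is no longer read:
 *
 *	char key[32];
 *	...
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);	// asm is assumed to read key, so the
 *				// memset() cannot be treated as a dead store
 */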

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define __stringify_label(n) #n

#define __annotate_unreachable(c) ({					\
	asm volatile(__stringify_label(c) ":\n\t"			\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long " __stringify_label(c) "b - .\n\t"		\
		     ".popsection\n\t" : : "i" (c));			\
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else /* !CONFIG_OBJTOOL */
#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
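
/*
 * Illustrative sketch (editor's example, not part of the original header;
 * the enum and handlers below are made up): unreachable() asserts that
 * control flow never reaches this point, e.g. after a switch that covers
 * every possible value:
 *
 *	switch (mode) {
 *	case MODE_A: return handle_a();
 *	case MODE_B: return handle_b();
 *	}
 *	unreachable();	// every enum value is handled above
 *
 * The annotate_unreachable() half additionally records the location for
 * objtool, so its control-flow checks do not warn about the dead code.
 */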

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
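
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * placing KENTRY(sym) next to a definition emits a __used pointer to sym in
 * a dedicated "___kentry+..." section, so the symbol stays alive even though
 * no C code references it by name:
 *
 *	void my_vector_entry(void);	// hypothetical handler reached only
 *					// through an address table set up in asm
 *	KENTRY(my_vector_entry);
 */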

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
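
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the empty asm makes the compiler assume @var may have been rewritten,
 * which defeats value tracking. Constant-time comparison code, e.g.
 * crypto_memneq(), uses this so partial results cannot be short-circuited:
 *
 *	ret |= a[i] ^ b[i];
 *	OPTIMIZER_HIDE_VAR(ret);	// compiler can no longer reason about ret
 */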

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})
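
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * wrapping a racy but harmless read keeps KCSAN from reporting it, while
 * the generated code stays a plain load:
 *
 *	// hypothetical stats dump; readers tolerate a stale or torn value
 *	pr_info("dropped so far: %lu\n", data_race(stats->dropped));
 */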

/*
 * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
 * instrumented C code with jump table addresses. Architectures that
 * support CFI can define this macro to return the actual function address
 * when needed.
 */
#ifndef function_nocfi
#define function_nocfi(x) (x)
#endif
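
/*
 * Illustrative sketch (editor's example, not part of the original header;
 * names below are made up): code that hands a function's real machine
 * address to hardware or to assembly, rather than calling it from C, wants
 * the underlying entry point and not the CFI jump-table stub:
 *
 *	write_vector_entry((unsigned long)function_nocfi(secondary_startup));
 */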

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
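
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * facilities such as static calls reference their key symbols only from
 * inline asm, so they emit __ADDRESSABLE(sym) to guarantee the symbol is
 * kept and visible to the assembler:
 *
 *	__ADDRESSABLE(my_key);	// hypothetical symbol named only in asm(...)
 */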

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
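
/*
 * Illustrative sketch (editor's example, not part of the original header;
 * the table and index are made up): tables built from 32-bit place-relative
 * entries (target address minus entry address) stay small and need no
 * relocations; offset_to_ptr() recovers the absolute pointer:
 *
 *	extern const int handler_offsets[];
 *	void *handler = offset_to_ptr(&handler_offsets[idx]);
 */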

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
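
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * __must_be_array() is the trick behind ARRAY_SIZE(): if a pointer is passed
 * instead of a real array, (a) and &(a)[0] have the same type and
 * BUILD_BUG_ON_ZERO() breaks the build:
 *
 *	int tbl[8];
 *	size_t n = ARRAY_SIZE(tbl);	// ok: 8
 *	int *p = tbl;
 *	size_t m = ARRAY_SIZE(p);	// compile-time error via __must_be_array()
 */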

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */