cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rwonce.h (2932B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#ifndef __ASM_GENERIC_RWONCE_H
#define __ASM_GENERIC_RWONCE_H

#ifndef __ASSEMBLY__

#include <linux/compiler_types.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")

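/*
 * For example: on a 32-bit kernel, READ_ONCE() on a u64 passes this
 * assertion because sizeof(u64) == sizeof(long long), even though the load
 * may tear into two 32-bit accesses, while READ_ONCE() on a 16-byte struct
 * fails it at build time.
 */
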
/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity. Note that this may result in tears!
 */
#ifndef __READ_ONCE
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))
#endif

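/*
 * Illustrative note: __READ_ONCE() is enough when a torn value is
 * tolerable, e.g. sampling a counter purely for diagnostics, where only
 * the prevention of merged or refetched accesses is wanted.
 */
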
#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE(x);							\
})

#define __WRITE_ONCE(x, val)						\
do {									\
	*(volatile typeof(x) *)&(x) = (val);				\
} while (0)

#define WRITE_ONCE(x, val)						\
do {									\
	compiletime_assert_rwonce_type(x);				\
	__WRITE_ONCE(x, val);						\
} while (0)

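/*
 * Illustrative sketch (hypothetical helpers): use case (1) from the
 * comment at the top of this file, a flag shared between task-level code
 * and an IRQ handler on the same CPU. WRITE_ONCE() guarantees the store is
 * emitted exactly once, and READ_ONCE() in the loop condition forces a
 * fresh load on every iteration instead of letting the compiler cache the
 * value in a register.
 */
static inline void rwonce_example_signal(int *flag)
{
	WRITE_ONCE(*flag, 1);		/* single, non-elided store */
}

static inline void rwonce_example_wait(int *flag)
{
	while (!READ_ONCE(*flag))
		;			/* refetched from memory each pass */
}
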
static __no_sanitize_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
	return __READ_ONCE(*(unsigned long *)addr);
}

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN/KCSAN. This is
 * usually used by unwinding code when walking the stack of a running process.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	compiletime_assert(sizeof(x) == sizeof(unsigned long),		\
		"Unsupported access size for READ_ONCE_NOCHECK().");	\
	(typeof(x))__read_once_word_nocheck(&(x));			\
})

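/*
 * Illustrative sketch (hypothetical helper): an unwinder inspecting a
 * possibly racing stack slot of another task, where a KASAN/KCSAN report
 * would be noise rather than a bug.
 */
static inline unsigned long rwonce_example_peek_stack(unsigned long *sp)
{
	return READ_ONCE_NOCHECK(*sp);
}

/*
 * Only the first byte is KASAN-checked below: word-at-a-time string code
 * (e.g. the dcache name hashing) may deliberately read past the end of a
 * buffer within the same word.
 */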
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#endif /* __ASSEMBLY__ */
#endif	/* __ASM_GENERIC_RWONCE_H */