cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

barrier.h (7300B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */

#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif
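
/*
 * Illustrative sketch, not part of the upstream header: an architecture that
 * wants the KCSAN-instrumented wrappers above defines the __-prefixed
 * primitives in its own <asm/barrier.h> before including this file.  The
 * x86-style fence instructions below are only an example encoding.
 */
#if 0
#define __mb()	asm volatile ("mfence" ::: "memory")
#define __rmb()	asm volatile ("lfence" ::: "memory")
#define __wmb()	asm volatile ("sfence" ::: "memory")
/* With these defined, mb()/rmb()/wmb() expand to kcsan_*() + __*() above. */
#endif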

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
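
/*
 * Illustrative sketch, not part of the upstream header: the classic use of
 * dma_wmb()/dma_rmb() around a descriptor shared with a DMA-capable device.
 * The 'struct ex_desc' layout and EX_DESC_OWNED flag are hypothetical.
 */
#if 0
struct ex_desc {
	u32 addr;
	u32 len;
	u32 status;	/* EX_DESC_OWNED hands the descriptor to the device */
};

static void ex_queue_to_device(struct ex_desc *d, u32 addr, u32 len)
{
	d->addr = addr;
	d->len = len;
	dma_wmb();	/* payload visible to the device before ownership passes */
	d->status = EX_DESC_OWNED;
}

static bool ex_reap_from_device(struct ex_desc *d)
{
	if (READ_ONCE(d->status) & EX_DESC_OWNED)
		return false;	/* device still owns the descriptor */
	dma_rmb();	/* read the payload only after seeing ownership returned */
	return d->len != 0;
}
#endif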

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */
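
/*
 * Illustrative sketch, not part of the upstream header: smp_mb() pairing in
 * the classic store-buffering pattern.  Variable and function names are made
 * up for the example.
 */
#if 0
static int ex_flag0, ex_flag1;

static void ex_cpu0(void)		/* runs on one CPU */
{
	WRITE_ONCE(ex_flag0, 1);
	smp_mb();	/* order the ex_flag0 store before the ex_flag1 load */
	if (!READ_ONCE(ex_flag1))
		ex_do_something();	/* hypothetical */
}

static void ex_cpu1(void)		/* runs concurrently on another CPU */
{
	WRITE_ONCE(ex_flag1, 1);
	smp_mb();	/* pairs with the smp_mb() in ex_cpu0() */
	if (!READ_ONCE(ex_flag0))
		ex_do_something_else();	/* hypothetical */
}
/* With both barriers, at most one of the two branches can be taken. */
#endif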

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
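
/*
 * Illustrative sketch, not part of the upstream header: publishing data with
 * smp_store_release() and consuming it with smp_load_acquire().  'struct
 * ex_msg', 'ex_shared' and 'ex_ready' are hypothetical.
 */
#if 0
struct ex_msg { int a, b; };
static struct ex_msg ex_shared;
static int ex_ready;

static void ex_producer(int a, int b)
{
	ex_shared.a = a;			/* plain payload stores ... */
	ex_shared.b = b;
	smp_store_release(&ex_ready, 1);	/* ... ordered before the flag */
}

static bool ex_consumer(struct ex_msg *out)
{
	if (!smp_load_acquire(&ex_ready))	/* pairs with the release above */
		return false;
	*out = ex_shared;			/* payload guaranteed visible here */
	return true;
}
#endif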

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
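
/*
 * Illustrative sketch, not part of the upstream header: a guest publishing a
 * ring entry to the host.  The host may be SMP even when the guest kernel is
 * built with !CONFIG_SMP, so the virt_*() forms are used rather than smp_*().
 * The 'struct ex_ring' layout is hypothetical.
 */
#if 0
struct ex_ring {
	u64 slot[256];
	u16 avail_idx;			/* polled by the host */
};

static void ex_guest_publish(struct ex_ring *ring, u16 idx, u64 entry)
{
	ring->slot[idx & 255] = entry;	/* fill the slot first */
	virt_wmb();			/* order the slot write before the index update */
	WRITE_ONCE(ring->avail_idx, idx + 1);
}
#endif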

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE order; the additional RMB
 * provides LOAD->LOAD order. Together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()		smp_rmb()
#endif
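
/*
 * Illustrative sketch, not part of the upstream header: a hand-rolled wait
 * loop whose READ_ONCE() + branch forms a control dependency (LOAD->STORE
 * order); smp_acquire__after_ctrl_dep() upgrades it to ACQUIRE so later
 * loads are ordered too.  'ex_ready_flag' and 'ex_data' are hypothetical.
 */
#if 0
static int ex_ready_flag;
static int ex_data;

static int ex_wait_for_data(void)
{
	while (!READ_ONCE(ex_ready_flag))	/* control dependency on this load */
		cpu_relax();
	smp_acquire__after_ctrl_dep();	/* later loads cannot be hoisted above */
	return ex_data;			/* ordered after the ex_ready_flag load */
}
#endif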

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
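
/*
 * Illustrative sketch, not part of the upstream header: waiting for a
 * hypothetical 'ex_state' word to leave a hypothetical EX_BUSY state with no
 * ordering requirement.  VAL names the freshly loaded value inside the
 * condition expression.
 */
#if 0
static u32 ex_state;

static u32 ex_wait_until_not_busy(void)
{
	/* Spins with cpu_relax() until the loaded value satisfies the condition. */
	return smp_cond_load_relaxed(&ex_state, VAL != EX_BUSY);
}
#endif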

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
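
/*
 * Illustrative sketch, not part of the upstream header: spinning until a
 * hypothetical lock word reads as zero, with ACQUIRE ordering so that
 * accesses after the wait cannot be reordered before the load that saw zero.
 */
#if 0
static int ex_locked;

static void ex_wait_for_unlock(void)
{
	/* ACQUIRE comes from the control dependency + smp_acquire__after_ctrl_dep(). */
	smp_cond_load_acquire(&ex_locked, VAL == 0);
}
#endif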

/*
 * pmem_wmb() ensures that all stores to persistent storage issued by
 * preceding instructions have updated persistent storage before any data
 * access or data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif
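
/*
 * Illustrative sketch, not part of the upstream header: committing a log
 * entry to persistent memory before marking it valid.  'struct ex_pmem_entry'
 * is hypothetical, and a real implementation would also flush and order the
 * 'valid' marker itself.
 */
#if 0
struct ex_pmem_entry {
	char payload[56];
	u64 valid;		/* "entry is complete" marker */
};

static void ex_commit_entry(struct ex_pmem_entry *slot, const void *data)
{
	memcpy_flushcache(slot->payload, data, sizeof(slot->payload));
	pmem_wmb();		/* payload is persistent before the marker is set */
	slot->valid = 1;
}
#endif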

/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
 * this kind of memory access, the CPU may wait for prior accesses to be
 * merged with subsequent ones. In some situations, such waiting hurts
 * performance. io_stop_wc() can be used to prevent the merging of
 * write-combining memory accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif
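
/*
 * Illustrative sketch, not part of the upstream header: two batches of writes
 * to an ioremap_wc() mapping where the first batch should not merge with, or
 * linger behind, the second.  'wc_base', 'ex_batch0', 'ex_batch1' and
 * EX_BATCH_SIZE are hypothetical.
 */
#if 0
static void ex_push_two_batches(void __iomem *wc_base)
{
	memcpy_toio(wc_base, ex_batch0, EX_BATCH_SIZE);
	io_stop_wc();	/* keep batch 0's WC writes from merging with batch 1's */
	memcpy_toio(wc_base + EX_BATCH_SIZE, ex_batch1, EX_BATCH_SIZE);
}
#endif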

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */