cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

barrier.h (2414B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
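
/*
 * Illustrative sketch (not part of the original header): a typical
 * producer/consumer pairing of wmb() and rmb().  The example_msg
 * structure and the two helpers are hypothetical, added only to show
 * how the macros above are meant to be paired.
 */
struct example_msg {
	int data;
	int ready;
};

static inline void example_publish(struct example_msg *m, int val)
{
	m->data = val;		/* write the payload first */
	wmb();			/* payload store is visible before the flag store */
	WRITE_ONCE(m->ready, 1);
}

static inline int example_consume(struct example_msg *m, int *val)
{
	if (!READ_ONCE(m->ready))
		return 0;
	rmb();			/* flag load is ordered before the payload load */
	*val = m->data;
	return 1;
}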

#define dma_rmb()	mb()
#define dma_wmb()	mb()

# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery here.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
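
/*
 * Illustrative sketch (not part of the original header): publishing a
 * pointer with release/acquire semantics.  Real code would normally use
 * the smp_store_release()/smp_load_acquire() wrappers provided by
 * <asm-generic/barrier.h> (included below); the structure and helper
 * names here are hypothetical.
 */
struct example_node {
	int value;
};

static inline void example_install(struct example_node **slot,
				   struct example_node *n)
{
	/* Stores that initialized *n are visible before the pointer is. */
	__smp_store_release(slot, n);
}

static inline struct example_node *example_lookup(struct example_node **slot)
{
	/* A non-NULL result is guaranteed to observe the node's init. */
	return __smp_load_acquire(slot);
}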

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
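
/*
 * Illustrative note (not part of the original header): the comment above
 * refers to code elsewhere in arch/ia64 (for example the irqflags
 * helpers) that clears or sets PSR bits with rsm/ssm.  Assuming the
 * ia64_stop()/ia64_rsm() intrinsics and IA64_PSR_I from the ia64
 * headers, the pattern is roughly:
 *
 *	ia64_stop();		// ";;" stop bit ends the current group
 *	ia64_rsm(IA64_PSR_I);	// rsm then executes in a new group
 */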

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */