cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

barrier.h (2458B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()		__asm__ __volatile__ ("nop")

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
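
/*
 * For example (an expansion sketch only, nothing here is emitted by the
 * kernel): the #p/#s stringification pastes the predecessor and successor
 * sets straight into one fence instruction, so mb() below becomes
 *
 *	__asm__ __volatile__ ("fence " "iorw" "," "iorw" : : : "memory");
 *
 * which string concatenation reduces to "fence iorw,iorw".  The "memory"
 * clobber additionally stops the compiler from reordering accesses across
 * the fence; the fence instruction itself constrains the hart.
 */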

/* These barriers need to enforce ordering on both devices and memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()	RISCV_FENCE(rw,rw)
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)
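
/*
 * Pairing sketch (illustrative only): asm-generic/barrier.h, included at the
 * bottom of this file, builds the public smp_wmb()/smp_rmb() from these, and
 * they are meant to be used in pairs, e.g. classic message passing:
 *
 *	hart 0				hart 1
 *	WRITE_ONCE(data, 1);		r1 = READ_ONCE(flag);
 *	smp_wmb();  // fence w,w	smp_rmb();  // fence r,r
 *	WRITE_ONCE(flag, 1);		r2 = READ_ONCE(data);
 *
 * If hart 1 observes flag == 1 (r1 == 1), it must also observe data == 1
 * (r2 == 1), without either side paying for a full "fence rw,rw".
 */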

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw,w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r,rw);						\
	___p1;								\
})
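
/*
 * The same pattern in acquire/release form (illustrative only): callers
 * normally use the smp_store_release()/smp_load_acquire() wrappers that
 * asm-generic/barrier.h derives from these.  The "fence rw,w" orders the
 * payload write before the flag write, and the "fence r,rw" orders the flag
 * read before the payload read:
 *
 *	// producer				// consumer
 *	payload = 42;				if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			r = payload;  // sees 42
 *
 * Unlike the smp_wmb()/smp_rmb() pairing above, release/acquire also orders
 * the producer's earlier reads and the consumer's later writes.
 */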

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
 * orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V.  The sequence looks like:
 *
 *    lr.aq lock
 *    sc    lock <= LOCKED
 *    smp_mb__after_spinlock()
 *    // critical section
 *    lr    lock
 *    sc.rl lock <= UNLOCKED
 *
 * The AQ/RL pair provides an RCpc critical section, but there's not really
 * any way we can take advantage of that here because the ordering is only
 * enforced on that one lock.  Thus, we're just doing a full fence.
 *
 * Since we allow writeX to be called from preemptible regions we need at
 * least an "o" in the predecessor set to ensure device writes are visible
 * before the task is marked as available for scheduling on a new hart.  While
 * I don't see any concrete reason we need a full IO fence, it seems safer to
 * just upgrade this in order to avoid any IO crossing a scheduling boundary.
 * In both instances the scheduler pairs this with an mb(), so nothing is
 * necessary on the new hart.
 */
#define smp_mb__after_spinlock()	RISCV_FENCE(iorw,iorw)
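
/*
 * Usage sketch (illustrative; the real call sites are in the scheduler):
 * the barrier goes immediately after lock acquisition, e.g.
 *
 *	raw_spin_lock(&lock);
 *	smp_mb__after_spinlock();	// upgrade the RCpc lock to RCsc
 *	// critical section, now fully ordered against earlier accesses
 *
 * On RISC-V this costs one extra "fence iorw,iorw" on top of the AQ/RL
 * ordering the lock acquisition already provides.
 */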

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */