cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

linux-kernel.def (4705B)


// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appeared in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which appeared in ASPLOS 2018.

// ONCE
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }

// Release Acquire and friends
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }

// Fences
smp_mb() { __fence{mb}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
barrier() { __fence{barrier}; }

// Exchange
xchg(X,V)  __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)

// Spinlocks
spin_lock(X) { __lock(X); }
spin_unlock(X) { __unlock(X); }
spin_trylock(X) __trylock(X)
spin_is_locked(X) __islocked(X)

// RCU
rcu_read_lock() { __fence{rcu-lock}; }
rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }

// SRCU
srcu_read_lock(X)  __srcu{srcu-lock}(X)
srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
synchronize_srcu(X)  { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X)  { __srcu{sync-srcu}(X); }

// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }

atomic_add(V,X) { __atomic_op(X,+,V); }
atomic_sub(V,X) { __atomic_op(X,-,V); }
atomic_inc(X)   { __atomic_op(X,+,1); }
atomic_dec(X)   { __atomic_op(X,-,1); }

atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)

atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)

atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)

atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)

atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)

atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X)  __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X)  __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
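
This file is the macro table that the herd7 tool uses for the Linux-kernel memory model (it lives under tools/memory-model/ in the kernel tree): each kernel primitive on the left is mapped to one of the tool's internal events (__load, __store, __fence, __xchg, __cmpxchg, __atomic_op and friends, plus lock and RCU/SRCU events) together with an ordering tag (once, acquire, release, mb). Litmus tests written in the C-flavored litmus syntax are then checked against these definitions. The test below is a sketch of the classic message-passing pattern using WRITE_ONCE/READ_ONCE with smp_wmb/smp_rmb, modeled on the tests shipped in tools/memory-model/litmus-tests/; the test name and the variable names x and y are illustrative, not part of this repository.

C MP+fencewmbonceonce+fencermbonceonce

(*
 * Message passing: P0 publishes data in x and then sets the flag y;
 * P1 polls the flag and then reads the data. With the smp_wmb()/smp_rmb()
 * pairing, the "flag observed but data missed" outcome named in the
 * exists clause should never be reachable under the model.
 *)

{}

P0(int *x, int *y)
{
	WRITE_ONCE(*x, 1);
	smp_wmb();
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*y);
	smp_rmb();
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)

Such a test is typically run from tools/memory-model/ with something like "herd7 -conf linux-kernel.cfg <test>.litmus"; the .cfg file is what pulls in this .def file together with the accompanying .bell and .cat model files.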