cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qspinlock.h (3150B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

#define _Q_PENDING_LOOPS	(1 << 9)
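/*
 * _Q_PENDING_LOOPS bounds how long the generic slowpath in
 * kernel/locking/qspinlock.c spins waiting for a pending -> locked
 * handoff before treating the lock as contended; the generic default
 * is a single iteration, x86 allows 1 << 9.
 */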

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}
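/*
 * Layout note (per asm-generic/qspinlock_types.h, assuming the common
 * NR_CPUS < 16K configuration): the lock word is
 * (tail << 16) | (pending << 8) | locked, so _Q_PENDING_OFFSET is
 * bit 8 and _Q_PENDING_VAL is 0x100. LOCK BTSL atomically sets the
 * pending bit and yields its old value via the carry flag; multiplying
 * the 0/1 result by _Q_PENDING_VAL moves it back to bit 8, and the
 * separate atomic_read() fills in the tail and locked fields.
 */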

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define	queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}
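/*
 * The byte store suffices because x86 is little-endian and ->locked
 * aliases the least-significant byte of ->val: clearing it releases
 * the lock without touching the pending and tail fields, while
 * smp_store_release() keeps the critical section ordered before it.
 */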

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	kcsan_release();
	pv_queued_spin_unlock(lock);
}
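/*
 * pv_queued_spin_lock_slowpath() and pv_queued_spin_unlock() are
 * paravirt ops, patched at boot to either the native_* variants or the
 * __pv_* variants declared above. The explicit kcsan_release() lets
 * KCSAN see the release ordering that the paravirt indirection would
 * otherwise hide from instrumentation.
 */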

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
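/*
 * vcpu_is_preempted() feeds the kernel's spin-wait heuristics (e.g.
 * optimistic spinning in mutexes and rwsems): there is no point
 * busy-waiting on a lock holder whose vCPU the hypervisor has
 * scheduled out.
 */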
#endif

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_spin_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
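/*
 * The key defaults to true and is only ever disabled:
 * native_pv_lock_init() turns it off on bare metal, and guests that
 * set up paravirt spinlocks turn it off as well, leaving the
 * virt_spin_lock() fallback only for hypervisor guests without PV
 * spinlock support.
 */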

void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
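/*
 * This is a classic test-and-test-and-set lock: the inner while spins
 * with plain reads (plus cpu_relax()) so waiters do not bounce the
 * cacheline with failed atomics, and the cmpxchg only tries to claim
 * the lock once it has been observed free. The result is unfair, but
 * unlike a fair queued lock it can never strand a guest behind a
 * preempted queue predecessor.
 */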
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */
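
The Test-and-Set fallback in virt_spin_lock() above is the classic
test-and-test-and-set pattern. As a rough standalone illustration, the
same structure in portable C11 atomics (names and API here are
illustrative, not kernel code; atomic_exchange stands in for the
kernel's atomic_cmpxchg):

#include <stdatomic.h>

struct tas_lock {
	atomic_int val; /* 0 = free, 1 = held */
};

static void tas_lock_acquire(struct tas_lock *lock)
{
	do {
		/* Spin with plain loads until the lock looks free, so
		 * waiters read the cacheline instead of writing it. */
		while (atomic_load_explicit(&lock->val,
					    memory_order_relaxed) != 0)
			; /* the kernel would cpu_relax() here */
	} while (atomic_exchange_explicit(&lock->val, 1,
					  memory_order_acquire) != 0);
}

static void tas_lock_release(struct tas_lock *lock)
{
	/* Like native_queued_spin_unlock(): a single release store. */
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}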