cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spinlock.h (2868B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

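/*
 * PA-RISC's only atomic read-modify-write instruction is LDCW (load and
 * clear word): it returns the old value of the word and atomically sets
 * the word to zero.  The lock word is therefore nonzero while the lock is
 * free and zero while it is held, the opposite of most architectures.
 * __ldcw_align() returns a suitably aligned word inside arch_spinlock_t,
 * since LDCW requires a 16-byte-aligned operand on older PA-RISC CPUs.
 */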
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return READ_ONCE(*a) == 0;
}

static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
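	/*
	 * LDCW returns the old, nonzero value when it takes a free lock.
	 * On contention, spin with plain loads until the word looks free
	 * again before retrying LDCW; retrying the atomic in a tight loop
	 * would just hammer the lock's cacheline.
	 */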
	while (__ldcw(a) == 0)
		while (*a == 0)
			continue;
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
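	/* A nonzero old value from LDCW means the lock was free and is now ours. */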
	return __ldcw(a) != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 *
 * The rwlock state is kept in @counter; access to it is serialized by
 * the inner spinlock @lock_mutex.
 */
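/*
 * Counter convention, as used below: @counter is __ARCH_RW_LOCK_UNLOCKED__
 * while the lock is free, is decremented once per active reader, and is 0
 * while a writer holds the lock.  Interrupts are disabled while @lock_mutex
 * is held so that an interrupt handler on the same CPU cannot spin on it
 * and deadlock.
 */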

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If readers hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers:
	 * a writer can be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */