cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

simple_spinlock.h (6005B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,1\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
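
/*
 * Annotation (editorial, not in the upstream header): lwarx/stwcx. form a
 * load-reserve/store-conditional pair -- the conditional store only succeeds
 * if no other CPU has touched the lock word since the reserving load,
 * otherwise cr0 makes the bne- loop back to 1: and retry.  The trailing ",1"
 * on lwarx is the EH hint marking this as a lock-acquisition sequence, and
 * PPC_ACQUIRE_BARRIER orders the acquisition before the critical section.
 */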

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
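
/*
 * Editorial sketch (the real implementation lives outside this header, in
 * arch/powerpc/lib/locks.c for SPLPAR kernels): conceptually,
 * splpar_spin_yield() reads the lock word, recovers the holder's virtual
 * processor number from the 0x80000000 | smp_processor_id() encoding
 * described above, and, if that processor is not currently dispatched,
 * confers the remainder of our timeslice to it via a hypervisor call
 * (H_CONFER on PAPR).  Treat this as an approximation; only the
 * declarations below are authoritative.
 */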

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
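
/*
 * Annotation (editorial): arch_spin_lock() is a test-and-test-and-set loop.
 * After a failed atomic attempt it spins on plain loads of lock->slock so
 * the lock line is not hammered with reservations, drops SMT thread
 * priority with HMT_low() while waiting (yielding to the hypervisor on
 * shared-processor LPARs), then restores priority with HMT_medium() before
 * retrying the lwarx/stwcx. acquisition.
 */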

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
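
/*
 * Annotation (editorial): unlock needs no atomic sequence -- only the owner
 * ever stores 0 to a held lock, so a release barrier (PPC_RELEASE_BARRIER)
 * followed by a plain store is enough to publish the critical section's
 * writes before the lock is seen as free.
 */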

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
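
/*
 * Editorial usage sketch of the "mixed" scheme described above, using the
 * generic kernel rwlock wrappers that eventually reach the arch_*() routines
 * below (my_rwlock is a hypothetical rwlock_t):
 *
 *	// writer, process context: must take the irq-safe variant
 *	unsigned long flags;
 *	write_lock_irqsave(&my_rwlock, flags);
 *	// ... modify shared data ...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	// readers, in process or interrupt context, may use the plain
 *	// variant, since read locks never exclude other readers
 *	read_lock(&my_rwlock);
 *	// ... read shared data ...
 *	read_unlock(&my_rwlock);
 */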

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
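
/*
 * Annotation (editorial): rw->lock is treated as a signed count -- 0 means
 * free, a positive value is the number of active readers, and a write-held
 * lock is negative (WRLOCK_TOKEN has bit 31 set on 64-bit, or is simply -1
 * on 32-bit).  That is why the read path below sign-extends the word and
 * bails out with ble- once the incremented value is not strictly positive.
 */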

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1,1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,1\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */