cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

spinlock_up.h (2126B)


      1#ifndef __LINUX_SPINLOCK_UP_H
      2#define __LINUX_SPINLOCK_UP_H
      3
      4#ifndef __LINUX_SPINLOCK_H
      5# error "please don't include this file directly"
      6#endif
      7
      8#include <asm/processor.h>	/* for cpu_relax() */
      9#include <asm/barrier.h>
     10
     11/*
     12 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
     13 *
     14 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
     15 * Released under the General Public License (GPL).
     16 *
     17 * In the debug case, 1 means unlocked, 0 means locked. (the values
     18 * are inverted, to catch initialization bugs)
     19 *
     20 * No atomicity anywhere, we are on UP. However, we still need
     21 * the compiler barriers, because we do not want the compiler to
     22 * move potentially faulting instructions (notably user accesses)
     23 * into the locked sequence, resulting in non-atomic execution.
     24 */
     25
      26#ifdef CONFIG_DEBUG_SPINLOCK
/* Inverted debug encoding (see header comment): slock == 0 is locked,
 * slock == 1 is unlocked — so "is locked" tests for zero. */
      27#define arch_spin_is_locked(x)		((x)->slock == 0)
     28
/*
 * arch_spin_lock() - take @lock on a uniprocessor kernel.
 *
 * No atomic operation is needed on UP: "acquiring" is a plain store of
 * 0 (locked, in the inverted debug encoding above).  The barrier() is a
 * compiler barrier only; per the header comment, it keeps the compiler
 * from moving critical-section accesses (notably potentially faulting
 * user accesses) above the lock store.
 */
      29static inline void arch_spin_lock(arch_spinlock_t *lock)
      30{
      31	lock->slock = 0;
      32	barrier();
      33}
     34
/*
 * arch_spin_trylock() - try to take @lock; never spins.
 *
 * Snapshot the old value, then unconditionally mark the lock taken.
 * The non-atomic read-modify-write is safe on UP: there is no other
 * CPU to interleave with.  Returns nonzero (success) iff the lock was
 * previously unlocked (slock == 1 in the inverted debug encoding).
 * The barrier() fences the critical section off from the lock store,
 * as in arch_spin_lock().
 */
      35static inline int arch_spin_trylock(arch_spinlock_t *lock)
      36{
      37	char oldval = lock->slock;
      38
      39	lock->slock = 0;
      40	barrier();
      41
      42	return oldval > 0;
      43}
     44
/*
 * arch_spin_unlock() - release @lock.
 *
 * barrier() comes first so the compiler cannot sink critical-section
 * accesses below the store that marks the lock free (slock = 1,
 * inverted debug encoding).
 */
      45static inline void arch_spin_unlock(arch_spinlock_t *lock)
      46{
      47	barrier();
      48	lock->slock = 1;
      49}
     50
     51/*
     52 * Read-write spinlocks. No debug version.
     53 */
/*
 * All reader/writer ops reduce to no-ops on UP: a compiler barrier to
 * pin instruction ordering, plus a (void) evaluation of @lock to avoid
 * unused-argument warnings.  Both trylock variants always report
 * success (1) — there is nobody to contend with.
 */
      54#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
      55#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
      56#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
      57#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
      58#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
      59#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
     60
      61#else /* DEBUG_SPINLOCK */
/* Non-debug UP build: locks carry no state at all.  Each operation is
 * just a compiler barrier plus a (void) evaluation of @lock; queries
 * report "never locked". */
      62#define arch_spin_is_locked(lock)	((void)(lock), 0)
      63/* for sched/core.c and kernel_lock.c: */
      64# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
      65# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
      66# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
      67#endif /* DEBUG_SPINLOCK */

/* A UP spinlock can never be contended (in either config branch). */
      69#define arch_spin_is_contended(lock)	(((void)(lock), 0))
     70
     71#endif /* __LINUX_SPINLOCK_UP_H */