cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spinlock.h (14014B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw:
 *			  The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw:
 *			  The raw RT types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0		CPU1				CPU2
 *
 *  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	kcsan_mb()
#endif
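
/*
 * Example (illustrative sketch, not from the kernel tree; demo_cpu0(),
 * demo_cpu1(), demo_lock, X, Y, r0 and r1 are hypothetical names). This
 * mirrors snippet (1) above: CPU0 runs demo_cpu0(), CPU1 runs demo_cpu1(),
 * and the ACQUIRE of demo_lock together with smp_mb__after_spinlock()
 * orders CPU0's earlier store to X against its later load of Y:
 *
 *	static DEFINE_RAW_SPINLOCK(demo_lock);
 *	static int X, Y;
 *
 *	void demo_cpu0(void)
 *	{
 *		int r0;
 *
 *		WRITE_ONCE(X, 1);
 *		raw_spin_lock(&demo_lock);
 *		smp_mb__after_spinlock();
 *		r0 = READ_ONCE(Y);
 *		raw_spin_unlock(&demo_lock);
 *	}
 *
 *	void demo_cpu1(void)
 *	{
 *		int r1;
 *
 *		WRITE_ONCE(Y, 1);
 *		smp_mb();
 *		r1 = READ_ONCE(X);
 *	}
 *
 * As in snippet (1), the outcome r0 == 0 && r1 == 0 is forbidden.
 */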

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
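
/*
 * Example (illustrative sketch, not from the kernel tree; struct demo_queue
 * and demo_move() are hypothetical). When two locks of the same lock class
 * are taken in a well-defined order, the inner acquisition is annotated
 * with a non-zero subclass so that lockdep does not report it as a
 * recursive/ABBA deadlock:
 *
 *	struct demo_queue {
 *		raw_spinlock_t lock;
 *		struct list_head items;
 *	};
 *
 *	static void demo_move(struct demo_queue *src, struct demo_queue *dst)
 *	{
 *		if (src == dst)
 *			return;
 *		if (src < dst) {
 *			raw_spin_lock(&src->lock);
 *			raw_spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		} else {
 *			raw_spin_lock(&dst->lock);
 *			raw_spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
 *		}
 *		list_splice_init(&src->items, &dst->items);
 *		raw_spin_unlock(&dst->lock);
 *		raw_spin_unlock(&src->lock);
 *	}
 */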

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
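
/*
 * Example (illustrative sketch, not from the kernel tree; demo_irq_lock,
 * demo_events and demo_count_event() are hypothetical). The flags argument
 * must be a plain unsigned long local variable passed by name, not by
 * pointer; the typecheck() above enforces its type at build time. It is
 * paired with raw_spin_unlock_irqrestore(), defined below:
 *
 *	static DEFINE_RAW_SPINLOCK(demo_irq_lock);
 *	static unsigned long demo_events;
 *
 *	static void demo_count_event(void)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(&demo_irq_lock, flags);
 *		demo_events++;
 *		raw_spin_unlock_irqrestore(&demo_irq_lock, flags);
 *	}
 */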

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
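
/*
 * Example (illustrative sketch, not from the kernel tree; demo_try_update()
 * is hypothetical). Both trylock variants evaluate to 1 with the lock held
 * (and interrupts disabled/saved) on success, or to 0 with the interrupt
 * state already restored on failure, so the caller only unlocks on success:
 *
 *	static bool demo_try_update(raw_spinlock_t *lock, int *val)
 *	{
 *		unsigned long flags;
 *
 *		if (!raw_spin_trylock_irqsave(lock, flags))
 *			return false;
 *		(*val)++;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *		return true;
 *	}
 */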

#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
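
/*
 * Example (illustrative sketch, not from the kernel tree; struct demo_dev
 * and demo_dev_alloc() are hypothetical). spin_lock_init() is meant for
 * locks whose storage is created at run time; statically allocated locks
 * can use DEFINE_SPINLOCK() instead. With CONFIG_DEBUG_SPINLOCK, each
 * spin_lock_init() call site gets its own static lock_class_key, as shown
 * above:
 *
 *	struct demo_dev {
 *		spinlock_t lock;
 *		int state;
 *	};
 *
 *	static struct demo_dev *demo_dev_alloc(gfp_t gfp)
 *	{
 *		struct demo_dev *d = kzalloc(sizeof(*d), gfp);
 *
 *		if (d)
 *			spin_lock_init(&d->lock);
 *		return d;
 *	}
 */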

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
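
/*
 * Example (illustrative sketch, not from the kernel tree;
 * demo_flush_if_idle() is hypothetical). The spin_trylock*() wrappers keep
 * the raw semantics: a non-zero result means the lock (and, for the
 * _irqsave variant, the saved interrupt state) is now held by the caller:
 *
 *	static void demo_flush_if_idle(spinlock_t *lock, int *pending)
 *	{
 *		unsigned long flags;
 *
 *		if (!spin_trylock_irqsave(lock, flags))
 *			return;
 *		*pending = 0;
 *		spin_unlock_irqrestore(lock, flags);
 *	}
 */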

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
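
/*
 * Example (illustrative sketch, not from the kernel tree; struct demo_dev
 * and demo_update_locked() are hypothetical). Because of the caveats in
 * the spin_is_locked() comment above, "caller must hold the lock" rules
 * are usually documented and checked with assert_spin_locked() or
 * lockdep_assert_held() rather than with spin_is_locked():
 *
 *	struct demo_dev {
 *		spinlock_t lock;
 *		int state;
 *	};
 *
 *	static void demo_update_locked(struct demo_dev *d)
 *	{
 *		assert_spin_locked(&d->lock);
 *		d->state++;
 *	}
 */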

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
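
/*
 * Example (illustrative sketch, not from the kernel tree; demo_list_lock,
 * struct demo_obj and demo_put() are hypothetical). This is the classic
 * "drop the last reference and unlink under a lock" pattern these helpers
 * exist for; the lock is taken, and must be released, only when the
 * counter actually reached zero:
 *
 *	static DEFINE_SPINLOCK(demo_list_lock);
 *
 *	struct demo_obj {
 *		atomic_t refcnt;
 *		struct list_head node;
 *	};
 *
 *	static void demo_put(struct demo_obj *obj)
 *	{
 *		if (!atomic_dec_and_lock(&obj->refcnt, &demo_list_lock))
 *			return;
 *		list_del(&obj->node);
 *		spin_unlock(&demo_list_lock);
 *		kfree(obj);
 *	}
 */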

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);
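
/*
 * Example (illustrative sketch, not from the kernel tree; the demo_*
 * identifiers are hypothetical). alloc_bucket_spinlocks() allocates an
 * array of bucket locks sized from cpu_mult and the number of CPUs,
 * capped at max_size, and fills in the index mask; a bucket is then
 * protected by locks[hash & mask]:
 *
 *	static spinlock_t *demo_bucket_locks;
 *	static unsigned int demo_bucket_mask;
 *
 *	static int demo_table_init(void)
 *	{
 *		return alloc_bucket_spinlocks(&demo_bucket_locks,
 *					      &demo_bucket_mask,
 *					      1024, 4, GFP_KERNEL);
 *	}
 *
 *	static void demo_table_exit(void)
 *	{
 *		free_bucket_spinlocks(demo_bucket_locks);
 *	}
 */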

#endif /* __LINUX_SPINLOCK_H */