cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spinlock.c (10214B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif
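
/*
 * The per-CPU __mmiowb_state above backs the mmiowb() tracking used by
 * architectures that select CONFIG_MMIOWB: it records, per CPU, whether
 * an MMIO write was issued while a spinlock is held, so that the unlock
 * path can order that write with an I/O barrier before the lock is
 * released. The generic helpers live in include/asm-generic/mmiowb.h.
 */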

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
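
/*
 * Concretely: the __raw_*_lock_irqsave() variants built by
 * BUILD_LOCK_OPS() below re-enable both preemption and interrupts
 * between trylock attempts, which is exactly the behaviour lockdep
 * cannot track, so CONFIG_DEBUG_LOCK_ALLOC forces the header-provided
 * non-preemption variants instead.
 */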
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif
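
/*
 * cpu_relax() is the generic busy-wait hint; on x86, for instance, it
 * typically emits the PAUSE instruction. Architectures that can yield
 * to the lock owner instead (paravirtualized or SMT-aware ones, say)
 * override the arch_*_relax() hooks above.
 */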

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
									\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
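
/*
 * For reference, the invocation above generates the out-of-line
 * spinning versions __raw_spin_lock(), __raw_spin_lock_irqsave(),
 * __raw_spin_lock_irq() and __raw_spin_lock_bh(). Expanded,
 * __raw_spin_lock() is roughly:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			arch_spin_relax(&lock->raw_lock);
 *		}
 *	}
 */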

#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif

#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
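
/*
 * The wrappers below all follow the same pattern: each out-of-line
 * _raw_*() entry point exists only when the corresponding
 * CONFIG_INLINE_* option is off (for _raw_spin_unlock(), only when
 * CONFIG_UNINLINE_SPIN_UNLOCK is on); otherwise callers pick up the
 * inline __raw_*() implementation directly from spinlock_api_smp.h or
 * rwlock_api_smp.h.
 */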

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_PREEMPT_RT

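/*
 * With CONFIG_PREEMPT_RT, rwlock_t is not a spinning lock at all but a
 * sleeping lock built on the rtmutex infrastructure, so none of the
 * out-of-line rwlock wrappers below are built for it.
 */
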
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
#define __raw_write_lock_nested(lock, subclass)	__raw_write_lock(((void)(subclass), (lock)))
#endif

void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
{
	__raw_write_lock_nested(lock, subclass);
}
EXPORT_SYMBOL(_raw_write_lock_nested);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#endif /* !CONFIG_PREEMPT_RT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
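
/*
 * The lockdep-enabled variants here share one pattern: spin_acquire()
 * feeds the acquisition (and its subclass) to lockdep, then
 * LOCK_CONTENDED() takes the lock. With CONFIG_LOCK_STAT that means
 * trying do_raw_spin_trylock() first and, on failure, recording the
 * contention point before falling back to do_raw_spin_lock(); without
 * lock statistics it simply calls do_raw_spin_lock().
 */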

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif

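/*
 * in_lock_functions() reports whether an address falls inside the
 * __lockfunc text section that the functions above are placed in.
 * Profiling code such as an architecture's profile_pc() (see the header
 * comment at the top of this file) uses it to skip over lock functions
 * when attributing samples to their callers.
 */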
notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);