cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spinlock_debug.c (5975B)


/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
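
/*
 * With CONFIG_DEBUG_SPINLOCK, each raw_spinlock_t/rwlock_t carries three
 * extra debug fields that the checks below rely on:
 *
 *   .magic     - constant stamp used to catch uninitialized or corrupted
 *                locks ("bad magic"),
 *   .owner     - the task currently holding the lock,
 *   .owner_cpu - the CPU the lock was taken on.
 *
 * Together these let the lock/unlock paths detect self-recursion,
 * releasing a lock that is not held, and releasing from the wrong task
 * or CPU.
 */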

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);
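
/*
 * Typical caller: with CONFIG_DEBUG_SPINLOCK set, the raw_spin_lock_init()
 * macro in <linux/spinlock.h> expands to roughly
 *
 *	do {
 *		static struct lock_class_key __key;
 *
 *		__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);
 *	} while (0)
 *
 * so each init site gets its own lockdep class key and the lock's name
 * for the dumps below.
 */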

#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
#endif

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		READ_ONCE(lock->owner_cpu));
	dump_stack();
}

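/*
 * debug_locks_off() returns true only if lock debugging was still
 * enabled, and disables it as a side effect: only the first detected
 * bug is dumped, subsequent ones are silently ignored.
 */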
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

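/*
 * Runs from do_raw_spin_unlock() before the arch-level release, i.e.
 * while the current task still owns the lock, so the owner fields can
 * be checked without races.
 */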
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We are now relying on the NMI watchdog to detect lockups instead of doing
 * the detection here with an unfair lock, which can cause problems of its own.
 */
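/*
 * The watchdog referred to above is the hard-lockup detector
 * (CONFIG_HARDLOCKUP_DETECTOR, typically controlled via the
 * "nmi_watchdog" sysctl), which fires when a CPU spins too long with
 * interrupts disabled, e.g. on a lock that is never released.
 */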
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret) {
		mmiowb_spin_lock();
		debug_spin_lock_after(lock);
	}
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	mmiowb_spin_unlock();
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

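/*
 * The read side only verifies the magic value: an rwlock may be held by
 * many readers at once, so the single owner/owner_cpu pair is tracked
 * for the write side only (see below).
 */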
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}

#endif /* !CONFIG_PREEMPT_RT */
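
For illustration only (not part of the file above): a minimal sketch of how
these checks surface at runtime. It assumes a kernel built with
CONFIG_DEBUG_SPINLOCK; the lock and function names (demo_lock,
demo_double_unlock) are made up for the example.

#include <linux/spinlock.h>

/*
 * Statically initialized; under CONFIG_DEBUG_SPINLOCK the initializer
 * also sets .magic/.owner/.owner_cpu.
 */
static DEFINE_SPINLOCK(demo_lock);

static void demo_double_unlock(void)
{
	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);

	/*
	 * The second unlock reaches do_raw_spin_unlock() ->
	 * debug_spin_unlock(), where SPIN_BUG_ON(!raw_spin_is_locked(...))
	 * fires and spin_dump() prints
	 *
	 *   BUG: spinlock already unlocked on CPU#N, <comm>/<pid>
	 *
	 * followed by a stack dump; debug_locks_off() then suppresses
	 * any further reports.
	 */
	spin_unlock(&demo_lock);
}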