cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ww_mutex.h (13037B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
 *
 * Original mutex implementation started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Wait/Die implementation:
 *  Copyright (C) 2013 Canonical Ltd.
 * Choice of algorithm:
 *  Copyright (C) 2018 WMWare Inc.
 *
 * This file contains the main data structure and API definitions.
 */

#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H

#include <linux/mutex.h>
#include <linux/rtmutex.h>

#if defined(CONFIG_DEBUG_MUTEXES) || \
   (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
#define DEBUG_WW_MUTEXES
#endif

#ifndef CONFIG_PREEMPT_RT
#define WW_MUTEX_BASE			mutex
#define ww_mutex_base_init(l,n,k)	__mutex_init(l,n,k)
#define ww_mutex_base_is_locked(b)	mutex_is_locked((b))
#else
#define WW_MUTEX_BASE			rt_mutex
#define ww_mutex_base_init(l,n,k)	__rt_mutex_init(l,n,k)
#define ww_mutex_base_is_locked(b)	rt_mutex_base_is_locked(&(b)->rtmutex)
#endif

struct ww_class {
	atomic_long_t stamp;
	struct lock_class_key acquire_key;
	struct lock_class_key mutex_key;
	const char *acquire_name;
	const char *mutex_name;
	unsigned int is_wait_die;
};

struct ww_mutex {
	struct WW_MUTEX_BASE base;
	struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
	struct ww_class *ww_class;
#endif
};

struct ww_acquire_ctx {
	struct task_struct *task;
	unsigned long stamp;
	unsigned int acquired;
	unsigned short wounded;
	unsigned short is_wait_die;
#ifdef DEBUG_WW_MUTEXES
	unsigned int done_acquire;
	struct ww_class *ww_class;
	void *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned int deadlock_inject_interval;
	unsigned int deadlock_inject_countdown;
#endif
};

#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die)	    \
		{ .stamp = ATOMIC_LONG_INIT(0) \
		, .acquire_name = #ww_class "_acquire" \
		, .mutex_name = #ww_class "_mutex" \
		, .is_wait_die = _is_wait_die }

#define DEFINE_WD_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)

#define DEFINE_WW_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)

/**
 * ww_mutex_init - initialize the w/w mutex
 * @lock: the mutex to be initialized
 * @ww_class: the w/w class the mutex should belong to
 *
 * Initialize the w/w mutex to unlocked state and associate it with the given
 * class. No static initializer macro for w/w mutexes is provided; this function
 * is the only way to properly initialize a w/w mutex.
 *
 * It is not allowed to initialize an already locked mutex.
 */
static inline void ww_mutex_init(struct ww_mutex *lock,
				 struct ww_class *ww_class)
{
	ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
	lock->ctx = NULL;
#ifdef DEBUG_WW_MUTEXES
	lock->ww_class = ww_class;
#endif
}
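
/*
 * Example usage: a minimal illustrative sketch. "my_class", "struct my_obj"
 * and my_obj_setup() are hypothetical names, not kernel APIs. A w/w class is
 * typically defined once per group of objects that may be locked together,
 * and every mutex protecting such an object is initialized against that
 * class:
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	struct my_obj {
 *		struct ww_mutex lock;
 *	};
 *
 *	static void my_obj_setup(struct my_obj *obj)
 *	{
 *		ww_mutex_init(&obj->lock, &my_class);
 *	}
 */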

/**
 * ww_acquire_init - initialize a w/w acquire context
 * @ctx: w/w acquire context to initialize
 * @ww_class: w/w class of the context
 *
 * Initializes a context to acquire multiple mutexes of the given w/w class.
 *
 * Context-based w/w mutex acquiring can be done in any order whatsoever within
 * a given lock class. Deadlocks will be detected and handled with the
 * wait/die logic.
 *
 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
 * result in undetected deadlocks and is hence forbidden. Mixing different contexts
 * for the same w/w class when acquiring mutexes can also result in undetected
 * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
 * enabling CONFIG_PROVE_LOCKING.
 *
 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
 * to the usual locking rules between different lock classes.
 *
 * An acquire context must be released with ww_acquire_fini by the same task
 * before the memory is freed. It is recommended to allocate the context itself
 * on the stack.
 */
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
	ctx->task = current;
	ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
	ctx->acquired = 0;
	ctx->wounded = false;
	ctx->is_wait_die = ww_class->is_wait_die;
#ifdef DEBUG_WW_MUTEXES
	ctx->ww_class = ww_class;
	ctx->done_acquire = 0;
	ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
			 &ww_class->acquire_key, 0);
	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	ctx->deadlock_inject_interval = 1;
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
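
/*
 * Example usage: a minimal illustrative sketch of the acquire-context
 * lifecycle. struct my_obj, my_class and lock_pair() are hypothetical. All
 * mutexes of the class are taken within the acquire phase, ww_acquire_done()
 * (below) marks the end of that phase, and ww_acquire_fini() runs only after
 * every lock has been dropped again. Backing off on -EDEADLK is shown in the
 * sketch after ww_mutex_lock_slow() further down:
 *
 *	static int lock_pair(struct my_obj *a, struct my_obj *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &my_class);
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *		if (ret)
 *			goto out_fini;
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *		if (ret)
 *			goto out_unlock_a;
 *		ww_acquire_done(&ctx);
 *
 *		... critical section: both a->lock and b->lock are held ...
 *
 *		ww_mutex_unlock(&b->lock);
 *	out_unlock_a:
 *		ww_mutex_unlock(&a->lock);
 *	out_fini:
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 */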

/**
 * ww_acquire_done - marks the end of the acquire phase
 * @ctx: the acquire context
 *
 * Marks the end of the acquire phase; any further w/w mutex lock calls using
 * this context are forbidden.
 *
 * Calling this function is optional; it is just useful to document w/w mutex
 * code and clearly delineate the acquire phase from actually using the locked
 * data structures.
 */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef DEBUG_WW_MUTEXES
	lockdep_assert_held(ctx);

	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
	ctx->done_acquire = 1;
#endif
}

/**
 * ww_acquire_fini - releases a w/w acquire context
 * @ctx: the acquire context to free
 *
 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
 * mutexes have been released with ww_mutex_unlock.
 */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->acquired);
	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep will normally handle this,
		 * but fail without it anyway
		 */
		ctx->done_acquire = 1;

	if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC))
		/* ensure ww_acquire_fini will still fail if called twice */
		ctx->acquired = ~0U;
#endif
}

/**
 * ww_mutex_lock - acquire the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
 *
 * Lock the w/w mutex exclusively for this task.
 *
 * Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/die algorithm. If the lock isn't immediately available this function
 * will either sleep until it is (wait case), or select the current context
 * for backing off by returning -EDEADLK (die case). Trying to acquire the
 * same lock with the same context twice is also detected and signalled by
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
 *
 * In the die case the caller must release all currently held w/w mutexes for
 * the given context and then wait for this contending lock to be available by
 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
 * scanning through lru lists trying to free resources).
 *
 * The mutex must later on be released by the same task that
 * acquired it. The task may not exit without first unlocking the mutex. Also,
 * kernel memory where the mutex resides must not be freed with the mutex still
 * locked. The mutex must first be initialized (or statically defined) before it
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
 * of the same w/w lock class as was used to initialize the acquire context.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
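
/*
 * Example usage: a minimal illustrative sketch of the "skip instead of
 * backing off" alternative mentioned above. The lru list, struct my_obj and
 * its "node" member are hypothetical, and "ctx" is an acquire context set up
 * as in the earlier sketch. A scan looking for victims can simply leave
 * contended or already-held objects alone:
 *
 *	list_for_each_entry(obj, &lru, node) {
 *		if (ww_mutex_lock(&obj->lock, &ctx))
 *			continue;	... -EDEADLK or -EALREADY: skip it ...
 *
 *		... try to free the resource behind obj ...
 *
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */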

/**
 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Lock the w/w mutex exclusively for this task.
 *
 * Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/die algorithm. If the lock isn't immediately available this function
 * will either sleep until it is (wait case), or select the current context
 * for backing off by returning -EDEADLK (die case). Trying to acquire the
 * same lock with the same context twice is also detected and signalled by
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
 * signal arrives while waiting for the lock then this function returns -EINTR.
 *
 * In the die case the caller must release all currently held w/w mutexes for
 * the given context and then wait for this contending lock to be available by
 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
 * not acquire this lock and proceed with trying to acquire further w/w mutexes
 * (e.g. when scanning through lru lists trying to free resources).
 *
 * The mutex must later on be released by the same task that
 * acquired it. The task may not exit without first unlocking the mutex. Also,
 * kernel memory where the mutex resides must not be freed with the mutex still
 * locked. The mutex must first be initialized (or statically defined) before it
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
 * of the same w/w lock class as was used to initialize the acquire context.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
						    struct ww_acquire_ctx *ctx);
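
/*
 * Example usage: a minimal illustrative sketch; "obj" and "ctx" are as in the
 * earlier sketches. Code paths that can bail out to userspace typically use
 * the interruptible variant and just propagate -EINTR, while -EDEADLK is
 * handled exactly as for ww_mutex_lock():
 *
 *	ret = ww_mutex_lock_interruptible(&obj->lock, &ctx);
 *	if (ret == -EDEADLK)
 *		... back off and retry as described above ...
 *	else if (ret)
 *		... -EINTR (or -EALREADY): unwind and return ret ...
 */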

/**
 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a die case. This function
 * will sleep until the lock becomes available.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the context held. It is forbidden to call this on anything other than
 * the contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock directly. This function here is simply to help w/w mutex
 * locking code readability by clearly denoting the slowpath.
 */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;
#ifdef DEBUG_WW_MUTEXES
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	ret = ww_mutex_lock(lock, ctx);
	(void)ret;
}
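
/*
 * Example usage: a minimal illustrative sketch of the full die/backoff cycle
 * described above. struct my_obj, my_class and lock_pair_slow() are
 * hypothetical and the two objects are assumed to be distinct; unlocking and
 * ww_acquire_fini() are left to the caller. On -EDEADLK every held lock is
 * dropped, the contended mutex is then taken with ww_mutex_lock_slow(), and
 * the remaining lock is retried until both are held:
 *
 *	static void lock_pair_slow(struct my_obj *a, struct my_obj *b,
 *				   struct ww_acquire_ctx *ctx)
 *	{
 *		struct my_obj *first = a, *second = b;
 *
 *		ww_acquire_init(ctx, &my_class);
 *		ww_mutex_lock(&first->lock, ctx);
 *		while (ww_mutex_lock(&second->lock, ctx) == -EDEADLK) {
 *			ww_mutex_unlock(&first->lock);
 *			ww_mutex_lock_slow(&second->lock, ctx);
 *			swap(first, second);
 *		}
 *		ww_acquire_done(ctx);
 *	}
 */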

/**
 * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a die case. This function
 * will sleep until the lock becomes available and returns 0 when the lock has
 * been acquired. If a signal arrives while waiting for the lock then this
 * function returns -EINTR.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the given context held. It is forbidden to call this on anything other
 * than the contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock_interruptible directly. This function here is simply to help
 * w/w mutex locking code readability by clearly denoting the slowpath.
 */
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
				 struct ww_acquire_ctx *ctx)
{
#ifdef DEBUG_WW_MUTEXES
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	return ww_mutex_lock_interruptible(lock, ctx);
}

extern void ww_mutex_unlock(struct ww_mutex *lock);

extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
					 struct ww_acquire_ctx *ctx);

/**
 * ww_mutex_destroy - mark a w/w mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
#ifndef CONFIG_PREEMPT_RT
	mutex_destroy(&lock->base);
#endif
}

/**
 * ww_mutex_is_locked - is the w/w mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
	return ww_mutex_base_is_locked(&lock->base);
}
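
/*
 * Example usage: a minimal illustrative sketch; my_obj_update() is
 * hypothetical. ww_mutex_is_locked() is mostly useful for asserting that
 * callers honour the locking contract:
 *
 *	static void my_obj_update(struct my_obj *obj)
 *	{
 *		WARN_ON(!ww_mutex_is_locked(&obj->lock));
 *		... modify data protected by obj->lock ...
 *	}
 */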

#endif