cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ww_mutex.h (14074B)


/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_next_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_prev_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;
	if (pos)
		p = &pos->list;
	__mutex_add_waiter(lock, waiter, p);
}

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}

static inline void lock_wait_lock(struct mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);
}

static inline void unlock_wait_lock(struct mutex *lock)
{
	raw_spin_unlock(&lock->wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter

static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_next(&w->tree_entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_prev(&w->tree_entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* RT unconditionally adds the waiter first and then removes it on error */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}

static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */

/*
 * Wait-Die:
 *   A newer transaction is killed when:
 *     it (the new transaction) requests a lock that is being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when:
 *     an older transaction requests a lock that is being held by
 *     the newer transaction.
 */
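
/*
 * Illustrative sketch (not part of the original file): how a caller of the
 * public ww_mutex API typically drives the Wait-Die/Wound-Wait machinery
 * below.  The helper, its name and example_ww_class are hypothetical; the
 * calls (ww_acquire_init(), ww_mutex_lock(), ww_mutex_lock_slow(),
 * ww_mutex_unlock(), ww_acquire_done(), ww_acquire_fini()) are the regular
 * wound/wait mutex interface, and swap() is the kernel's value-swap helper.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static int example_lock_pair(struct ww_mutex *m1, struct ww_mutex *m2,
 *				     struct ww_acquire_ctx *ctx)
 *	{
 *		int err;
 *
 *		ww_acquire_init(ctx, &example_ww_class);
 *
 *		ww_mutex_lock(m1, ctx);	// nothing held yet: cannot be killed
 *		err = ww_mutex_lock(m2, ctx);
 *		while (err == -EDEADLK) {
 *			// Told to die (Wait-Die) or wounded (Wound-Wait):
 *			// drop what we hold, sleep on the contended lock,
 *			// then retry in the new order.
 *			ww_mutex_unlock(m1);
 *			ww_mutex_lock_slow(m2, ctx);
 *			swap(m1, m2);
 *			err = ww_mutex_lock(m2, ctx);
 *		}
 *
 *		ww_acquire_done(ctx);	// all locks of this ctx acquired
 *		return 0;
 *	}
 *
 * The caller later releases both locks with ww_mutex_unlock() and then calls
 * ww_acquire_fini(ctx).
 */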

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
	/* kernel prio; less is more */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_prio(a_prio) || rt_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static prio */

		if (dl_prio(a_prio)) {
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio */
	}
#endif

	/* FIFO order tie break -- bigger is younger */
	return (signed long)(a->stamp - b->stamp) > 0;
}
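
/*
 * Worked example for the FIFO tie break above (illustration only, not part
 * of the original file): stamps are taken from a free-running unsigned long
 * counter, so the comparison is done on the unsigned difference cast to
 * signed, which stays correct even when the counter wraps around:
 *
 *	a->stamp = ULONG_MAX - 1	// older context, stamped first
 *	b->stamp = 2			// younger context, counter has wrapped
 *
 *	(signed long)(b->stamp - a->stamp) ==  4, i.e. > 0: b is younger
 *	(signed long)(a->stamp - b->stamp) == -4, i.e. < 0: a is older
 */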

/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holder's. Even though multiple waiters may wound the lock
 * holder, it is sufficient that only one of them does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx. If there are more important contexts
 * waiting behind us on the wait-list, check whether they need to die, or
 * whether they need to wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}

static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the lock
 * over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it so that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}