cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

ww_rt_mutex.c (2400B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * rtmutex API
      4 */
      5#include <linux/spinlock.h>
      6#include <linux/export.h>
      7
      8#define RT_MUTEX_BUILD_MUTEX
      9#define WW_RT
     10#include "rtmutex.c"
     11
/**
 * ww_mutex_trylock - try to acquire a wait/wound mutex without blocking
 * @lock: the ww_mutex to acquire
 * @ww_ctx: the acquire context, or NULL for a context-less trylock
 *
 * Returns: 1 if the lock was acquired, 0 if it is contended.
 */
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	/* Without an acquire context this is a plain rt_mutex trylock. */
	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		/*
		 * Publish the context on the now-owned lock first, then
		 * record the acquisition with lockdep (trylock = 1,
		 * nested under the ctx's dep_map).
		 */
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
     36
/*
 * __ww_rt_mutex_lock - common implementation for the ww_mutex_lock*() variants
 * @lock:   the ww_mutex to acquire
 * @ww_ctx: the acquire context, or NULL for a context-less lock
 * @state:  task state to block in (TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE)
 * @ip:     caller's instruction pointer, passed through to lockdep
 *
 * Returns 0 on success, -EALREADY if @ww_ctx already owns @lock, or the
 * (negative) error from rt_mutex_slowlock() on the contended path.
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		/* Recursive acquisition within the same context is an error. */
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	/* Tell lockdep before attempting the acquisition (trylock = 0). */
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/* Fastpath: uncontended cmpxchg of the owner from NULL to current. */
	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* Undo the lockdep annotation if the slowpath failed to acquire. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}
     77
     78int __sched
     79ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     80{
     81	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
     82}
     83EXPORT_SYMBOL(ww_mutex_lock);
     84
     85int __sched
     86ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     87{
     88	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
     89}
     90EXPORT_SYMBOL(ww_mutex_lock_interruptible);
     91
/**
 * ww_mutex_unlock - release a wait/wound mutex
 * @lock: the ww_mutex to release
 *
 * The ordering here is deliberate: clear the ww context first, drop the
 * lockdep annotation, and only then release the underlying rt_mutex so a
 * new owner cannot observe stale ww state.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);

	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);