cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

osq_lock.c (5958B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
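
/*
 * The queue tail (lock->tail) holds the encoded CPU number of the most
 * recently queued spinner, or OSQ_UNLOCKED_VAL (0) when nobody is queued.
 */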
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
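
/*
 * Example: a spinner on CPU 2 publishes encode_cpu(2) == 3 in lock->tail;
 * decode_cpu(3) maps back to CPU 2's per-cpu osq_node.
 */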

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
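/*
 * Called with @prev == NULL from osq_unlock() (the tail is reset to
 * OSQ_UNLOCKED_VAL) and with the queued node's @prev from the unqueue path
 * in osq_lock() (the tail is handed back to the node in front of us).
 */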
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in queue, then the 'old' value will be
	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since
	 * we're currently last in queue and the queue will then become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;

	/*
	 * osq_lock()			unqueue
	 *
	 * node->prev = prev		osq_wait_next()
	 * WMB				MB
	 * prev->next = node		next->prev = prev // unqueue-C
	 *
	 * Here 'node->prev' and 'next->prev' are the same variable and we need
	 * to ensure these stores happen in-order to avoid corrupting the list.
	 */
	smp_wmb();

	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock can proceed and wipe the node element from the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	/*
	 * Wait until we acquire the lock or the spin is cancelled. Note that
	 * need_resched() will come with an IPI, which will wake
	 * smp_cond_load_relaxed() if it is implemented with a monitor-wait.
	 * vcpu_is_preempted() relies on polling; be careful.
	 */
	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
				  vcpu_is_preempted(node_cpu(node->prev))))
		return true;

	/* unqueue */
	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		/*
		 * cpu_relax() below implies a compiler barrier which would
		 * prevent this comparison being optimized away.
		 */
		if (data_race(prev->next) == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case: a successor has already linked itself in
	 * via our @node->next, so hand the lock straight to it.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
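
For context, the sketch below is not part of osq_lock.c. It is a minimal
illustration, modeled loosely on how the mutex/rwsem optimistic-spin
slowpaths use this API: disable preemption, queue on the per-lock OSQ with
osq_lock(), spin on the lock owner while holding the OSQ, then release it
with osq_unlock(). The struct my_lock type and the my_lock_try_acquire() /
my_lock_owner_running() helpers are placeholders for illustration, not
kernel APIs.

#include <linux/atomic.h>
#include <linux/osq_lock.h>
#include <linux/preempt.h>
#include <linux/sched.h>

/* Placeholder lock type: a real user would be a mutex, rwsem, etc. */
struct my_lock {
	atomic_long_t			owner;
	struct optimistic_spin_queue	osq;
};

/* Placeholder helpers standing in for the lock's own acquire/owner logic. */
static bool my_lock_try_acquire(struct my_lock *lock);
static bool my_lock_owner_running(struct my_lock *lock);

static bool my_lock_optimistic_spin(struct my_lock *lock)
{
	bool acquired = false;

	preempt_disable();

	/* Queue on the OSQ so that only one waiter spins on the owner. */
	if (!osq_lock(&lock->osq))
		goto out;

	while (!(acquired = my_lock_try_acquire(lock))) {
		/* Stop spinning if the owner is off-CPU or we must resched. */
		if (!my_lock_owner_running(lock) || need_resched())
			break;
		cpu_relax();
	}

	osq_unlock(&lock->osq);
out:
	preempt_enable();
	return acquired;
}

Note that when osq_lock() returns false the spin was cancelled and the node
has already been unqueued, so the caller must not call osq_unlock(); it
simply falls back to the sleeping lock's blocking slowpath.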