cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sync.c


// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
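
/*
 * The ->gp_state machine, as driven by rcu_sync_enter()/rcu_sync_exit()
 * and the rcu_sync_func() callback below:
 *
 *	GP_IDLE:   no writer active or pending; readers use their fast paths
 *	GP_ENTER:  rcu_sync_enter() ran; waiting one grace period so that
 *		   all readers observe the state change
 *	GP_PASSED: a grace period has elapsed; readers are on their slow paths
 *	GP_EXIT:   rcu_sync_exit() ran; waiting one grace period before
 *		   readers may return to their fast paths
 *	GP_REPLAY: an enter/exit pair completed while in GP_EXIT; one more
 *		   grace period is needed before going back to GP_IDLE
 */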

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}
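
/*
 * A minimal embedding sketch, modeled on how percpu-rwsem uses this API.
 * "struct my_lock" and the my_*() helpers here and below are hypothetical
 * illustrations, not part of sync.c; they assume <linux/mutex.h> and
 * <linux/slab.h>.
 */
struct my_lock {
	struct rcu_sync rss;		/* gates the reader fast path */
	struct mutex writer_mutex;	/* serializes writers */
};

static void my_lock_init(struct my_lock *l)
{
	rcu_sync_init(&l->rss);
	mutex_init(&l->writer_mutex);
}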

/**
 * rcu_sync_enter_start() - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
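
/*
 * Early-boot sketch: initialize the hypothetical my_lock with readers
 * already forced onto their slow path, so that the first rcu_sync_exit()
 * is what re-enables the fast path.  Upstream, cgroup_init() uses
 * rcu_sync_enter_start() this way for cgroup_threadgroup_rwsem.
 */
static void my_lock_init_held(struct my_lock *l)
{
	rcu_sync_init(&l->rss);
	mutex_init(&l->writer_mutex);
	rcu_sync_enter_start(&l->rss);
}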


static void rcu_sync_func(struct rcu_head *rhp);

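/*
 * Queue rcu_sync_func() to run after a grace period has elapsed, i.e.
 * once all RCU readers that might have seen the old ->gp_state are done.
 */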
static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_enter() or rcu_sync_exit().
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
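
/*
 * Reader-side sketch for the hypothetical my_lock: rcu_sync_is_idle()
 * (from <linux/rcu_sync.h>) tests ->gp_state == GP_IDLE, so the fast
 * path runs only while no writer is active or can become visible before
 * a grace period elapses.  The my_read_fast()/my_read_slow() bodies are
 * placeholders.
 */
static void my_read_fast(struct my_lock *l);	/* hypothetical */
static void my_read_slow(struct my_lock *l);	/* hypothetical */

static void my_read(struct my_lock *l)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(&l->rss))
		my_read_fast(l);	/* lockless fast path */
	else
		my_read_slow(l);	/* coordinate with the writer */
	rcu_read_unlock();
}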

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback
		 * if we are called at early boot time, but this shouldn't
		 * happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above, this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}
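
/*
 * Write-side sketch for the hypothetical my_lock: force readers onto
 * their slow path (possibly waiting a full grace period), then
 * serialize against other writers.
 */
static void my_write_lock(struct my_lock *l)
{
	rcu_sync_enter(&l->rss);
	mutex_lock(&l->writer_mutex);
}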

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
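
/*
 * Matching unlock for the write-side sketch: drop writer serialization,
 * then let readers back onto their fast path once a further grace
 * period has elapsed.
 */
static void my_write_unlock(struct my_lock *l)
{
	mutex_unlock(&l->writer_mutex);
	rcu_sync_exit(&l->rss);
}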

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
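
/*
 * Teardown sketch for the hypothetical my_lock: rcu_sync_dtor() uses
 * rcu_barrier() to wait out any pending rcu_sync_func() callback, after
 * which the structure is safe to free.
 */
static void my_lock_destroy(struct my_lock *l)
{
	rcu_sync_dtor(&l->rss);
	kfree(l);	/* assumes l was kmalloc()ed */
}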