cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

update.c (18663B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */
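
/*
 * Because of the MODULE_PARAM_PREFIX above, the module_param() declarations
 * in this file show up as "rcupdate."-prefixed kernel boot parameters.  An
 * illustrative command-line fragment (not taken from this file) would be:
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1
 *
 * which keeps grace periods expedited through boot and then, via
 * rcu_end_inkernel_boot() below, forces normal grace periods once the
 * in-kernel boot sequence has ended.
 */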

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret:	Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (i.e.,
 * in the section between rcu_idle_enter() and rcu_idle_exit()), then
 * rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering them to be in an extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, other CPUs that started a
 * grace period can notice our extended quiescent state.  Otherwise we would
 * delay any grace period as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
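
/*
 * Example (sketch): the usual consumer of rcu_read_lock_sched_held() is a
 * lockdep-based debug check in a function that must run with preemption
 * disabled or inside rcu_read_lock_sched().  The helper and the per-CPU
 * counter below are hypothetical, not part of this file's API.
 */
static void __maybe_unused example_sched_protected_inc(int __percpu *ctr)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "example_sched_protected_inc() needs rcu_read_lock_sched()");
	this_cpu_inc(*ctr);
}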

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the portion of boot between when the
 * first task is spawned and when the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
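
/*
 * Example (sketch): rcu_expedite_gp() and rcu_unexpedite_gp() nest, so a
 * latency-sensitive window can be bracketed as below, and any
 * synchronize_rcu() issued inside it behaves like
 * synchronize_rcu_expedited().  The function itself is hypothetical; the
 * kernel does the equivalent around early boot and suspend/resume.
 */
static void __maybe_unused example_expedited_window(void)
{
	rcu_expedite_gp();	/* must be paired with rcu_unexpedite_gp() */
	synchronize_rcu();	/* expedited while the nesting count is nonzero */
	rcu_unexpedite_gp();
}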

static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
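
/*
 * Example (sketch): rcu_read_lock_held() is normally consumed indirectly,
 * through lockdep-checked dereferences.  The structure, pointer, and
 * update-side lock below are hypothetical; the check passes either inside
 * rcu_read_lock() or with example_cfg_lock held.
 */
struct example_cfg {
	int value;
};
static struct example_cfg __rcu *example_cfg_p;
static DEFINE_SPINLOCK(example_cfg_lock);

static int __maybe_unused example_cfg_value(void)
{
	struct example_cfg *cfg;

	/* rcu_dereference_check() folds in rcu_read_lock_held(). */
	cfg = rcu_dereference_check(example_cfg_p,
				    lockdep_is_held(&example_cfg_lock));
	return cfg ? cfg->value : 0;
}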

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		/* Register each distinct callback function only once. */
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			init_rcu_head_on_stack(&rs_array[i].head);
			init_completion(&rs_array[i].completion);
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
		}
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			wait_for_completion(&rs_array[i].completion);
			destroy_rcu_head_on_stack(&rs_array[i].head);
		}
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
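
/*
 * Example (sketch): the pattern that wakeme_after_rcu() and __wait_rcu_gp()
 * implement, open-coded here for a single RCU flavor.  This is essentially
 * the slow path of synchronize_rcu(): post a callback that completes a
 * completion, then block until the grace period has elapsed.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_wait_for_one_gp(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_rcu(&rs.head, wakeme_after_rcu);	/* fires after a full grace period */
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}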

void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);
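
/*
 * Example (sketch): the rcuwait pattern that finish_rcuwait() cleans up
 * after.  One task blocks until a flag is observed set; another sets the
 * flag and issues the wakeup.  The structure and both helpers are
 * hypothetical, the embedded rcuwait is assumed to have been set up with
 * rcuwait_init(), and <linux/rcuwait.h> is assumed to be reachable through
 * the includes above.
 */
struct example_rcuwaiter {
	struct rcuwait wait;
	bool done;
};

static void __maybe_unused example_rcuwait_block(struct example_rcuwaiter *w)
{
	/* Sleeps until done is true; the macro finishes with finish_rcuwait(). */
	rcuwait_wait_event(&w->wait, READ_ONCE(w->done), TASK_UNINTERRUPTIBLE);
}

static void __maybe_unused example_rcuwait_signal(struct example_rcuwaiter *w)
{
	WRITE_ONCE(w->done, true);
	rcuwait_wake_up(&w->wait);
}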

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is going out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
module_param(rcu_exp_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);
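
/*
 * Example (illustrative command-line fragment): with the "rcupdate." prefix
 * defined earlier in this file, the stall-warning knobs above can be tuned
 * at boot, for instance
 *
 *	rcupdate.rcu_cpu_stall_timeout=300 rcupdate.rcu_cpu_stall_suppress_at_boot=1
 *
 * which would lengthen the normal stall-warning timeout to 300 seconds and
 * suppress stall warnings until the in-kernel boot sequence has ended.
 */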

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU)) {
		early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
		call_srcu(&early_srcu, &shead, test_callback);
	}
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
			WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */