cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

seqlock.h (38820B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef __LINUX_SEQLOCK_H
      3#define __LINUX_SEQLOCK_H
      4
      5/*
      6 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
      7 * lockless readers (read-only retry loops), and no writer starvation.
      8 *
      9 * See Documentation/locking/seqlock.rst
     10 *
     11 * Copyrights:
     12 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
     13 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
     14 */
     15
     16#include <linux/compiler.h>
     17#include <linux/kcsan-checks.h>
     18#include <linux/lockdep.h>
     19#include <linux/mutex.h>
     20#include <linux/preempt.h>
     21#include <linux/spinlock.h>
     22
     23#include <asm/processor.h>
     24
     25/*
     26 * The seqlock seqcount_t interface does not prescribe a precise sequence of
     27 * read begin/retry/end. For readers, typically there is a call to
     28 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
     29 * esoteric cases which do not follow this pattern.
     30 *
     31 * As a consequence, we take the following best-effort approach for raw usage
     32 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
     33 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
     34 * atomics; if there is a matching read_seqcount_retry() call, no following
     35 * memory operations are considered atomic. Usage of the seqlock_t interface
     36 * is not affected.
     37 */
     38#define KCSAN_SEQLOCK_REGION_MAX 1000
     39
     40/*
     41 * Sequence counters (seqcount_t)
     42 *
     43 * This is the raw counting mechanism, without any writer protection.
     44 *
     45 * Write side critical sections must be serialized and non-preemptible.
     46 *
     47 * If readers can be invoked from hardirq or softirq contexts,
     48 * interrupts or bottom halves must also be respectively disabled before
     49 * entering the write section.
     50 *
     51 * This mechanism can't be used if the protected data contains pointers,
     52 * as the writer can invalidate a pointer that a reader is following.
     53 *
     54 * If the write serialization mechanism is one of the common kernel
     55 * locking primitives, use a sequence counter with associated lock
     56 * (seqcount_LOCKNAME_t) instead.
     57 *
     58 * If it's desired to automatically handle the sequence counter writer
     59 * serialization and non-preemptibility requirements, use a sequential
     60 * lock (seqlock_t) instead.
     61 *
     62 * See Documentation/locking/seqlock.rst
     63 */
     64typedef struct seqcount {
     65	unsigned sequence;
     66#ifdef CONFIG_DEBUG_LOCK_ALLOC
     67	struct lockdep_map dep_map;
     68#endif
     69} seqcount_t;
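
/*
 * Example (illustrative sketch only, not part of this header's API): a
 * minimal lockless reader paired with a serialized, non-preemptible
 * writer for a plain seqcount_t. The names foo_seq, foo_a, foo_b,
 * foo_read() and foo_write() are hypothetical.
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static u64 foo_a, foo_b;
 *
 *	u64 foo_read(void)
 *	{
 *		unsigned seq;
 *		u64 a, b;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			a = foo_a;
 *			b = foo_b;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *
 *		return a + b;
 *	}
 *
 *	void foo_write(u64 a, u64 b)
 *	{
 *		// Callers are assumed to already be serialized and
 *		// non-preemptible, as required above.
 *		write_seqcount_begin(&foo_seq);
 *		foo_a = a;
 *		foo_b = b;
 *		write_seqcount_end(&foo_seq);
 *	}
 */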
     70
     71static inline void __seqcount_init(seqcount_t *s, const char *name,
     72					  struct lock_class_key *key)
     73{
     74	/*
     75	 * Make sure we are not reinitializing a held lock:
     76	 */
     77	lockdep_init_map(&s->dep_map, name, key, 0);
     78	s->sequence = 0;
     79}
     80
     81#ifdef CONFIG_DEBUG_LOCK_ALLOC
     82
     83# define SEQCOUNT_DEP_MAP_INIT(lockname)				\
     84		.dep_map = { .name = #lockname }
     85
     86/**
     87 * seqcount_init() - runtime initializer for seqcount_t
     88 * @s: Pointer to the seqcount_t instance
     89 */
     90# define seqcount_init(s)						\
     91	do {								\
     92		static struct lock_class_key __key;			\
     93		__seqcount_init((s), #s, &__key);			\
     94	} while (0)
     95
     96static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
     97{
     98	seqcount_t *l = (seqcount_t *)s;
     99	unsigned long flags;
    100
    101	local_irq_save(flags);
    102	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
    103	seqcount_release(&l->dep_map, _RET_IP_);
    104	local_irq_restore(flags);
    105}
    106
    107#else
    108# define SEQCOUNT_DEP_MAP_INIT(lockname)
    109# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
    110# define seqcount_lockdep_reader_access(x)
    111#endif
    112
    113/**
    114 * SEQCNT_ZERO() - static initializer for seqcount_t
    115 * @name: Name of the seqcount_t instance
    116 */
    117#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
    118
    119/*
    120 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
    121 *
    122 * A sequence counter which associates the lock used for writer
    123 * serialization at initialization time. This enables lockdep to validate
    124 * that the write side critical section is properly serialized.
    125 *
    126 * For associated locks which do not implicitly disable preemption,
    127 * preemption protection is enforced in the write side function.
    128 *
    129 * Lockdep is never used in any of the raw write variants.
    130 *
    131 * See Documentation/locking/seqlock.rst
    132 */
    133
    134/*
    135 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
    136 * disable preemption. It can lead to higher latencies, and the write side
    137 * sections will not be able to acquire locks which become sleeping locks
    138 * (e.g. spinlock_t).
    139 *
    140 * To remain preemptible while avoiding a possible livelock caused by the
    141 * reader preempting the writer, use a different technique: let the reader
    142 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
    143 * case, acquire then release the associated LOCKNAME writer serialization
    144 * lock. This will allow any possibly-preempted writer to make progress
    145 * until the end of its writer serialization lock critical section.
    146 *
    147 * This lock-unlock technique must be implemented for all of PREEMPT_RT
    148 * sleeping locks.  See Documentation/locking/locktypes.rst
    149 */
    150#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
    151#define __SEQ_LOCK(expr)	expr
    152#else
    153#define __SEQ_LOCK(expr)
    154#endif
    155
    156/*
    157 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
    158 * @seqcount:	The real sequence counter
    159 * @lock:	Pointer to the associated lock
    160 *
    161 * A plain sequence counter with external writer synchronization by
    162 * LOCKNAME @lock. The lock is associated to the sequence counter in the
    163 * static initializer or init function. This enables lockdep to validate
    164 * that the write side critical section is properly serialized.
    165 *
    166 * LOCKNAME:	raw_spinlock, spinlock, rwlock or mutex
    167 */
    168
    169/*
    170 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
    171 * @s:		Pointer to the seqcount_LOCKNAME_t instance
    172 * @lock:	Pointer to the associated lock
    173 */
    174
    175#define seqcount_LOCKNAME_init(s, _lock, lockname)			\
    176	do {								\
    177		seqcount_##lockname##_t *____s = (s);			\
    178		seqcount_init(&____s->seqcount);			\
    179		__SEQ_LOCK(____s->lock = (_lock));			\
    180	} while (0)
    181
    182#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
    183#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
    184#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
    185#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
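
/*
 * Example (illustrative sketch only): a seqcount_spinlock_t whose writer
 * serialization is provided by an already existing spinlock_t, so that
 * lockdep can verify the lock is held in write side sections. The names
 * foo_lock, foo_seq, foo_data and foo_update() are hypothetical.
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_spinlock_t foo_seq =
 *		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 *	static int foo_data;
 *
 *	void foo_update(int val)
 *	{
 *		spin_lock(&foo_lock);		// serializes writers
 *		write_seqcount_begin(&foo_seq);	// lockdep checks foo_lock
 *		foo_data = val;
 *		write_seqcount_end(&foo_seq);
 *		spin_unlock(&foo_lock);
 *	}
 */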
    186
    187/*
    188 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
    189 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
    190 *
    191 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
    192 * @locktype:		LOCKNAME canonical C data type
    193 * @preemptible:	preemptibility of above locktype
    194 * @lockmember:		argument for lockdep_assert_held()
    195 * @lockbase:		associated lock release function (prefix only)
    196 * @lock_acquire:	associated lock acquisition function (full call)
    197 */
    198#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
    199typedef struct seqcount_##lockname {					\
    200	seqcount_t		seqcount;				\
    201	__SEQ_LOCK(locktype	*lock);					\
    202} seqcount_##lockname##_t;						\
    203									\
    204static __always_inline seqcount_t *					\
    205__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
    206{									\
    207	return &s->seqcount;						\
    208}									\
    209									\
    210static __always_inline unsigned						\
    211__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
    212{									\
    213	unsigned seq = READ_ONCE(s->seqcount.sequence);			\
    214									\
    215	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
    216		return seq;						\
    217									\
    218	if (preemptible && unlikely(seq & 1)) {				\
    219		__SEQ_LOCK(lock_acquire);				\
    220		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
    221									\
    222		/*							\
    223		 * Re-read the sequence counter since the (possibly	\
    224		 * preempted) writer made progress.			\
    225		 */							\
    226		seq = READ_ONCE(s->seqcount.sequence);			\
    227	}								\
    228									\
    229	return seq;							\
    230}									\
    231									\
    232static __always_inline bool						\
    233__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
    234{									\
    235	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
    236		return preemptible;					\
    237									\
    238	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
    239	return false;							\
    240}									\
    241									\
    242static __always_inline void						\
    243__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)		\
    244{									\
    245	__SEQ_LOCK(lockdep_assert_held(lockmember));			\
    246}
    247
    248/*
    249 * __seqprop() for seqcount_t
    250 */
    251
    252static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
    253{
    254	return s;
    255}
    256
    257static inline unsigned __seqprop_sequence(const seqcount_t *s)
    258{
    259	return READ_ONCE(s->sequence);
    260}
    261
    262static inline bool __seqprop_preemptible(const seqcount_t *s)
    263{
    264	return false;
    265}
    266
    267static inline void __seqprop_assert(const seqcount_t *s)
    268{
    269	lockdep_assert_preemption_disabled();
    270}
    271
    272#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)
    273
    274SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
    275SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
    276SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
    277SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))
    278
    279/*
    280 * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
    281 * @name:	Name of the seqcount_LOCKNAME_t instance
    282 * @lock:	Pointer to the associated LOCKNAME
    283 */
    284
    285#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
    286	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
    287	__SEQ_LOCK(.lock	= (assoc_lock))				\
    288}
    289
    290#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
    291#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
    292#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
    293#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
    294#define SEQCNT_WW_MUTEX_ZERO(name, lock) 	SEQCOUNT_LOCKNAME_ZERO(name, lock)
    295
    296#define __seqprop_case(s, lockname, prop)				\
    297	seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
    298
    299#define __seqprop(s, prop) _Generic(*(s),				\
    300	seqcount_t:		__seqprop_##prop((void *)(s)),		\
    301	__seqprop_case((s),	raw_spinlock,	prop),			\
    302	__seqprop_case((s),	spinlock,	prop),			\
    303	__seqprop_case((s),	rwlock,		prop),			\
    304	__seqprop_case((s),	mutex,		prop))
    305
    306#define seqprop_ptr(s)			__seqprop(s, ptr)
    307#define seqprop_sequence(s)		__seqprop(s, sequence)
    308#define seqprop_preemptible(s)		__seqprop(s, preemptible)
    309#define seqprop_assert(s)		__seqprop(s, assert)
    310
    311/**
    312 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
    313 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    314 *
    315 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
    316 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
    317 * provided before actually loading any of the variables that are to be
    318 * protected in this critical section.
    319 *
    320 * Use carefully, only in critical code, and comment how the barrier is
    321 * provided.
    322 *
    323 * Return: count to be passed to read_seqcount_retry()
    324 */
    325#define __read_seqcount_begin(s)					\
    326({									\
    327	unsigned __seq;							\
    328									\
    329	while ((__seq = seqprop_sequence(s)) & 1)			\
    330		cpu_relax();						\
    331									\
    332	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
    333	__seq;								\
    334})
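
/*
 * Example (illustrative sketch only): a reader providing the required
 * read barrier itself, which makes it equivalent to using
 * raw_read_seqcount_begin(). The names foo_seq, foo_val and foo_read()
 * are hypothetical.
 *
 *	int foo_read(void)
 *	{
 *		unsigned seq;
 *		int val;
 *
 *		do {
 *			seq = __read_seqcount_begin(&foo_seq);
 *			smp_rmb();	// order the counter read before the data read
 *			val = foo_val;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *
 *		return val;
 *	}
 */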
    335
    336/**
    337 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
    338 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    339 *
    340 * Return: count to be passed to read_seqcount_retry()
    341 */
    342#define raw_read_seqcount_begin(s)					\
    343({									\
    344	unsigned _seq = __read_seqcount_begin(s);			\
    345									\
    346	smp_rmb();							\
    347	_seq;								\
    348})
    349
    350/**
    351 * read_seqcount_begin() - begin a seqcount_t read critical section
    352 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    353 *
    354 * Return: count to be passed to read_seqcount_retry()
    355 */
    356#define read_seqcount_begin(s)						\
    357({									\
    358	seqcount_lockdep_reader_access(seqprop_ptr(s));			\
    359	raw_read_seqcount_begin(s);					\
    360})
    361
    362/**
    363 * raw_read_seqcount() - read the raw seqcount_t counter value
    364 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    365 *
    366 * raw_read_seqcount opens a read critical section of the given
    367 * seqcount_t, without any lockdep checking, and without checking or
    368 * masking the sequence counter LSB. Calling code is responsible for
    369 * handling that.
    370 *
    371 * Return: count to be passed to read_seqcount_retry()
    372 */
    373#define raw_read_seqcount(s)						\
    374({									\
    375	unsigned __seq = seqprop_sequence(s);				\
    376									\
    377	smp_rmb();							\
    378	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
    379	__seq;								\
    380})
    381
    382/**
    383 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
    384 *                        lockdep and w/o counter stabilization
    385 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    386 *
    387 * raw_seqcount_begin opens a read critical section of the given
    388 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
    389 * for the count to stabilize. If a writer is active when it begins, it
    390 * will fail the read_seqcount_retry() at the end of the read critical
    391 * section instead of stabilizing at the beginning of it.
    392 *
    393 * Use this only in special kernel hot paths where the read section is
    394 * small and has a high probability of success through other external
    395 * means. It will save a single branching instruction.
    396 *
    397 * Return: count to be passed to read_seqcount_retry()
    398 */
    399#define raw_seqcount_begin(s)						\
    400({									\
    401	/*								\
    402	 * If the counter is odd, let read_seqcount_retry() fail	\
    403	 * by decrementing the counter.					\
    404	 */								\
    405	raw_read_seqcount(s) & ~1;					\
    406})
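
/*
 * Example (illustrative sketch only): a single, non-stabilizing read
 * trial built on raw_seqcount_begin(); the caller falls back to another
 * path if the trial raced with a writer. The names foo_seq, foo_val and
 * foo_try_peek() are hypothetical.
 *
 *	bool foo_try_peek(int *val)
 *	{
 *		unsigned seq = raw_seqcount_begin(&foo_seq);
 *
 *		*val = foo_val;
 *		return !read_seqcount_retry(&foo_seq, seq);
 *	}
 */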
    407
    408/**
    409 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
    410 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    411 * @start: count, from read_seqcount_begin()
    412 *
    413 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
    414 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
    415 * provided before actually loading any of the variables that are to be
    416 * protected in this critical section.
    417 *
    418 * Use carefully, only in critical code, and comment how the barrier is
    419 * provided.
    420 *
    421 * Return: true if a read section retry is required, else false
    422 */
    423#define __read_seqcount_retry(s, start)					\
    424	do___read_seqcount_retry(seqprop_ptr(s), start)
    425
    426static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
    427{
    428	kcsan_atomic_next(0);
    429	return unlikely(READ_ONCE(s->sequence) != start);
    430}
    431
    432/**
    433 * read_seqcount_retry() - end a seqcount_t read critical section
    434 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    435 * @start: count, from read_seqcount_begin()
    436 *
    437 * read_seqcount_retry closes the read critical section of given
    438 * seqcount_t.  If the critical section was invalid, it must be ignored
    439 * (and typically retried).
    440 *
    441 * Return: true if a read section retry is required, else false
    442 */
    443#define read_seqcount_retry(s, start)					\
    444	do_read_seqcount_retry(seqprop_ptr(s), start)
    445
    446static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
    447{
    448	smp_rmb();
    449	return do___read_seqcount_retry(s, start);
    450}
    451
    452/**
    453 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
    454 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    455 *
    456 * Context: check write_seqcount_begin()
    457 */
    458#define raw_write_seqcount_begin(s)					\
    459do {									\
    460	if (seqprop_preemptible(s))					\
    461		preempt_disable();					\
    462									\
    463	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
    464} while (0)
    465
    466static inline void do_raw_write_seqcount_begin(seqcount_t *s)
    467{
    468	kcsan_nestable_atomic_begin();
    469	s->sequence++;
    470	smp_wmb();
    471}
    472
    473/**
    474 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
    475 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    476 *
    477 * Context: check write_seqcount_end()
    478 */
    479#define raw_write_seqcount_end(s)					\
    480do {									\
    481	do_raw_write_seqcount_end(seqprop_ptr(s));			\
    482									\
    483	if (seqprop_preemptible(s))					\
    484		preempt_enable();					\
    485} while (0)
    486
    487static inline void do_raw_write_seqcount_end(seqcount_t *s)
    488{
    489	smp_wmb();
    490	s->sequence++;
    491	kcsan_nestable_atomic_end();
    492}
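
/*
 * Example (illustrative sketch only): a raw write section where the
 * caller provides both writer serialization and non-preemptibility, here
 * via a hypothetical raw_spinlock_t foo_lock, with no lockdep coupling
 * between the lock and the plain seqcount_t foo_seq.
 *
 *	void foo_update(int val)
 *	{
 *		raw_spin_lock(&foo_lock);	// also disables preemption
 *		raw_write_seqcount_begin(&foo_seq);
 *		foo_val = val;
 *		raw_write_seqcount_end(&foo_seq);
 *		raw_spin_unlock(&foo_lock);
 *	}
 */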
    493
    494/**
    495 * write_seqcount_begin_nested() - start a seqcount_t write section with
    496 *                                 custom lockdep nesting level
    497 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    498 * @subclass: lockdep nesting level
    499 *
    500 * See Documentation/locking/lockdep-design.rst
    501 * Context: check write_seqcount_begin()
    502 */
    503#define write_seqcount_begin_nested(s, subclass)			\
    504do {									\
    505	seqprop_assert(s);						\
    506									\
    507	if (seqprop_preemptible(s))					\
    508		preempt_disable();					\
    509									\
    510	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
    511} while (0)
    512
    513static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
    514{
    515	do_raw_write_seqcount_begin(s);
    516	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
    517}
    518
    519/**
    520 * write_seqcount_begin() - start a seqcount_t write side critical section
    521 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    522 *
    523 * Context: sequence counter write side sections must be serialized and
    524 * non-preemptible. Preemption will be automatically disabled if and
    525 * only if the seqcount write serialization lock is associated, and
    526 * preemptible.  If readers can be invoked from hardirq or softirq
    527 * context, interrupts or bottom halves must be respectively disabled.
    528 */
    529#define write_seqcount_begin(s)						\
    530do {									\
    531	seqprop_assert(s);						\
    532									\
    533	if (seqprop_preemptible(s))					\
    534		preempt_disable();					\
    535									\
    536	do_write_seqcount_begin(seqprop_ptr(s));			\
    537} while (0)
    538
    539static inline void do_write_seqcount_begin(seqcount_t *s)
    540{
    541	do_write_seqcount_begin_nested(s, 0);
    542}
    543
    544/**
    545 * write_seqcount_end() - end a seqcount_t write side critical section
    546 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    547 *
    548 * Context: Preemption will be automatically re-enabled if and only if
    549 * the seqcount write serialization lock is associated, and preemptible.
    550 */
    551#define write_seqcount_end(s)						\
    552do {									\
    553	do_write_seqcount_end(seqprop_ptr(s));				\
    554									\
    555	if (seqprop_preemptible(s))					\
    556		preempt_enable();					\
    557} while (0)
    558
    559static inline void do_write_seqcount_end(seqcount_t *s)
    560{
    561	seqcount_release(&s->dep_map, _RET_IP_);
    562	do_raw_write_seqcount_end(s);
    563}
    564
    565/**
    566 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
    567 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    568 *
    569 * This can be used to provide an ordering guarantee instead of the usual
    570 * consistency guarantee. It is one wmb cheaper, because it can collapse
    571 * the two back-to-back wmb()s.
    572 *
    573 * Note that writes surrounding the barrier should be declared atomic (e.g.
    574 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
    575 * atomically, avoiding compiler optimizations; b) to document which writes are
    576 * meant to propagate to the reader critical section. This is necessary because
    577 * neither the writes before nor after the barrier are enclosed in a seq-writer
    578 * critical section that would ensure readers are aware of ongoing writes::
    579 *
    580 *	seqcount_t seq;
    581 *	bool X = true, Y = false;
    582 *
    583 *	void read(void)
    584 *	{
    585 *		bool x, y;
    586 *
    587 *		do {
    588 *			int s = read_seqcount_begin(&seq);
    589 *
    590 *			x = X; y = Y;
    591 *
    592 *		} while (read_seqcount_retry(&seq, s));
    593 *
    594 *		BUG_ON(!x && !y);
    595 *      }
    596 *
    597 *      void write(void)
    598 *      {
    599 *		WRITE_ONCE(Y, true);
    600 *
    601 *		raw_write_seqcount_barrier(seq);
    602 *
    603 *		WRITE_ONCE(X, false);
    604 *      }
    605 */
    606#define raw_write_seqcount_barrier(s)					\
    607	do_raw_write_seqcount_barrier(seqprop_ptr(s))
    608
    609static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
    610{
    611	kcsan_nestable_atomic_begin();
    612	s->sequence++;
    613	smp_wmb();
    614	s->sequence++;
    615	kcsan_nestable_atomic_end();
    616}
    617
    618/**
    619 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
    620 *                               side operations
    621 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
    622 *
    623 * After write_seqcount_invalidate, no seqcount_t read side operations
    624 * will complete successfully and see data older than this.
    625 */
    626#define write_seqcount_invalidate(s)					\
    627	do_write_seqcount_invalidate(seqprop_ptr(s))
    628
    629static inline void do_write_seqcount_invalidate(seqcount_t *s)
    630{
    631	smp_wmb();
    632	kcsan_nestable_atomic_begin();
    633	s->sequence += 2;
    634	kcsan_nestable_atomic_end();
    635}
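
/*
 * Example (illustrative sketch only): forcing any in-flight lockless
 * readers to retry after an update done under the writer lock, without
 * opening a full write section. The names foo_lock, foo_seq and
 * foo_disable() are hypothetical; writers are assumed to serialize via
 * foo_lock.
 *
 *	void foo_disable(struct foo *f)
 *	{
 *		spin_lock(&foo_lock);
 *		WRITE_ONCE(f->enabled, false);
 *		write_seqcount_invalidate(&foo_seq);	// racing lockless readers retry
 *		spin_unlock(&foo_lock);
 *	}
 */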
    636
    637/*
    638 * Latch sequence counters (seqcount_latch_t)
    639 *
    640 * A sequence counter variant where the counter even/odd value is used to
    641 * switch between two copies of protected data. This allows the read path,
    642 * typically NMIs, to safely interrupt the write side critical section.
    643 *
    644 * As the write sections are fully preemptible, no special handling for
    645 * PREEMPT_RT is needed.
    646 */
    647typedef struct {
    648	seqcount_t seqcount;
    649} seqcount_latch_t;
    650
    651/**
    652 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
    653 * @seq_name: Name of the seqcount_latch_t instance
    654 */
    655#define SEQCNT_LATCH_ZERO(seq_name) {					\
    656	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
    657}
    658
    659/**
    660 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
    661 * @s: Pointer to the seqcount_latch_t instance
    662 */
    663#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
    664
    665/**
    666 * raw_read_seqcount_latch() - pick even/odd latch data copy
    667 * @s: Pointer to seqcount_latch_t
    668 *
    669 * See raw_write_seqcount_latch() for details and a full reader/writer
    670 * usage example.
    671 *
    672 * Return: sequence counter raw value. Use the lowest bit as an index for
    673 * picking which data copy to read. The full counter must then be checked
    674 * with read_seqcount_latch_retry().
    675 */
    676static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
    677{
    678	/*
    679	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
    680	 * Due to the dependent load, a full smp_rmb() is not needed.
    681	 */
    682	return READ_ONCE(s->seqcount.sequence);
    683}
    684
    685/**
    686 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
    687 * @s:		Pointer to seqcount_latch_t
    688 * @start:	count, from raw_read_seqcount_latch()
    689 *
    690 * Return: true if a read section retry is required, else false
    691 */
    692static inline int
    693read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
    694{
    695	return read_seqcount_retry(&s->seqcount, start);
    696}
    697
    698/**
    699 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
    700 * @s: Pointer to seqcount_latch_t
    701 *
    702 * The latch technique is a multiversion concurrency control method that allows
    703 * queries during non-atomic modifications. If you can guarantee queries never
    704 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
    705 * -- you most likely do not need this.
    706 *
    707 * Where the traditional RCU/lockless data structures rely on atomic
    708 * modifications to ensure queries observe either the old or the new state the
    709 * latch allows the same for non-atomic updates. The trade-off is doubling the
    710 * cost of storage; we have to maintain two copies of the entire data
    711 * structure.
    712 *
    713 * Very simply put: we first modify one copy and then the other. This ensures
    714 * there is always one copy in a stable state, ready to give us an answer.
    715 *
    716 * The basic form is a data structure like::
    717 *
    718 *	struct latch_struct {
    719 *		seqcount_latch_t	seq;
    720 *		struct data_struct	data[2];
    721 *	};
    722 *
    723 * Where a modification, which is assumed to be externally serialized, does the
    724 * following::
    725 *
    726 *	void latch_modify(struct latch_struct *latch, ...)
    727 *	{
    728 *		smp_wmb();	// Ensure that the last data[1] update is visible
    729 *		latch->seq.sequence++;
    730 *		smp_wmb();	// Ensure that the seqcount update is visible
    731 *
    732 *		modify(latch->data[0], ...);
    733 *
    734 *		smp_wmb();	// Ensure that the data[0] update is visible
    735 *		latch->seq.sequence++;
    736 *		smp_wmb();	// Ensure that the seqcount update is visible
    737 *
    738 *		modify(latch->data[1], ...);
    739 *	}
    740 *
    741 * The query will have a form like::
    742 *
    743 *	struct entry *latch_query(struct latch_struct *latch, ...)
    744 *	{
    745 *		struct entry *entry;
    746 *		unsigned seq, idx;
    747 *
    748 *		do {
    749 *			seq = raw_read_seqcount_latch(&latch->seq);
    750 *
    751 *			idx = seq & 0x01;
    752 *			entry = data_query(latch->data[idx], ...);
    753 *
    754 *		// This includes needed smp_rmb()
    755 *		} while (read_seqcount_latch_retry(&latch->seq, seq));
    756 *
    757 *		return entry;
    758 *	}
    759 *
    760 * So during the modification, queries are first redirected to data[1]. Then we
    761 * modify data[0]. When that is complete, we redirect queries back to data[0]
    762 * and we can modify data[1].
    763 *
    764 * NOTE:
    765 *
    766 *	The non-requirement for atomic modifications does _NOT_ include
    767 *	the publishing of new entries in the case where data is a dynamic
    768 *	data structure.
    769 *
    770 *	An iteration might start in data[0] and get suspended long enough
    771 *	to miss an entire modification sequence, once it resumes it might
    772 *	observe the new entry.
    773 *
    774 * NOTE2:
    775 *
    776 *	When data is a dynamic data structure, one should use regular RCU
    777 *	patterns to manage the lifetimes of the objects within.
    778 */
    779static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
    780{
    781	smp_wmb();	/* prior stores before incrementing "sequence" */
    782	s->seqcount.sequence++;
    783	smp_wmb();      /* increment "sequence" before following stores */
    784}
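
/*
 * Example (illustrative sketch only): the latch_modify() routine from the
 * comment above, expressed via raw_write_seqcount_latch() instead of the
 * open-coded barriers and increments.
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);	// readers -> data[1]
 *		modify(latch->data[0], ...);
 *
 *		raw_write_seqcount_latch(&latch->seq);	// readers -> data[0]
 *		modify(latch->data[1], ...);
 *	}
 */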
    785
    786/*
    787 * Sequential locks (seqlock_t)
    788 *
    789 * Sequence counters with an embedded spinlock for writer serialization
    790 * and non-preemptibility.
    791 *
    792 * For more info, see:
    793 *    - Comments on top of seqcount_t
    794 *    - Documentation/locking/seqlock.rst
    795 */
    796typedef struct {
    797	/*
    798	 * Make sure that readers don't starve writers on PREEMPT_RT: use
    799	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
    800	 */
    801	seqcount_spinlock_t seqcount;
    802	spinlock_t lock;
    803} seqlock_t;
    804
    805#define __SEQLOCK_UNLOCKED(lockname)					\
    806	{								\
    807		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
    808		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
    809	}
    810
    811/**
    812 * seqlock_init() - dynamic initializer for seqlock_t
    813 * @sl: Pointer to the seqlock_t instance
    814 */
    815#define seqlock_init(sl)						\
    816	do {								\
    817		spin_lock_init(&(sl)->lock);				\
    818		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
    819	} while (0)
    820
    821/**
    822 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
    823 * @sl: Name of the seqlock_t instance
    824 */
    825#define DEFINE_SEQLOCK(sl) \
    826		seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
    827
    828/**
    829 * read_seqbegin() - start a seqlock_t read side critical section
    830 * @sl: Pointer to seqlock_t
    831 *
    832 * Return: count, to be passed to read_seqretry()
    833 */
    834static inline unsigned read_seqbegin(const seqlock_t *sl)
    835{
    836	unsigned ret = read_seqcount_begin(&sl->seqcount);
    837
    838	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
    839	kcsan_flat_atomic_begin();
    840	return ret;
    841}
    842
    843/**
    844 * read_seqretry() - end a seqlock_t read side section
    845 * @sl: Pointer to seqlock_t
    846 * @start: count, from read_seqbegin()
    847 *
    848 * read_seqretry closes the read side critical section of given seqlock_t.
    849 * If the critical section was invalid, it must be ignored (and typically
    850 * retried).
    851 *
    852 * Return: true if a read section retry is required, else false
    853 */
    854static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
    855{
    856	/*
    857	 * Assume not nested: read_seqretry() may be called multiple times when
    858	 * completing a read critical section.
    859	 */
    860	kcsan_flat_atomic_end();
    861
    862	return read_seqcount_retry(&sl->seqcount, start);
    863}
    864
    865/*
    866 * For all seqlock_t write side functions, use the internal
    867 * do_write_seqcount_begin() instead of generic write_seqcount_begin().
    868 * This way, no redundant lockdep_assert_held() checks are added.
    869 */
    870
    871/**
    872 * write_seqlock() - start a seqlock_t write side critical section
    873 * @sl: Pointer to seqlock_t
    874 *
    875 * write_seqlock opens a write side critical section for the given
    876 * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
    877 * that sequential lock. All seqlock_t write side sections are thus
    878 * automatically serialized and non-preemptible.
    879 *
    880 * Context: if the seqlock_t read section, or other write side critical
    881 * sections, can be invoked from hardirq or softirq contexts, use the
    882 * _irqsave or _bh variants of this function instead.
    883 */
    884static inline void write_seqlock(seqlock_t *sl)
    885{
    886	spin_lock(&sl->lock);
    887	do_write_seqcount_begin(&sl->seqcount.seqcount);
    888}
    889
    890/**
    891 * write_sequnlock() - end a seqlock_t write side critical section
    892 * @sl: Pointer to seqlock_t
    893 *
    894 * write_sequnlock closes the (serialized and non-preemptible) write side
    895 * critical section of given seqlock_t.
    896 */
    897static inline void write_sequnlock(seqlock_t *sl)
    898{
    899	do_write_seqcount_end(&sl->seqcount.seqcount);
    900	spin_unlock(&sl->lock);
    901}
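
/*
 * Example (illustrative sketch only): a seqlock_t protecting two values,
 * with lockless readers and implicitly serialized, non-preemptible
 * writers. The names foo_lock, foo_x, foo_y, foo_get() and foo_set() are
 * hypothetical.
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *	static u64 foo_x, foo_y;
 *
 *	void foo_get(u64 *x, u64 *y)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqbegin(&foo_lock);
 *			*x = foo_x;
 *			*y = foo_y;
 *		} while (read_seqretry(&foo_lock, seq));
 *	}
 *
 *	void foo_set(u64 x, u64 y)
 *	{
 *		write_seqlock(&foo_lock);
 *		foo_x = x;
 *		foo_y = y;
 *		write_sequnlock(&foo_lock);
 *	}
 */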
    902
    903/**
    904 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
    905 * @sl: Pointer to seqlock_t
    906 *
    907 * _bh variant of write_seqlock(). Use only if the read side section, or
    908 * other write side sections, can be invoked from softirq contexts.
    909 */
    910static inline void write_seqlock_bh(seqlock_t *sl)
    911{
    912	spin_lock_bh(&sl->lock);
    913	do_write_seqcount_begin(&sl->seqcount.seqcount);
    914}
    915
    916/**
    917 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
    918 * @sl: Pointer to seqlock_t
    919 *
    920 * write_sequnlock_bh closes the serialized, non-preemptible, and
    921 * softirqs-disabled, seqlock_t write side critical section opened with
    922 * write_seqlock_bh().
    923 */
    924static inline void write_sequnlock_bh(seqlock_t *sl)
    925{
    926	do_write_seqcount_end(&sl->seqcount.seqcount);
    927	spin_unlock_bh(&sl->lock);
    928}
    929
    930/**
    931 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
    932 * @sl: Pointer to seqlock_t
    933 *
    934 * _irq variant of write_seqlock(). Use only if the read side section, or
    935 * other write sections, can be invoked from hardirq contexts.
    936 */
    937static inline void write_seqlock_irq(seqlock_t *sl)
    938{
    939	spin_lock_irq(&sl->lock);
    940	do_write_seqcount_begin(&sl->seqcount.seqcount);
    941}
    942
    943/**
    944 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
    945 * @sl: Pointer to seqlock_t
    946 *
    947 * write_sequnlock_irq closes the serialized and non-interruptible
    948 * seqlock_t write side section opened with write_seqlock_irq().
    949 */
    950static inline void write_sequnlock_irq(seqlock_t *sl)
    951{
    952	do_write_seqcount_end(&sl->seqcount.seqcount);
    953	spin_unlock_irq(&sl->lock);
    954}
    955
    956static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
    957{
    958	unsigned long flags;
    959
    960	spin_lock_irqsave(&sl->lock, flags);
    961	do_write_seqcount_begin(&sl->seqcount.seqcount);
    962	return flags;
    963}
    964
    965/**
    966 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
    967 *                           section
    968 * @lock:  Pointer to seqlock_t
    969 * @flags: Stack-allocated storage for saving caller's local interrupt
    970 *         state, to be passed to write_sequnlock_irqrestore().
    971 *
    972 * _irqsave variant of write_seqlock(). Use it only if the read side
    973 * section, or other write sections, can be invoked from hardirq context.
    974 */
    975#define write_seqlock_irqsave(lock, flags)				\
    976	do { flags = __write_seqlock_irqsave(lock); } while (0)
    977
    978/**
    979 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
    980 *                                section
    981 * @sl:    Pointer to seqlock_t
    982 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
    983 *
    984 * write_sequnlock_irqrestore closes the serialized and non-interruptible
    985 * seqlock_t write section previously opened with write_seqlock_irqsave().
    986 */
    987static inline void
    988write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
    989{
    990	do_write_seqcount_end(&sl->seqcount.seqcount);
    991	spin_unlock_irqrestore(&sl->lock, flags);
    992}
    993
    994/**
    995 * read_seqlock_excl() - begin a seqlock_t locking reader section
    996 * @sl:	Pointer to seqlock_t
    997 *
    998 * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
    999 * locking reader exclusively locks out *both* other writers *and* other
   1000 * locking readers, but it does not update the embedded sequence number.
   1001 *
   1002 * Locking readers act like a normal spin_lock()/spin_unlock().
   1003 *
   1004 * Context: if the seqlock_t write section, *or other read sections*, can
   1005 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
   1006 * variant of this function instead.
   1007 *
   1008 * The opened read section must be closed with read_sequnlock_excl().
   1009 */
   1010static inline void read_seqlock_excl(seqlock_t *sl)
   1011{
   1012	spin_lock(&sl->lock);
   1013}
   1014
   1015/**
   1016 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
   1017 * @sl: Pointer to seqlock_t
   1018 */
   1019static inline void read_sequnlock_excl(seqlock_t *sl)
   1020{
   1021	spin_unlock(&sl->lock);
   1022}
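
/*
 * Example (illustrative sketch only): a locking reader that must not spin
 * against a busy writer and also excludes other locking readers. The
 * names foo_lock, foo_list, struct foo and foo_count() are hypothetical.
 *
 *	int foo_count(void)
 *	{
 *		struct foo *f;
 *		int n = 0;
 *
 *		read_seqlock_excl(&foo_lock);
 *		list_for_each_entry(f, &foo_list, node)
 *			n++;
 *		read_sequnlock_excl(&foo_lock);
 *
 *		return n;
 *	}
 */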
   1023
   1024/**
   1025 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
   1026 *			    softirqs disabled
   1027 * @sl: Pointer to seqlock_t
   1028 *
   1029 * _bh variant of read_seqlock_excl(). Use this variant only if the
   1030 * seqlock_t write side section, *or other read sections*, can be invoked
   1031 * from softirq contexts.
   1032 */
   1033static inline void read_seqlock_excl_bh(seqlock_t *sl)
   1034{
   1035	spin_lock_bh(&sl->lock);
   1036}
   1037
   1038/**
   1039 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
   1040 *			      reader section
   1041 * @sl: Pointer to seqlock_t
   1042 */
   1043static inline void read_sequnlock_excl_bh(seqlock_t *sl)
   1044{
   1045	spin_unlock_bh(&sl->lock);
   1046}
   1047
   1048/**
   1049 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
   1050 *			     reader section
   1051 * @sl: Pointer to seqlock_t
   1052 *
   1053 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
   1054 * write side section, *or other read sections*, can be invoked from a
   1055 * hardirq context.
   1056 */
   1057static inline void read_seqlock_excl_irq(seqlock_t *sl)
   1058{
   1059	spin_lock_irq(&sl->lock);
   1060}
   1061
   1062/**
   1063 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
   1064 *                             locking reader section
   1065 * @sl: Pointer to seqlock_t
   1066 */
   1067static inline void read_sequnlock_excl_irq(seqlock_t *sl)
   1068{
   1069	spin_unlock_irq(&sl->lock);
   1070}
   1071
   1072static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
   1073{
   1074	unsigned long flags;
   1075
   1076	spin_lock_irqsave(&sl->lock, flags);
   1077	return flags;
   1078}
   1079
   1080/**
   1081 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
   1082 *				 locking reader section
   1083 * @lock:  Pointer to seqlock_t
   1084 * @flags: Stack-allocated storage for saving caller's local interrupt
   1085 *         state, to be passed to read_sequnlock_excl_irqrestore().
   1086 *
   1087 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
   1088 * write side section, *or other read sections*, can be invoked from a
   1089 * hardirq context.
   1090 */
   1091#define read_seqlock_excl_irqsave(lock, flags)				\
   1092	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
   1093
   1094/**
   1095 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
   1096 *				      locking reader section
   1097 * @sl:    Pointer to seqlock_t
   1098 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
   1099 */
   1100static inline void
   1101read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
   1102{
   1103	spin_unlock_irqrestore(&sl->lock, flags);
   1104}
   1105
   1106/**
   1107 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
   1108 * @lock: Pointer to seqlock_t
   1109 * @seq : Marker and return parameter. If the passed value is even, the
   1110 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
   1111 * If the passed value is odd, the reader will become a *locking* reader
   1112 * as in read_seqlock_excl().  In the first call to this function, the
   1113 * caller *must* initialize and pass an even value to @seq; this way, a
   1114 * lockless read can be optimistically tried first.
   1115 *
   1116 * read_seqbegin_or_lock is an API designed to optimistically try a normal
   1117 * lockless seqlock_t read section first.  If an odd counter is found, the
   1118 * lockless read trial has failed, and the next read iteration transforms
   1119 * itself into a full seqlock_t locking reader.
   1120 *
   1121 * This is typically used to avoid seqlock_t lockless reader starvation
   1122 * (too many retry loops) in the case of a sharp spike in write side
   1123 * activity.
   1124 *
   1125 * Context: if the seqlock_t write section, *or other read sections*, can
   1126 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
   1127 * variant of this function instead.
   1128 *
   1129 * Check Documentation/locking/seqlock.rst for template example code.
   1130 *
   1131 * Return: the encountered sequence counter value, through the @seq
   1132 * parameter, which is overloaded as a return parameter. This returned
   1133 * value must be checked with need_seqretry(). If the read section needs to
   1134 * be retried, this returned value must also be passed as the @seq
   1135 * parameter of the next read_seqbegin_or_lock() iteration.
   1136 */
   1137static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
   1138{
   1139	if (!(*seq & 1))	/* Even */
   1140		*seq = read_seqbegin(lock);
   1141	else			/* Odd */
   1142		read_seqlock_excl(lock);
   1143}
   1144
   1145/**
   1146 * need_seqretry() - validate seqlock_t "locking or lockless" read section
   1147 * @lock: Pointer to seqlock_t
   1148 * @seq: sequence count, from read_seqbegin_or_lock()
   1149 *
   1150 * Return: true if a read section retry is required, false otherwise
   1151 */
   1152static inline int need_seqretry(seqlock_t *lock, int seq)
   1153{
   1154	return !(seq & 1) && read_seqretry(lock, seq);
   1155}
   1156
   1157/**
   1158 * done_seqretry() - end seqlock_t "locking or lockless" reader section
   1159 * @lock: Pointer to seqlock_t
   1160 * @seq: count, from read_seqbegin_or_lock()
   1161 *
   1162 * done_seqretry finishes the seqlock_t read side critical section started
   1163 * with read_seqbegin_or_lock() and validated by need_seqretry().
   1164 */
   1165static inline void done_seqretry(seqlock_t *lock, int seq)
   1166{
   1167	if (seq & 1)
   1168		read_sequnlock_excl(lock);
   1169}
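
/*
 * Example (illustrative sketch only): one way to combine the above into a
 * conditional lockless/locking reader, retrying once as a locking reader
 * if the lockless trial failed. The names foo_lock, foo_val and
 * foo_read() are hypothetical.
 *
 *	int foo_read(void)
 *	{
 *		int seq = 0;	// even: first trial is lockless
 *		int val;
 *
 *	retry:
 *		read_seqbegin_or_lock(&foo_lock, &seq);
 *		val = foo_val;
 *		if (need_seqretry(&foo_lock, seq)) {
 *			seq = 1;	// odd: retry as a locking reader
 *			goto retry;
 *		}
 *		done_seqretry(&foo_lock, seq);
 *
 *		return val;
 *	}
 */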
   1170
   1171/**
   1172 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
   1173 *                                   a non-interruptible locking reader
   1174 * @lock: Pointer to seqlock_t
   1175 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
   1176 *
   1177 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
   1178 * the seqlock_t write section, *or other read sections*, can be invoked
   1179 * from hardirq context.
   1180 *
   1181 * Note: Interrupts will be disabled only for "locking reader" mode.
   1182 *
   1183 * Return:
   1184 *
   1185 *   1. The saved local interrupts state in case of a locking reader, to
   1186 *      be passed to done_seqretry_irqrestore().
   1187 *
   1188 *   2. The encountered sequence counter value, returned through @seq
   1189 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
   1190 */
   1191static inline unsigned long
   1192read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
   1193{
   1194	unsigned long flags = 0;
   1195
   1196	if (!(*seq & 1))	/* Even */
   1197		*seq = read_seqbegin(lock);
   1198	else			/* Odd */
   1199		read_seqlock_excl_irqsave(lock, flags);
   1200
   1201	return flags;
   1202}
   1203
   1204/**
   1205 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
   1206 *				non-interruptible locking reader section
   1207 * @lock:  Pointer to seqlock_t
   1208 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
   1209 * @flags: Caller's saved local interrupt state in case of a locking
   1210 *	   reader, also from read_seqbegin_or_lock_irqsave()
   1211 *
   1212 * This is the _irqrestore variant of done_seqretry(). The read section
   1213 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
   1214 * by need_seqretry().
   1215 */
   1216static inline void
   1217done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
   1218{
   1219	if (seq & 1)
   1220		read_sequnlock_excl_irqrestore(lock, flags);
   1221}
   1222#endif /* __LINUX_SEQLOCK_H */