cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rwsem.h (7466B)


/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline and cause
 * cacheline bouncing. (An illustrative layout sketch follows the
 * structure below.)
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
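
/*
 * Illustrative layout sketch (not part of the original header; all names are
 * made up): per the comment above, an embedded rwsem's count/owner words are
 * spun on by waiters, so other frequently written fields of the containing
 * object are best kept off that cacheline, e.g. via ____cacheline_aligned.
 *
 *	struct hypothetical_object {
 *		struct rw_semaphore	sem;	// hot: owner/count are spun on
 *		struct list_head	link;	// rarely written, fine next to it
 *		atomic_long_t		stats ____cacheline_aligned;
 *						// hot counter on its own line
 *	};
 */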

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != 0;
}
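
/*
 * Usage sketch (illustrative; the helper below is hypothetical):
 * rwsem_is_locked() only says that *somebody* holds the rwsem, so it is
 * mostly useful for debug-style assertions rather than locking decisions.
 *
 *	static void hypothetical_assert_held(struct rw_semaphore *sem)
 *	{
 *		WARN_ON_ONCE(!rwsem_is_locked(sem));
 *	}
 */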

#define RWSEM_UNLOCKED_VALUE		0L
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
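
/*
 * Initialization sketch (illustrative; names are made up): a file-scope rwsem
 * can be defined with DECLARE_RWSEM(), while an rwsem embedded in a
 * dynamically allocated object must be set up with init_rwsem() before first
 * use so that lockdep gets a static lock class key for it.
 *
 *	static DECLARE_RWSEM(hypothetical_global_sem);
 *
 *	struct hypothetical_ctx {
 *		struct rw_semaphore sem;
 *	};
 *
 *	static void hypothetical_ctx_init(struct hypothetical_ctx *ctx)
 *	{
 *		init_rwsem(&ctx->sem);
 *	}
 */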

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody of an incompatible type wants access to the
 * lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
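
/*
 * Contention sketch (illustrative; the helpers and the rwsem are made up): a
 * long-running reader that already holds the rwsem can poll
 * rwsem_is_contended() and briefly drop the lock so queued waiters of the
 * other type can get in.
 *
 *	down_read(&hypothetical_sem);
 *	while (hypothetical_have_work()) {
 *		hypothetical_do_chunk();
 *		if (rwsem_is_contended(&hypothetical_sem)) {
 *			up_read(&hypothetical_sem);
 *			cond_resched();
 *			down_read(&hypothetical_sem);
 *		}
 *	}
 *	up_read(&hypothetical_sem);
 */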

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt	rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void  __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			  struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);
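
/*
 * Reader-side sketch (illustrative; the function is hypothetical):
 * down_read() sleeps uninterruptibly, the _interruptible/_killable variants
 * return -EINTR when a (fatal) signal arrives, and down_read_trylock()
 * returns 1 on success without ever sleeping.
 *
 *	static int hypothetical_read_state(struct rw_semaphore *sem)
 *	{
 *		int ret = down_read_killable(sem);
 *
 *		if (ret)		// -EINTR: killed while waiting
 *			return ret;
 *		// read the shared state here
 *		up_read(sem);
 *		return 0;
 *	}
 */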

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);
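
/*
 * Writer-side sketch (illustrative; the function is hypothetical): writers
 * exclude both readers and other writers; down_write_trylock() suits paths
 * that must not sleep waiting for the lock.
 *
 *	static bool hypothetical_try_update(struct rw_semaphore *sem)
 *	{
 *		if (!down_write_trylock(sem))
 *			return false;	// lock is busy, caller retries later
 *		// modify the shared state here
 *		up_write(sem);
 *		return true;
 *	}
 */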

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
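
/*
 * Downgrade sketch (illustrative; the helpers and the rwsem are made up): a
 * common pattern is to set up state under the write lock, downgrade_write()
 * so new readers are no longer excluded, keep reading, and finally release
 * with up_read().
 *
 *	down_write(&hypothetical_sem);
 *	hypothetical_publish_state();
 *	downgrade_write(&hypothetical_sem);	// now held for read
 *	hypothetical_consume_state();		// concurrent readers allowed
 *	up_read(&hypothetical_sem);
 */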

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)
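
/*
 * Nesting sketch (illustrative; the objects and the subclass value are made
 * up): when two rwsems of the same lock class are always taken in a fixed
 * order, e.g. parent before child, the inner acquisition can be annotated
 * with a nonzero subclass so lockdep does not flag it as a self-deadlock.
 *
 *	down_write(&parent->sem);
 *	down_write_nested(&child->sem, 1);	// subclass 1: the "child" level
 *	// work on both objects here
 *	up_write(&child->sem);
 *	up_write(&parent->sem);
 */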

/*
 * Take/release a lock when the task that acquires it is not the one
 * that will release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
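
/*
 * Non-owner sketch (illustrative; the flow and the rwsem are made up): the
 * lock is taken in one context and released from another, e.g. after
 * asynchronous completion. As noted above, a completion is usually the
 * better abstraction.
 *
 *	// submitting task:
 *	down_read_non_owner(&hypothetical_sem);
 *	hypothetical_submit_async_work();
 *
 *	// completion path, possibly a different task:
 *	up_read_non_owner(&hypothetical_sem);
 */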
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */