percpu-rwsem.h (4257B)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rcuwait		writer;
	wait_queue_head_t	waiters;
	atomic_t		block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),	\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_rcu() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if they see our decrement (presumably to
		 * aggregate zero, as that is the only time it matters) they
		 * will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})

#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}

#endif
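
Usage sketch (not part of the header itself): percpu_down_read()/percpu_up_read() form the cheap, frequent side of the lock, whose fast path is only a per-CPU counter increment/decrement, while percpu_down_write()/percpu_up_write() form the rare, heavyweight writer side. The sketch below relies only on the API declared above; the my_table_* names and the table they refer to are hypothetical.

/* Hypothetical consumer of the API declared in percpu-rwsem.h. */
#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(my_table_rwsem);

/* Hot path: many concurrent readers; fast path is a per-CPU increment. */
static int my_table_lookup(int key)
{
	int val;

	percpu_down_read(&my_table_rwsem);
	val = key;	/* ... read the shared table under the read lock ... */
	percpu_up_read(&my_table_rwsem);

	return val;
}

/* Cold path: the writer excludes all readers while it rebuilds the table. */
static void my_table_rehash(void)
{
	percpu_down_write(&my_table_rwsem);
	/* ... readers are excluded; safe to modify the table ... */
	percpu_up_write(&my_table_rwsem);
}

For a semaphore embedded in a dynamically allocated object, percpu_init_rwsem() would be called in the object's setup path and percpu_free_rwsem() in its teardown path, instead of using the DEFINE_*_PERCPU_RWSEM macros.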