cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

percpu-refcount.h (11493B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref.  After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
 * to synchronize with RCU protected lookup_ioctx().  percpu_ref operations don't
 * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
 * with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once.  percpu_ref_kill() returns void and merely WARNs when called more
 * than once, so callers whose teardown paths can race (the aio code is one
 * example) must provide that synchronization themselves; it's not necessary
 * if the code has some other mechanism to serialize teardown.
 */
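
/*
 * A minimal lifecycle sketch (illustrative only: struct my_obj and
 * my_obj_release() are assumed names for the example, not part of this
 * header):
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		kfree(obj);
 *	}
 *
 *	Setup takes the initial ref:
 *
 *	err = percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *
 *	The hot path uses cheap percpu gets and puts:
 *
 *	percpu_ref_get(&obj->ref);
 *	percpu_ref_put(&obj->ref);
 *
 *	Teardown kills the ref first and only then drops the initial ref;
 *	my_obj_release() runs once the count reaches zero:
 *
 *	percpu_ref_kill(&obj->ref);
 *	percpu_ref_put(&obj->ref);
 */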

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before use.  Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};
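
/*
 * Flag usage sketch (q_ref and q_release are assumed names for the
 * example).  Starting dead means no gets can succeed until the ref is
 * revived; PERCPU_REF_INIT_DEAD implies INIT_ATOMIC and ALLOW_REINIT:
 *
 *	err = percpu_ref_init(&q_ref, q_release, PERCPU_REF_INIT_DEAD,
 *			      GFP_KERNEL);
 *	...
 *	percpu_ref_reinit(&q_ref);		// revive: ref == 1, atomic mode
 *	percpu_ref_switch_to_percpu(&q_ref);	// opt into the percpu fast path
 */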

struct percpu_ref_data {
	atomic_long_t		count;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	bool			allow_reinit:1;
	struct rcu_head		rcu;
	struct percpu_ref	*ref;
};

struct percpu_ref {
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;

	/*
	 * 'percpu_ref' is often embedded in a user structure, and only
	 * 'percpu_count_ptr' is needed on the fast path, so the other
	 * fields are moved into 'percpu_ref_data' to reduce the memory
	 * footprint on the fast path.
	 */
	struct percpu_ref_data  *data;
};
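
/*
 * The fast-path footprint of an embedded ref is therefore just two words
 * (a sketch, assuming an LP64 build):
 *
 *	BUILD_BUG_ON(sizeof(struct percpu_ref) != 2 * sizeof(void *));
 */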

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);

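/*
 * Mode-switch sketch (q_ref as above is an assumed name): a caller that
 * temporarily needs a precise count can drain the percpu counters into the
 * atomic_t and later return to the fast path:
 *
 *	percpu_ref_switch_to_atomic_sync(&q_ref);	// counts drained on return
 *	...					// operate with exact atomic counting
 *	percpu_ref_switch_to_percpu(&q_ref);	// back to the percpu fast path
 */
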
/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
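
/*
 * Confirmed-kill sketch (my_obj from the lifecycle example above;
 * my_confirm_kill is an assumed callback name):
 *
 *	static void my_confirm_kill(struct percpu_ref *ref)
 *	{
 *		// Invoked once the switch to atomic mode has completed;
 *		// from this point percpu_ref_tryget_live() is guaranteed
 *		// to fail for this ref.
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_confirm_kill);
 *	percpu_ref_put(&obj->ref);	// safe to drop the initial ref now
 */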

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so would force the compiler to generate two conditional
 * branches, as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
					  unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value; that is why READ_ONCE()
	 * is required when fetching it.
	 *
	 * The dependency ordering from the READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->data->count);

	rcu_read_unlock();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount by @nr unless its count has already reached
 * zero.  Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
					  unsigned long nr)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_add(*percpu_count, nr);
		ret = true;
	} else {
		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
	}

	rcu_read_unlock();

	return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count has already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	return percpu_ref_tryget_many(ref, 1);
}

/**
 * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
 * caller is responsible for holding the RCU read lock.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(__ref_is_percpu(ref, &percpu_count))) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->data->count);
	}
	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	bool ret = false;

	rcu_read_lock();
	ret = percpu_ref_tryget_live_rcu(ref);
	rcu_read_unlock();
	return ret;
}
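
/*
 * Lookup-side sketch, modeled on the lookup_ioctx() pattern described at
 * the top of this file (my_find() and my_table are assumed names):
 *
 *	rcu_read_lock();
 *	obj = my_find(my_table, id);			// RCU-protected lookup
 *	if (obj && !percpu_ref_tryget_live_rcu(&obj->ref))
 *		obj = NULL;				// already killed; don't use
 *	rcu_read_unlock();
 */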

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
		ref->data->release(ref);

	rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}
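
/*
 * Release-side sketch: if lookups are RCU-protected, the release callback
 * must defer the actual free through call_rcu(), as noted at the top of
 * this file (my_obj and its rcu_head member are assumed for the example):
 *
 *	static void my_obj_free_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct my_obj, rcu));
 *	}
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		call_rcu(&obj->rcu, my_obj_free_rcu);
 *	}
 */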

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

#endif