cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

refcount.h (4207B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_REFCOUNT_H
#define _TOOLS_LINUX_REFCOUNT_H

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */
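
/*
 * Illustrative usage sketch, not part of the original header: 'struct obj',
 * obj_alloc(), malloc() and the field names are assumptions made up here to
 * show how a refcount_t is typically embedded in an object and initialised
 * to 1 for the creator's reference.
 *
 *	struct obj {
 *		refcount_t ref;
 *		// object payload ...
 *	};
 *
 *	struct obj *obj_alloc(void)
 *	{
 *		struct obj *o = malloc(sizeof(*o));
 *
 *		if (o)
 *			refcount_set(&o->ref, 1);	// creator holds the first reference
 *		return o;
 *	}
 */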

#include <linux/atomic.h>
#include <linux/kernel.h>

#ifdef NDEBUG
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#else
#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
#define __refcount_check	__must_check
#endif

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		/* The count is 0: the object may already be being freed, don't acquire. */
		if (!val)
			return false;

		/* val was UINT_MAX: the counter is saturated, leave it there. */
		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost the race against a concurrent update; retry with the new value. */
		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
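
/*
 * Sketch of the lookup pattern hinted at above; obj_lookup(), find_in_table()
 * and 'struct obj' are hypothetical names used only for illustration. The
 * object's memory is kept stable by RCU, and the reference only counts as
 * acquired when refcount_inc_not_zero() returns true.
 *
 *	struct obj *obj_lookup(unsigned long key)
 *	{
 *		struct obj *o;
 *
 *		rcu_read_lock();
 *		o = find_in_table(key);
 *		if (o && !refcount_inc_not_zero(&o->ref))
 *			o = NULL;	// already on its way to being freed
 *		rcu_read_unlock();
 *		return o;
 *	}
 */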

/*
 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object, and will WARN when this is not so.
 */
static inline void refcount_inc(refcount_t *r)
{
	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
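
/*
 * Example sketch (hypothetical obj_share() helper): refcount_inc() is only
 * valid while the caller already holds a reference of its own, e.g. when
 * handing the same object to a second owner.
 *
 *	void obj_share(struct obj *o)	// caller holds a reference to 'o'
 *	{
 *		refcount_inc(&o->ref);	// the second owner now holds one too
 *	}
 */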

/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* Saturated counters are never decremented. */
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			/* Unsigned subtraction wrapped: more puts than gets. */
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost the race against a concurrent update; retry with the new value. */
		val = old;
	}

	return !new;
}

static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
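
/*
 * Sketch of the matching put path (hypothetical obj_put(), continuing the
 * examples above): the release ordering of the decrement plus its control
 * dependency are what make it safe to free the object once
 * refcount_dec_and_test() reports the 1->0 transition.
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->ref))
 *			free(o);	// last reference gone; no one else can see 'o'
 *	}
 */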


#endif /* _TOOLS_LINUX_REFCOUNT_H */