cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

refcount.h (12371B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 * 	                           INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, &r->refs);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
 * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
 * With the current PID limit, if no batched refcounting operations are used and
 * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
 * operations, this makes it impossible for a saturated refcount to leave the
 * saturation range, even if it is possible for multiple uses of the same
 * refcount to nest in the context of a single task:
 *
 *     (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *     0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed with respect to regular atomic_t
 * functions and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
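
/*
 * A minimal sketch (illustrative only) of the ordering guarantees described
 * above; 'struct foo' and its members are hypothetical, not part of this
 * header. Two threads each hold a reference:
 *
 *	// Thread A (count 2->1): the release ordering of the decrement
 *	// publishes the preceding store.
 *	foo->data = 42;
 *	if (refcount_dec_and_test(&foo->ref))
 *		kfree(foo);
 *
 *	// Thread B, supposing it performs the final decrement (1->0): the
 *	// acquire ordering on success guarantees the free observes
 *	// foo->data == 42.
 *	if (refcount_dec_and_test(&foo->ref))
 *		kfree(foo);
 */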

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)
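
/*
 * Illustrative only: a statically allocated object conventionally starts
 * life with one reference; the name 'foo_root_ref' is made up for this
 * example.
 *
 *	static refcount_t foo_root_ref = REFCOUNT_INIT(1);
 *
 * Note that REFCOUNT_SATURATED == INT_MIN / 2 is 0xc000_0000 when viewed as
 * unsigned, matching the diagram at the top of this file.
 */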

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}
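
/*
 * Illustrative only: constructors conventionally use refcount_set() to hand
 * the initial reference to the caller. 'struct foo' and foo_alloc() are
 * hypothetical:
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (!foo)
 *			return NULL;
 *		refcount_set(&foo->ref, 1);
 *		return foo;
 *	}
 */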

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
	int old = refcount_read(r);

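	/*
	 * Commit old + i only while the count remains non-zero; on failure,
	 * atomic_try_cmpxchg_relaxed() reloads the current value into 'old'
	 * and the loop retries.
	 */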
	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return __refcount_add_not_zero(i, r, NULL);
}

static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (oldp)
		*oldp = old;

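	/*
	 * old == 0 means we may have resurrected an object whose final
	 * reference was already dropped (use-after-free); a negative old
	 * value, or a sum that wrapped negative, means the count has entered
	 * the saturation range.
	 */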
	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	__refcount_add(i, r, NULL);
}
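
/*
 * Illustrative only: a batched get takes one reference per queued item in a
 * single atomic operation. 'foo', 'n', 'foo_wq' and the work members are
 * hypothetical:
 *
 *	refcount_add(n, &foo->ref);
 *	for (i = 0; i < n; i++)
 *		queue_work(foo_wq, &foo->work[i]);
 */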

static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero(1, r, oldp);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return __refcount_inc_not_zero(r, NULL);
}
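
/*
 * Illustrative only: the canonical lockless-lookup pattern takes a reference
 * only if the object has not already dropped to zero. 'foo_table', 'idx' and
 * 'struct foo' are hypothetical:
 *
 *	rcu_read_lock();
 *	foo = rcu_dereference(foo_table[idx]);
 *	if (foo && !refcount_inc_not_zero(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 */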

static inline void __refcount_inc(refcount_t *r, int *oldp)
{
	__refcount_add(1, r, oldp);
}

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	__refcount_inc(r, NULL);
}
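
/*
 * Illustrative only: a trivial get helper for callers that already hold a
 * reference. 'struct foo' and foo_get() are hypothetical:
 *
 *	static struct foo *foo_get(struct foo *foo)
 *	{
 *		refcount_inc(&foo->ref);
 *		return foo;
 *	}
 */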

static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (oldp)
		*oldp = old;

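	/*
	 * old == i means this call dropped the count to zero. Upgrade the
	 * release ordering of the fetch_sub with an acquire barrier so that
	 * the caller's subsequent free() cannot be reordered before this
	 * point.
	 */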
	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return __refcount_sub_and_test(i, r, NULL);
}

static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
	return __refcount_sub_and_test(1, r, oldp);
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return __refcount_dec_and_test(r, NULL);
}
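
/*
 * Illustrative only: the canonical put path frees the object on the 1->0
 * transition. foo_put() and 'struct foo' are hypothetical:
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (refcount_dec_and_test(&foo->ref))
 *			kfree(foo);
 *	}
 */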

static inline void __refcount_dec(refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (oldp)
		*oldp = old;

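	/*
	 * refcount_dec() is not meant to drop the final reference: hitting
	 * zero here (old == 1) means nobody will free the object, hence the
	 * leak warning; old < 1 indicates an underflow.
	 */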
	if (unlikely(old <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	__refcount_dec(r, NULL);
}

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags) __cond_acquires(lock);
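
/*
 * Illustrative only: refcount_dec_and_lock() takes the lock only for the
 * final put, so the unlink and free happen atomically with respect to
 * lookups without paying for the lock on every put. All names below are
 * hypothetical:
 *
 *	if (refcount_dec_and_lock(&foo->ref, &foo_lock)) {
 *		list_del(&foo->node);
 *		spin_unlock(&foo_lock);
 *		kfree(foo);
 *	}
 */
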
#endif /* _LINUX_REFCOUNT_H */