cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lockref.c (4025B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
		struct lockref new = old;					\
		CODE								\
		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
						 &old.lock_count,		\
						 new.lock_count))) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

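/*
 * How the lockless fast path works: struct lockref packs the spinlock and
 * the reference count into a single 64-bit word (hence the BUILD_BUG_ON
 * above).  CODE modifies a local copy of that word, and
 * try_cmpxchg64_relaxed() publishes lock and count together only if the
 * word is still unlocked and unchanged; on failure "old" is reloaded and
 * the loop retries (up to 100 times) before the caller falls back to the
 * spinlock slow path.
 */
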
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
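
/*
 * Illustrative caller pattern for lockref_get_not_zero() (a sketch only; the
 * lookup helper and object layout below are hypothetical, not part of this
 * file): only hand out an object that still has users, treating a zero or
 * dead count as having lost the race with the final put.
 *
 *	struct obj *o = obj_lookup(key);
 *	if (o && !lockref_get_not_zero(&o->ref))
 *		o = NULL;
 */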

/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 1) {
		lockref->count--;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
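
/*
 * Note on lockref_put_return(): unlike the other helpers it has no spinlock
 * fallback, so -1 does not necessarily mean the count was zero or dead; it
 * is also returned whenever the lockless update could not be performed
 * (lockref currently locked, cmpxchg retries exhausted, or
 * USE_CMPXCHG_LOCKREF not available).  Callers therefore need their own
 * locked fallback for the decrement.
 */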

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
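
A minimal usage sketch tying the helpers together (illustrative only; the object type, field names and obj_free() below are hypothetical, not part of this file): lookups take a reference with lockref_get_not_dead(), releases use lockref_put_or_lock(), and the final holder marks the lockref dead under the lock before freeing.

	struct obj {
		struct lockref ref;	/* hypothetical object embedding a lockref */
		/* ... payload ... */
	};

	/* Lookup side: fails once the object has been marked dead. */
	static struct obj *obj_tryget(struct obj *o)
	{
		return (o && lockref_get_not_dead(&o->ref)) ? o : NULL;
	}

	/* Release side: the slow case returns with ref.lock held. */
	static void obj_put(struct obj *o)
	{
		if (lockref_put_or_lock(&o->ref))
			return;			/* count was > 1 and has been decremented */

		/* count was <= 1; lockref_put_or_lock() took ref.lock for us */
		lockref_mark_dead(&o->ref);	/* later lockref_get_not_dead() calls now fail */
		spin_unlock(&o->ref.lock);
		obj_free(o);			/* hypothetical teardown */
	}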