cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qrwlock.h (4081B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * These use generic atomic and locking routines, but depend on a fair spinlock
 * implementation in order to be fair themselves.  The implementation in
 * asm-generic/spinlock.h meets these requirements.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked.  */

/*
 * Writer states & reader shift and bias.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)

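/*
 * Resulting layout of ->cnts implied by the masks above (the _QW_LOCKED
 * byte is separately addressable as ->wlocked through the union in
 * asm-generic/qrwlock_types.h):
 *
 *   bits  0-7: writer locked byte, 0xff while a writer holds the lock
 *   bit     8: set while a writer is waiting
 *   bits 9-31: reader count, in units of _QR_BIAS
 */
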
/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
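
/*
 * Worked example of the fast path above: with two readers and no writer,
 * ->cnts == 2 * _QR_BIAS == 0x400, so (cnts & _QW_WMASK) == 0 and a third
 * reader's atomic_add_return_acquire() yields 0x600. If a writer slipped
 * in between the plain read and the add, the writer bits show up in the
 * returned value and the reader bias is backed out again.
 */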

/**
 * queued_write_trylock - try to acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}
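
/*
 * Note that the try_cmpxchg above can only succeed while ->cnts is 0,
 * i.e. the lock is completely free: no readers, no writer holding it
 * and no writer waiting.
 */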

/**
 * queued_read_lock - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
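
/*
 * The fast path above handles only the uncontended 0 -> _QW_LOCKED
 * transition; any other state (readers present or another writer) is
 * punted to queued_write_lock_slowpath(), which queues on ->wait_lock.
 */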

/**
 * queued_read_unlock - release read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
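
/*
 * The (void) cast above just discards the updated counter; the point of
 * using the _return variant is its release ordering, which pairs with
 * the acquire performed by the next lock acquirer.
 */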

/**
 * queued_write_unlock - release write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}
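
/*
 * ->wlocked overlays the _QW_LOCKED byte of ->cnts (see the union in
 * asm-generic/qrwlock_types.h), so this single-byte release store drops
 * the writer state without touching the reader count in the upper bits.
 */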

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	return arch_spin_is_locked(&lock->wait_lock);
}
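
/*
 * ->wait_lock is the fair spinlock that both the reader and writer
 * slowpaths queue on, so seeing it held is a reasonable proxy for the
 * qrwlock itself being contended.
 */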

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queued rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)

#endif /* __ASM_GENERIC_QRWLOCK_H */
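
For orientation, here is a minimal sketch of how these primitives are
reached through the kernel's rwlock_t wrappers when queued rwlocks are
in use. This is a hypothetical caller, not part of this header;
counter_lock, counter, counter_get and counter_bump are made-up names.

	#include <linux/spinlock.h>	/* pulls in the rwlock_t API */

	static DEFINE_RWLOCK(counter_lock);	/* arch_rwlock_t, i.e. a qrwlock */
	static unsigned long counter;

	/* Readers can run concurrently; each read_lock() adds _QR_BIAS. */
	static unsigned long counter_get(void)
	{
		unsigned long v;

		read_lock(&counter_lock);	/* ends up in queued_read_lock() */
		v = counter;
		read_unlock(&counter_lock);
		return v;
	}

	/* Writers are exclusive; write_lock() cmpxchgs 0 -> _QW_LOCKED. */
	static void counter_bump(void)
	{
		write_lock(&counter_lock);	/* ends up in queued_write_lock() */
		counter++;
		write_unlock(&counter_lock);
	}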