cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_wakeref.h (7437B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
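
/*
 * Illustrative sketch (not part of the original header): wiring a wakeref
 * into a device structure. The names example_device, example_unpark and
 * example_park are invented for illustration; real users such as the GT and
 * engine code supply their own ops that power the hardware up and down.
 */
#if 0	/* example only */
struct example_device {
	struct intel_runtime_pm *rpm;
	struct intel_wakeref wakeref;
};

static int example_unpark(struct intel_wakeref *wf)
{
	/* called once, under wf->mutex, when the first reference is taken */
	return 0;
}

static int example_park(struct intel_wakeref *wf)
{
	/* called once, under wf->mutex, when the last reference is dropped */
	return 0;
}

static const struct intel_wakeref_ops example_ops = {
	.get = example_unpark,
	.put = example_park,
};

static void example_device_init(struct example_device *dev)
{
	intel_wakeref_init(&dev->wakeref, dev->rpm, &example_ops);
}
#endif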

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released and the acquisition unwound, and an error
 * reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

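/*
 * Illustrative sketch (not part of the original header): the common
 * acquire/work/release pattern. example_work() is a hypothetical caller.
 */
#if 0	/* example only */
static int example_work(struct intel_wakeref *wf)
{
	int err;

	/* may sleep; powers the device up via ops->get() if we are first */
	err = intel_wakeref_get(wf);
	if (err)
		return err;

	/* ... the device is guaranteed awake here ... */

	/* may sleep; powers the device down via ops->put() if we are last */
	intel_wakeref_put(wf);
	return 0;
}
#endif
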
/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}

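/*
 * Illustrative sketch (not part of the original header): opportunistic use
 * from a context that must not wake the device itself. The helper name
 * example_flush_if_awake is invented for illustration.
 */
#if 0	/* example only */
static void example_flush_if_awake(struct intel_wakeref *wf)
{
	/* never sleeps, never powers the device up; fails if it is parked */
	if (!intel_wakeref_get_if_active(wf))
		return;

	/* ... touch hardware state that only exists while awake ... */

	/* defers the final release to a worker instead of sleeping */
	intel_wakeref_put_async(wf);
}
#endif
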
enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags (INTEL_WAKEREF_PUT_ASYNC and an optional delay in
 * the INTEL_WAKEREF_PUT_DELAY field)
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and the callback must arrange for the release to be
 * completed later.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

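/*
 * Illustrative sketch (not part of the original header): choosing a put
 * variant. example_irq_done() is a hypothetical completion path that may
 * run in atomic context; the 100 ms figure is invented for illustration.
 */
#if 0	/* example only */
static void example_irq_done(struct intel_wakeref *wf)
{
	/*
	 * A plain intel_wakeref_put() may sleep on wf->mutex, so from an
	 * interrupt or spinlocked path defer the final release to a worker.
	 */
	intel_wakeref_put_async(wf);

	/*
	 * Alternatively, keep the device awake a little longer in case more
	 * work arrives shortly (the delay is in jiffies):
	 *
	 *	intel_wakeref_put_delay(wf, msecs_to_jiffies(100));
	 */
}
#endif
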
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until any callback currently running (under @wf->mutex, possibly
 * on another CPU) has completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

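/*
 * Illustrative sketch (not part of the original header): pinning the
 * parked/awake state while inspecting it. example_show_status() is a
 * hypothetical debugfs-style reader.
 */
#if 0	/* example only */
static bool example_show_status(struct intel_wakeref *wf)
{
	bool active;

	/* block ops->get()/ops->put() transitions while we look */
	intel_wakeref_lock(wf);
	active = intel_wakeref_is_active(wf);
	intel_wakeref_unlock(wf);

	/* and make sure any asynchronous put has finished running */
	intel_wakeref_unlock_wait(wf);

	return active;
}
#endif
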
/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 *
 * Re-takes a single reference (under @wf->mutex) so that the in-progress
 * park is aborted and retried on a later put.
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

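/*
 * Illustrative sketch (not part of the original header): draining a wakeref
 * during teardown, once no other users can take new references.
 * example_device_fini() is a hypothetical unload path.
 */
#if 0	/* example only */
static int example_device_fini(struct intel_wakeref *wf)
{
	/* flush any intel_wakeref_put_async()/_delay() still in flight */
	return intel_wakeref_wait_for_idle(wf);
}
#endif
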
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
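
/*
 * Illustrative sketch (not part of the original header): keeping the device
 * awake for a fixed window after an access. The 250 ms figure and
 * example_touch() are invented for illustration.
 */
#if 0	/* example only */
static void example_touch(struct intel_wakeref_auto *wf)
{
	/* hold off runtime suspend for at least another 250 ms */
	intel_wakeref_auto(wf, msecs_to_jiffies(250));
}
#endif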

#endif /* INTEL_WAKEREF_H */