cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_wakeref.c (4518B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}
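
/*
 * A minimal sketch of the caller-facing fast path (assumed to live in
 * intel_wakeref.h; the name example_intel_wakeref_get() is illustrative):
 * only the 0 -> 1 transition has to take the mutex above, every later
 * acquisition is a lock-free atomic increment.
 */
static inline int example_intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();

	/* Already held by someone: 1 -> 2, 2 -> 3, ... without the mutex. */
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf); /* first ref: slow path */

	return 0;
}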

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(system_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}
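
/*
 * A minimal sketch of the matching put fast path (assumed to live in
 * intel_wakeref.h; the name example_intel_wakeref_put() is illustrative):
 * all but the final reference are dropped with a bare atomic, only the
 * 1 -> 0 transition funnels into __intel_wakeref_put_last() above.
 */
static inline void example_intel_wakeref_put(struct intel_wakeref *wf,
					     unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}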

static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

	/* Drop the reference unless it is the last; the final put needs the mutex. */
	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);
}
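
/*
 * A minimal sketch of wiring a component into this API. The callbacks,
 * names and call site below are illustrative assumptions, not code from
 * this tree; the real users (e.g. GT and engine power management) live
 * elsewhere in i915.
 */
static int example_get(struct intel_wakeref *wf)
{
	/* Power the unit back up; return 0 on success. */
	return 0;
}

static int example_put(struct intel_wakeref *wf)
{
	/* Park the unit; return 0 if the release may complete immediately. */
	return 0;
}

static const struct intel_wakeref_ops example_ops = {
	.get = example_get,
	.put = example_put,
};

static void example_wakeref_setup(struct intel_wakeref *wf,
				  struct intel_runtime_pm *rpm)
{
	/* One static lock class per wakeref type keeps lockdep reports distinct. */
	static struct intel_wakeref_lockclass example_key;

	__intel_wakeref_init(wf, rpm, &example_ops, &example_key);
}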

int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}

static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is to only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}
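
/*
 * A minimal usage sketch for the auto variant (the call sites and the
 * HZ-based grace period are illustrative assumptions, not taken from this
 * file): after touching the hardware while a wakelock is already held,
 * keep the device awake a little longer so back-to-back accesses do not
 * bounce runtime PM; a timeout of 0 flushes the timer and drops the ref.
 */
static void example_access_hw(struct intel_wakeref_auto *wf)
{
	/* ... register access under an already active wakeref ... */
	intel_wakeref_auto(wf, round_jiffies_up_relative(HZ));
}

static void example_quiesce(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0); /* cancel the grace period immediately */
}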

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}