cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

intel_engine_pm.h (2959B)


      1/* SPDX-License-Identifier: MIT */
      2/*
      3 * Copyright © 2019 Intel Corporation
      4 */
      5
      6#ifndef INTEL_ENGINE_PM_H
      7#define INTEL_ENGINE_PM_H
      8
      9#include "i915_drv.h"
     10#include "i915_request.h"
     11#include "intel_engine_types.h"
     12#include "intel_wakeref.h"
     13#include "intel_gt_pm.h"
     14
/*
 * intel_engine_pm_is_awake - report whether the engine's PM wakeref is active.
 * @engine: the engine to query
 *
 * Thin wrapper over intel_wakeref_is_active() on engine->wakeref.
 * Returns true if the engine currently holds an active wakeref, i.e. is
 * considered awake for power-management purposes.
 */
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
{
	return intel_wakeref_is_active(&engine->wakeref);
}
     20
/*
 * __intel_engine_pm_get - take an engine PM wakeref (double-underscore variant).
 * @engine: the engine to hold awake
 *
 * Wraps __intel_wakeref_get() on engine->wakeref.
 * NOTE(review): per the usual __-prefix convention this presumably requires
 * the wakeref to already be active (no wake-up path taken) — confirm against
 * intel_wakeref.h before relying on that.
 */
static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
{
	__intel_wakeref_get(&engine->wakeref);
}
     25
/*
 * intel_engine_pm_get - acquire a PM wakeref for the engine.
 * @engine: the engine to hold awake
 *
 * Wraps intel_wakeref_get() on engine->wakeref; pair with
 * intel_engine_pm_put() (or one of its variants) to release.
 */
static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
{
	intel_wakeref_get(&engine->wakeref);
}
     30
/*
 * intel_engine_pm_get_if_awake - acquire a PM wakeref only if already awake.
 * @engine: the engine to hold awake
 *
 * Wraps intel_wakeref_get_if_active() on engine->wakeref.
 * Returns true and takes a reference iff the wakeref was already active;
 * returns false (no reference taken) otherwise.
 */
static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
	return intel_wakeref_get_if_active(&engine->wakeref);
}
     35
     36static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
     37{
     38	if (!intel_engine_is_virtual(engine)) {
     39		intel_wakeref_might_get(&engine->wakeref);
     40	} else {
     41		struct intel_gt *gt = engine->gt;
     42		struct intel_engine_cs *tengine;
     43		intel_engine_mask_t tmp, mask = engine->mask;
     44
     45		for_each_engine_masked(tengine, gt, mask, tmp)
     46			intel_wakeref_might_get(&tengine->wakeref);
     47	}
     48	intel_gt_pm_might_get(engine->gt);
     49}
     50
/*
 * intel_engine_pm_put - release a PM wakeref previously taken on the engine.
 * @engine: the engine to release
 *
 * Wraps intel_wakeref_put() on engine->wakeref; pairs with
 * intel_engine_pm_get() / intel_engine_pm_get_if_awake().
 */
static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
{
	intel_wakeref_put(&engine->wakeref);
}
     55
/*
 * intel_engine_pm_put_async - release a PM wakeref without blocking.
 * @engine: the engine to release
 *
 * Wraps intel_wakeref_put_async() on engine->wakeref; the asynchronous
 * variant of intel_engine_pm_put() (final-put work deferred rather than
 * performed in the caller's context — see intel_wakeref.h).
 */
static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
{
	intel_wakeref_put_async(&engine->wakeref);
}
     60
/*
 * intel_engine_pm_put_delay - release a PM wakeref after a delay.
 * @engine: the engine to release
 * @delay: delay before the put takes effect (units per intel_wakeref_put_delay;
 *         presumably jiffies — confirm in intel_wakeref.h)
 *
 * Wraps intel_wakeref_put_delay() on engine->wakeref.
 */
static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
					     unsigned long delay)
{
	intel_wakeref_put_delay(&engine->wakeref, delay);
}
     66
/*
 * intel_engine_pm_flush - wait for a pending wakeref transition to settle.
 * @engine: the engine to flush
 *
 * Wraps intel_wakeref_unlock_wait() on engine->wakeref, i.e. waits until
 * the wakeref's internal lock is not held, flushing an in-flight park/unpark.
 */
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
	intel_wakeref_unlock_wait(&engine->wakeref);
}
     71
     72static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
     73{
     74	if (!intel_engine_is_virtual(engine)) {
     75		intel_wakeref_might_put(&engine->wakeref);
     76	} else {
     77		struct intel_gt *gt = engine->gt;
     78		struct intel_engine_cs *tengine;
     79		intel_engine_mask_t tmp, mask = engine->mask;
     80
     81		for_each_engine_masked(tengine, gt, mask, tmp)
     82			intel_wakeref_might_put(&tengine->wakeref);
     83	}
     84	intel_gt_pm_might_put(engine->gt);
     85}
     86
/*
 * intel_engine_create_kernel_request - create a request on the engine's
 * kernel context.
 * @engine: the engine whose kernel_context the request is created on
 *
 * Holds an engine-pm wakeref across i915_request_create() so that the
 * use of engine->kernel_context here is serialised against its use
 * inside the engine-pm barrier (see the comment below).
 *
 * NOTE(review): the return value is passed straight through from
 * i915_request_create(); presumably an ERR_PTR on failure rather than
 * NULL — confirm against i915_request.h before checking it.
 */
static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/*
	 * The engine->kernel_context is special as it is used inside
	 * the engine-pm barrier (see __engine_park()), circumventing
	 * the usual mutexes and relying on the engine-pm barrier
	 * instead. So whenever we use the engine->kernel_context
	 * outside of the barrier, we must manually handle the
	 * engine wakeref to serialise with the use inside.
	 */
	intel_engine_pm_get(engine);
	rq = i915_request_create(engine->kernel_context);
	intel_engine_pm_put(engine);

	return rq;
}
    106
    107void intel_engine_init__pm(struct intel_engine_cs *engine);
    108
    109void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
    110
    111#endif /* INTEL_ENGINE_PM_H */