cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_engine_pm.c (9214B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

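/*
 * Debug aid: under CONFIG_DRM_I915_DEBUG_GEM, fill the context image with
 * CONTEXT_REDZONE before it is scrubbed/reset, so any later reuse of stale
 * state shows up as poison rather than being silently trusted.
 */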
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj, NULL))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

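/*
 * Completion callback attached to the parking request in
 * switch_to_kernel_context(): it measures the time from emission
 * (rq->duration.emitted) to the fence timestamp and feeds the engine's
 * latency estimate (EWMA).
 */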
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so to engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	bool result = true;

	/*
	 * This is execlist-specific behaviour intended to ensure the GPU is
	 * idle by switching to a known 'safe' context. With GuC submission, the
	 * same idle guarantee is achieved by other means (disabling
	 * scheduling). Further, switching to a 'safe' context has no effect
	 * with GuC submission as the scheduler can just switch back again.
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);
	return result;
}

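/*
 * engine->barrier_tasks collects the dma_fence_cb of each idle barrier
 * (see i915_active.c), reusing the callback's embedded list_head as an
 * llist_node; hence the cast back to a list_head below before
 * container_of() recovers the dma_fence_cb.
 */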
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

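/*
 * Final put on engine->wakeref. If the engine is not already idling in the
 * kernel context, switch_to_kernel_context() queues a barrier request and
 * defers the park via __intel_wakeref_defer_park(), so we return -EBUSY
 * here and the engine is only fully parked once that request has retired.
 */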
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

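/*
 * The wakeref ops above are driven through engine->wakeref by the
 * intel_engine_pm_get()/intel_engine_pm_put() helpers (see
 * intel_engine_pm.h), which wrap intel_wakeref_get()/intel_wakeref_put():
 * __engine_unpark() runs on the first get, __engine_park() on the last put.
 * A typical caller, as a rough sketch (not code from this file):
 *
 *	intel_engine_pm_get(engine);
 *	... build and submit requests on this engine ...
 *	intel_engine_pm_put(engine);
 */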
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose their content or get
 * corrupted across suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	list_for_each_entry(ce, &engine->pinned_contexts_list,
			    pinned_contexts_link) {
		/* kernel context gets reset at __engine_unpark() */
		if (ce == engine->kernel_context)
			continue;

		dbg_poison_ce(ce);
		ce->ops->reset(ce);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif