cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_pm.c (5991B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_driver.h"
#include "i915_drv.h"

#if defined(CONFIG_X86)
#include <asm/smp.h>
#else
#define wbinvd_on_all_cpus() \
	pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
#endif

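/*
 * First step of GEM suspend: cancel the auto wakeref held for GGTT
 * userfaults, flush pending work, and ask the GT to switch away from the
 * executing contexts so that their images land in main memory (see the
 * comment below), then drain objects queued for freeing.
 */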
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	intel_gt_suspend_prepare(to_gt(i915));

	i915_gem_drain_freed_objects(i915);
}

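/*
 * Restore the contents of all local-memory (LMEM) regions from their
 * backups according to @flags, stopping at the first failure.
 */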
static int lmem_restore(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == INTEL_MEMORY_LOCAL) {
			ret = i915_ttm_restore_region(mr, flags);
			if (ret)
				break;
		}
	}

	return ret;
}

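/*
 * Back up the contents of all local-memory (LMEM) regions according to
 * @flags (e.g. whether the GPU may assist and whether pinned objects are
 * included), stopping at the first failure.
 */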
static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == INTEL_MEMORY_LOCAL) {
			ret = i915_ttm_backup_region(mr, flags);
			if (ret)
				break;
		}
	}

	return ret;
}

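/*
 * Recover all local-memory regions after a failed backup attempt; used
 * to unwind a partially completed i915_gem_backup_suspend().
 */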
static void lmem_recover(struct drm_i915_private *i915)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == INTEL_MEMORY_LOCAL)
			i915_ttm_recover_region(mr);
}

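/*
 * Suspend variant that also backs up the contents of local memory, in
 * three passes: unpinned objects while the GPU can still assist, then
 * pinned objects, and finally whatever remains via memcpy once the
 * migrate context is no longer in use. Any failure unwinds the backups
 * taken so far.
 */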
int i915_gem_backup_suspend(struct drm_i915_private *i915)
{
	int ret;

	/* Opportunistically try to evict unpinned objects */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
	if (ret)
		goto out_recover;

	i915_gem_suspend(i915);

	/*
	 * More objects may have become unpinned as requests were
	 * retired. Now try to evict again. The gt may be wedged here,
	 * in which case we automatically fall back to memcpy.
	 * We also allow backing up pinned objects that have not been
	 * marked for early recovery, and that may contain, for example,
	 * page-tables for the migrate context.
	 */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
			   I915_TTM_BACKUP_PINNED);
	if (ret)
		goto out_recover;

	/*
	 * Remaining objects are backed up using memcpy once we've stopped
	 * using the migrate context.
	 */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
	if (ret)
		goto out_recover;

	return 0;

out_recover:
	lmem_recover(i915);

	return ret;
}

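/*
 * Late stage of GEM suspend: after the GT has been suspended, move all
 * shrinkable and purgeable objects to the CPU write domain and, if any
 * of them were not coherent for CPU reads, flush the CPU caches.
 */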
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;
	bool flush = false;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't, just in case it leaves the
	 * machine in an unusable condition.
	 */

	intel_gt_suspend_late(to_gt(i915));

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
				flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
			__start_cpu_write(obj); /* presume auto-hibernate */
		}
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	if (flush)
		wbinvd_on_all_cpus();
}

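/*
 * Hibernation freeze: shrink everything we can so that purgeable pages
 * do not end up in the hibernation image.
 */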
int i915_gem_freeze(struct drm_i915_private *i915)
{
	/*
	 * Discard all purgeable objects; let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(i915);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation image, and so upon restoration those pages will be
	 * in the CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try to reduce the size of the hibernation image, we manually
	 * shrink the objects as well; see i915_gem_freeze().
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	wbinvd_on_all_cpus();
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
		__start_cpu_write(obj);

	return 0;
}

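/*
 * Resume: restore local memory in two passes around bringing the GT back
 * up, first without using the GPU and then, once the GT has resumed,
 * with GPU-assisted copies allowed.
 */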
void i915_gem_resume(struct drm_i915_private *i915)
{
	int ret;

	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	ret = lmem_restore(i915, 0);
	GEM_WARN_ON(ret);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	intel_gt_resume(to_gt(i915));

	ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
	GEM_WARN_ON(ret);
}