cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_ttm_pm.c (5651B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"

/**
 * i915_ttm_backup_free - Free any backup attached to this object
 * @obj: The object whose backup is to be freed.
 */
void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.backup) {
		i915_gem_object_put(obj->ttm.backup);
		obj->ttm.backup = NULL;
	}
}

/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup and restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, also back up pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};

static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	int err = 0;

	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
		return 0;

	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	backup = i915_gem_object_create_shmem(i915, obj->base.size);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	GEM_WARN_ON(err);
	ttm_bo_wait_ctx(backup_bo, &ctx);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}

static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}

/**
 * i915_ttm_recover_region - Free the backup of all objects of a region
 * @mr: The memory region
 *
 * Checks all objects of a region for an attached backup and, if one is
 * present, frees that backup. Typically this is called to recover after a
 * partially performed backup.
 */
void i915_ttm_recover_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops recover_ops = {
		.process_obj = i915_ttm_recover,
	};
	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
	int ret;

	ret = i915_gem_process_region(mr, &apply);
	GEM_WARN_ON(ret);
}
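
/*
 * Illustrative sketch, not part of the original file: the kerneldoc above
 * suggests calling i915_ttm_recover_region() to undo a partially performed
 * backup. A hypothetical caller could pair it with i915_ttm_backup_region()
 * (declared in "gem/i915_gem_ttm_pm.h") like this; the helper name is made
 * up for the example.
 */
static int backup_region_or_recover_sketch(struct intel_memory_region *mr,
					   u32 flags)
{
	int err;

	err = i915_ttm_backup_region(mr, flags);
	if (err)
		/* Drop any backup objects created before the failure. */
		i915_ttm_recover_region(mr);

	return err;
}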

/**
 * i915_ttm_backup_region - Back up all objects of a region to smem.
 * @mr: The memory region
 * @flags: TTM backup flags; I915_TTM_BACKUP_ALLOW_GPU allows the gpu blitter
 * for this backup, and I915_TTM_BACKUP_PINNED also backs up pinned objects.
 *
 * Loops over all objects of a region and either evicts them if they are
 * evictable or backs them up using a backup object if they are pinned.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops backup_ops = {
		.process_obj = i915_ttm_backup,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &backup_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
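
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * suspend-path helper showing how the backup flags above might be chosen.
 * The helper name and the "device_awake" condition are assumptions made for
 * the example; the gpu blitter can only help while the device is still
 * usable.
 */
static int suspend_backup_sketch(struct intel_memory_region *mr,
				 bool device_awake)
{
	u32 flags = I915_TTM_BACKUP_PINNED;

	if (device_awake)
		flags |= I915_TTM_BACKUP_ALLOW_GPU;

	return i915_ttm_backup_region(mr, flags);
}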

static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		GEM_WARN_ON(err);
		ttm_bo_wait_ctx(backup_bo, &ctx);

		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	if (!err)
		i915_gem_object_put(backup);

	return err;
}

/**
 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
 * @mr: The memory region
 * @flags: TTM backup flags; I915_TTM_BACKUP_ALLOW_GPU allows the gpu blitter
 * to be used for the restore.
 *
 * Loops over all objects of a region and, if they are backed up, restores
 * them from smem.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops restore_ops = {
		.process_obj = i915_ttm_restore,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &restore_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
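
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * resume-path helper. Passing I915_TTM_BACKUP_ALLOW_GPU here is an
 * assumption made for the example; i915_ttm_restore() above drops each
 * backup object once its contents have been copied back.
 */
static int resume_restore_sketch(struct intel_memory_region *mr)
{
	return i915_ttm_restore_region(mr, I915_TTM_BACKUP_ALLOW_GPU);
}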