cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_region.c (5650B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	obj->mm.region = mem;

	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}

void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);
}

static struct drm_i915_gem_object *
__i915_gem_object_create_region(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				resource_size_t page_size,
				unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	resource_size_t default_page_size;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY &&
			 (flags & I915_BO_ALLOC_CPU_CLEAR ||
			  flags & I915_BO_ALLOC_PM_EARLY)))
		return ERR_PTR(-EINVAL);

	if (!mem)
		return ERR_PTR(-ENODEV);

	default_page_size = mem->min_page_size;
	if (page_size)
		default_page_size = page_size;

	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
	GEM_BUG_ON(default_page_size < PAGE_SIZE);

	size = round_up(size, default_page_size);

	if (default_page_size == size)
		flags |= I915_BO_ALLOC_CONTIGUOUS;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/*
	 * Anything smaller than the min_page_size can't be freely inserted into
	 * the GTT, due to alignment restrictions. For such special objects,
	 * make sure we force memcpy based suspend-resume. In the future we can
	 * revisit this, either by allowing special mis-aligned objects in the
	 * migration path, or by mapping all of LMEM upfront using cheap 1G
	 * GTT entries.
	 */
	if (default_page_size < mem->min_page_size)
		flags |= I915_BO_ALLOC_PM_EARLY;

	err = mem->ops->init_object(mem, obj, offset, size, page_size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}

struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      resource_size_t page_size,
			      unsigned int flags)
{
	return __i915_gem_object_create_region(mem, I915_BO_INVALID_OFFSET,
					       size, page_size, flags);
}

struct drm_i915_gem_object *
i915_gem_object_create_region_at(struct intel_memory_region *mem,
				 resource_size_t offset,
				 resource_size_t size,
				 unsigned int flags)
{
	GEM_BUG_ON(offset == I915_BO_INVALID_OFFSET);

	if (GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
	    GEM_WARN_ON(!IS_ALIGNED(offset, mem->min_page_size)))
		return ERR_PTR(-EINVAL);

	if (range_overflows(offset, size, resource_size(&mem->region)))
		return ERR_PTR(-EINVAL);

	if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
	    offset + size > mem->io_size &&
	    !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
		return ERR_PTR(-ENOSPC);

	return __i915_gem_object_create_region(mem, offset, size, 0,
					       flags | I915_BO_ALLOC_CONTIGUOUS);
}
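
/*
 * Example (illustrative only): a minimal sketch of how a caller might use
 * the two creation entry points above, assuming a valid
 * struct intel_memory_region *mem whose min_page_size divides SZ_2M.
 * Passing 0 as page_size selects the region's min_page_size, and 0 flags
 * requests no special placement.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	// Let the region place the object anywhere.
 *	obj = i915_gem_object_create_region(mem, SZ_2M, 0, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	i915_gem_object_put(obj);
 *
 *	// Place the object at a fixed offset instead; this helper forces
 *	// I915_BO_ALLOC_CONTIGUOUS and rejects unaligned offset/size.
 *	obj = i915_gem_object_create_region_at(mem, 0, SZ_2M, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	i915_gem_object_put(obj);
 */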

/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the region's object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so if it is run
 * concurrently with itself it may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	for (;;) {
		struct i915_gem_ww_ctx ww;

		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}

		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
		if (ret)
			break;
	}
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	return ret;
}
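
/*
 * Example (illustrative only): a minimal sketch of driving
 * i915_gem_process_region() with a trivial callback, following the field
 * usage visible above (apply->ops, apply->ww, apply->interruptible). The
 * names bump_count and bump_ops are hypothetical. The callback runs with
 * the object locked under the ww transaction; a nonzero return stops the
 * iteration.
 *
 *	static int bump_count(struct i915_gem_apply_to_region *apply,
 *			      struct drm_i915_gem_object *obj)
 *	{
 *		// Object is locked here and known to belong to the region.
 *		return 0;
 *	}
 *
 *	static const struct i915_gem_apply_to_region_ops bump_ops = {
 *		.process_obj = bump_count,
 *	};
 *
 *	struct i915_gem_apply_to_region apply = {
 *		.ops = &bump_ops,
 *		.interruptible = true,
 *	};
 *	ret = i915_gem_process_region(mr, &apply);
 */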