cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

i915_gem_create.c (10645B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2020 Intel Corporation
      4 */
      5
      6#include <drm/drm_fourcc.h>
      7
      8#include "gem/i915_gem_ioctls.h"
      9#include "gem/i915_gem_lmem.h"
     10#include "gem/i915_gem_region.h"
     11#include "pxp/intel_pxp.h"
     12
     13#include "i915_drv.h"
     14#include "i915_gem_create.h"
     15#include "i915_trace.h"
     16#include "i915_user_extensions.h"
     17
     18static u32 object_max_page_size(struct intel_memory_region **placements,
     19				unsigned int n_placements)
     20{
     21	u32 max_page_size = 0;
     22	int i;
     23
     24	for (i = 0; i < n_placements; i++) {
     25		struct intel_memory_region *mr = placements[i];
     26
     27		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
     28		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
     29	}
     30
     31	GEM_BUG_ON(!max_page_size);
     32	return max_page_size;
     33}
     34
     35static int object_set_placements(struct drm_i915_gem_object *obj,
     36				 struct intel_memory_region **placements,
     37				 unsigned int n_placements)
     38{
     39	struct intel_memory_region **arr;
     40	unsigned int i;
     41
     42	GEM_BUG_ON(!n_placements);
     43
     44	/*
     45	 * For the common case of one memory region, skip storing an
     46	 * allocated array and just point at the region directly.
     47	 */
     48	if (n_placements == 1) {
     49		struct intel_memory_region *mr = placements[0];
     50		struct drm_i915_private *i915 = mr->i915;
     51
     52		obj->mm.placements = &i915->mm.regions[mr->id];
     53		obj->mm.n_placements = 1;
     54	} else {
     55		arr = kmalloc_array(n_placements,
     56				    sizeof(struct intel_memory_region *),
     57				    GFP_KERNEL);
     58		if (!arr)
     59			return -ENOMEM;
     60
     61		for (i = 0; i < n_placements; i++)
     62			arr[i] = placements[i];
     63
     64		obj->mm.placements = arr;
     65		obj->mm.n_placements = n_placements;
     66	}
     67
     68	return 0;
     69}
     70
     71static int i915_gem_publish(struct drm_i915_gem_object *obj,
     72			    struct drm_file *file,
     73			    u64 *size_p,
     74			    u32 *handle_p)
     75{
     76	u64 size = obj->base.size;
     77	int ret;
     78
     79	ret = drm_gem_handle_create(file, &obj->base, handle_p);
     80	/* drop reference from allocate - handle holds it now */
     81	i915_gem_object_put(obj);
     82	if (ret)
     83		return ret;
     84
     85	*size_p = size;
     86	return 0;
     87}
     88
/*
 * __i915_gem_object_create_user_ext - core object creation path shared by the
 * ioctls and selftests.
 * @i915: i915 private
 * @size: requested size in bytes; rounded up to the largest min page size
 *        of the placements
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 * @ext_flags: extra I915_BO_* flags collected from create_ext extensions
 *
 * Returns the new object (with a single reference) or an ERR_PTR on failure.
 */
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0]; /* highest priority region backs the object */
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	i915_gem_flush_free_objects(i915);

	/* round_up() wrapping to 0 also catches an over-large request */
	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	/*
	 * Only a multi-placement array was kmalloc'ed by
	 * object_set_placements(); a single placement points into
	 * i915->mm.regions[] and must not be freed.
	 */
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
    144
/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 * DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking.  It is assumed that the set of placement regions has
 * already been verified to be valid.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements)
{
	/* No extension flags: plain object creation. */
	return __i915_gem_object_create_user_ext(i915, size, placements,
						 n_placements, 0);
}
    164
    165int
    166i915_gem_dumb_create(struct drm_file *file,
    167		     struct drm_device *dev,
    168		     struct drm_mode_create_dumb *args)
    169{
    170	struct drm_i915_gem_object *obj;
    171	struct intel_memory_region *mr;
    172	enum intel_memory_type mem_type;
    173	int cpp = DIV_ROUND_UP(args->bpp, 8);
    174	u32 format;
    175
    176	switch (cpp) {
    177	case 1:
    178		format = DRM_FORMAT_C8;
    179		break;
    180	case 2:
    181		format = DRM_FORMAT_RGB565;
    182		break;
    183	case 4:
    184		format = DRM_FORMAT_XRGB8888;
    185		break;
    186	default:
    187		return -EINVAL;
    188	}
    189
    190	/* have to work out size/pitch and return them */
    191	args->pitch = ALIGN(args->width * cpp, 64);
    192
    193	/* align stride to page size so that we can remap */
    194	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
    195						    DRM_FORMAT_MOD_LINEAR))
    196		args->pitch = ALIGN(args->pitch, 4096);
    197
    198	if (args->pitch < args->width)
    199		return -EINVAL;
    200
    201	args->size = mul_u32_u32(args->pitch, args->height);
    202
    203	mem_type = INTEL_MEMORY_SYSTEM;
    204	if (HAS_LMEM(to_i915(dev)))
    205		mem_type = INTEL_MEMORY_LOCAL;
    206
    207	mr = intel_memory_region_by_type(to_i915(dev), mem_type);
    208
    209	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
    210	if (IS_ERR(obj))
    211		return PTR_ERR(obj);
    212
    213	return i915_gem_publish(obj, file, &args->size, &args->handle);
    214}
    215
/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;

	/* The legacy create ioctl only ever allocates from system memory. */
	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
    239
/* Accumulated state from DRM_I915_GEM_CREATE_EXT user extensions. */
struct create_ext {
	struct drm_i915_private *i915;	/* owning device */
	/* requested placement regions, in priority order */
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;	/* 0 until a regions extension is parsed */
	unsigned long flags;		/* extra I915_BO_* flags (e.g. I915_BO_PROTECTED) */
};
    246
    247static void repr_placements(char *buf, size_t size,
    248			    struct intel_memory_region **placements,
    249			    int n_placements)
    250{
    251	int i;
    252
    253	buf[0] = '\0';
    254
    255	for (i = 0; i < n_placements; i++) {
    256		struct intel_memory_region *mr = placements[i];
    257		int r;
    258
    259		r = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
    260			     mr->name, mr->type, mr->instance);
    261		if (r >= size)
    262			return;
    263
    264		buf += r;
    265		size -= r;
    266	}
    267}
    268
/*
 * set_placements - validate and record the user-supplied placement list.
 * @args: copied-in DRM_I915_GEM_CREATE_EXT_MEMORY_REGIONS extension
 * @ext_data: accumulated create_ext state to fill in
 *
 * Returns 0 on success or -EINVAL/-EFAULT. Validates each region against
 * the device, rejects duplicates and a second MEMORY_REGIONS extension,
 * and only commits the list to @ext_data once everything checked out.
 */
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	/*
	 * Cheap header checks first; errors accumulate in ret so every
	 * problem gets its own drm_dbg before we bail out below.
	 */
	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	/* Validate each region; mask tracks region ids seen so far. */
	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	/* Only one MEMORY_REGIONS extension is allowed per create. */
	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	/* Everything validated; commit to the caller's state. */
	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	return 0;

out_dump:
	/* Debug aid: dump both the pre-existing and the partially-validated list. */
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		/* i is the count of entries validated before the failure */
		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}
    362
    363static int ext_set_placements(struct i915_user_extension __user *base,
    364			      void *data)
    365{
    366	struct drm_i915_gem_create_ext_memory_regions ext;
    367
    368	if (copy_from_user(&ext, base, sizeof(ext)))
    369		return -EFAULT;
    370
    371	return set_placements(&ext, data);
    372}
    373
    374static int ext_set_protected(struct i915_user_extension __user *base, void *data)
    375{
    376	struct drm_i915_gem_create_ext_protected_content ext;
    377	struct create_ext *ext_data = data;
    378
    379	if (copy_from_user(&ext, base, sizeof(ext)))
    380		return -EFAULT;
    381
    382	if (ext.flags)
    383		return -EINVAL;
    384
    385	if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
    386		return -ENODEV;
    387
    388	ext_data->flags |= I915_BO_PROTECTED;
    389
    390	return 0;
    391}
    392
/*
 * create_ext extension dispatch table, indexed by the extension name from
 * the uAPI (struct i915_user_extension.name).
 */
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
};
    397
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle
 * to it, applying any user-supplied create extensions.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	/* No top-level flags are defined for this ioctl. */
	if (args->flags)
		return -EINVAL;

	/* Walk the userspace extension chain, filling in ext_data. */
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	/* Default to system memory when no placements were requested. */
	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}