cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

qxl_ioctl.c (11146B)


      1/*
      2 * Copyright 2013 Red Hat Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 * Authors: Dave Airlie
     23 *          Alon Levy
     24 */
     25
#include <linux/limits.h>
#include <linux/pci.h>
#include <linux/uaccess.h>

#include "qxl_drv.h"
#include "qxl_object.h"
     31
     32/*
     33 * TODO: allocating a new gem(in qxl_bo) for each request.
     34 * This is wasteful since bo's are page aligned.
     35 */
     36int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
     37{
     38	struct qxl_device *qdev = to_qxl(dev);
     39	struct drm_qxl_alloc *qxl_alloc = data;
     40	int ret;
     41	struct qxl_bo *qobj;
     42	uint32_t handle;
     43	u32 domain = QXL_GEM_DOMAIN_VRAM;
     44
     45	if (qxl_alloc->size == 0) {
     46		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
     47		return -EINVAL;
     48	}
     49	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
     50						domain,
     51						qxl_alloc->size,
     52						NULL,
     53						&qobj, &handle);
     54	if (ret) {
     55		DRM_ERROR("%s: failed to create gem ret=%d\n",
     56			  __func__, ret);
     57		return -ENOMEM;
     58	}
     59	qxl_alloc->handle = handle;
     60	return 0;
     61}
     62
     63int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
     64{
     65	struct qxl_device *qdev = to_qxl(dev);
     66	struct drm_qxl_map *qxl_map = data;
     67
     68	return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
     69					   &qxl_map->offset);
     70}
     71
/*
 * One parsed relocation from userspace: patch dst_bo at dst_offset with
 * a value derived from src_bo/src_offset.  type (QXL_RELOC_TYPE_BO or
 * QXL_RELOC_TYPE_SURF) selects what gets written.
 */
struct qxl_reloc_info {
	int type;
	struct qxl_bo *dst_bo;
	uint32_t dst_offset;
	struct qxl_bo *src_bo;
	int src_offset;
};
     79
     80/*
     81 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
     82 * are on vram).
     83 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
     84 */
     85static void
     86apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
     87{
     88	void *reloc_page;
     89
     90	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
     91	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
     92											      info->src_bo,
     93											      info->src_offset);
     94	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
     95}
     96
     97static void
     98apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
     99{
    100	uint32_t id = 0;
    101	void *reloc_page;
    102
    103	if (info->src_bo && !info->src_bo->is_primary)
    104		id = info->src_bo->surface_id;
    105
    106	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
    107	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
    108	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
    109}
    110
    111/* return holding the reference to this object */
    112static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
    113			      struct qxl_release *release, struct qxl_bo **qbo_p)
    114{
    115	struct drm_gem_object *gobj;
    116	struct qxl_bo *qobj;
    117	int ret;
    118
    119	gobj = drm_gem_object_lookup(file_priv, handle);
    120	if (!gobj)
    121		return -EINVAL;
    122
    123	qobj = gem_to_qxl_bo(gobj);
    124
    125	ret = qxl_release_list_add(release, qobj);
    126	drm_gem_object_put(gobj);
    127	if (ret)
    128		return ret;
    129
    130	*qbo_p = qobj;
    131	return 0;
    132}
    133
    134/*
    135 * Usage of execbuffer:
    136 * Relocations need to take into account the full QXLDrawable size.
    137 * However, the command as passed from user space must *not* contain the initial
    138 * QXLReleaseInfo struct (first XXX bytes)
    139 */
    140static int qxl_process_single_command(struct qxl_device *qdev,
    141				      struct drm_qxl_command *cmd,
    142				      struct drm_file *file_priv)
    143{
    144	struct qxl_reloc_info *reloc_info;
    145	int release_type;
    146	struct qxl_release *release;
    147	struct qxl_bo *cmd_bo;
    148	void *fb_cmd;
    149	int i, ret, num_relocs;
    150	int unwritten;
    151
    152	switch (cmd->type) {
    153	case QXL_CMD_DRAW:
    154		release_type = QXL_RELEASE_DRAWABLE;
    155		break;
    156	case QXL_CMD_SURFACE:
    157	case QXL_CMD_CURSOR:
    158	default:
    159		DRM_DEBUG("Only draw commands in execbuffers\n");
    160		return -EINVAL;
    161	}
    162
    163	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
    164		return -EINVAL;
    165
    166	if (!access_ok(u64_to_user_ptr(cmd->command),
    167		       cmd->command_size))
    168		return -EFAULT;
    169
    170	reloc_info = kmalloc_array(cmd->relocs_num,
    171				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
    172	if (!reloc_info)
    173		return -ENOMEM;
    174
    175	ret = qxl_alloc_release_reserved(qdev,
    176					 sizeof(union qxl_release_info) +
    177					 cmd->command_size,
    178					 release_type,
    179					 &release,
    180					 &cmd_bo);
    181	if (ret)
    182		goto out_free_reloc;
    183
    184	/* TODO copy slow path code from i915 */
    185	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
    186	unwritten = __copy_from_user_inatomic_nocache
    187		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
    188		 u64_to_user_ptr(cmd->command), cmd->command_size);
    189
    190	{
    191		struct qxl_drawable *draw = fb_cmd;
    192
    193		draw->mm_time = qdev->rom->mm_clock;
    194	}
    195
    196	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
    197	if (unwritten) {
    198		DRM_ERROR("got unwritten %d\n", unwritten);
    199		ret = -EFAULT;
    200		goto out_free_release;
    201	}
    202
    203	/* fill out reloc info structs */
    204	num_relocs = 0;
    205	for (i = 0; i < cmd->relocs_num; ++i) {
    206		struct drm_qxl_reloc reloc;
    207		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
    208
    209		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
    210			ret = -EFAULT;
    211			goto out_free_bos;
    212		}
    213
    214		/* add the bos to the list of bos to validate -
    215		   need to validate first then process relocs? */
    216		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
    217			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
    218
    219			ret = -EINVAL;
    220			goto out_free_bos;
    221		}
    222		reloc_info[i].type = reloc.reloc_type;
    223
    224		if (reloc.dst_handle) {
    225			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
    226						 &reloc_info[i].dst_bo);
    227			if (ret)
    228				goto out_free_bos;
    229			reloc_info[i].dst_offset = reloc.dst_offset;
    230		} else {
    231			reloc_info[i].dst_bo = cmd_bo;
    232			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
    233		}
    234		num_relocs++;
    235
    236		/* reserve and validate the reloc dst bo */
    237		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
    238			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
    239						 &reloc_info[i].src_bo);
    240			if (ret)
    241				goto out_free_bos;
    242			reloc_info[i].src_offset = reloc.src_offset;
    243		} else {
    244			reloc_info[i].src_bo = NULL;
    245			reloc_info[i].src_offset = 0;
    246		}
    247	}
    248
    249	/* validate all buffers */
    250	ret = qxl_release_reserve_list(release, false);
    251	if (ret)
    252		goto out_free_bos;
    253
    254	for (i = 0; i < cmd->relocs_num; ++i) {
    255		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
    256			apply_reloc(qdev, &reloc_info[i]);
    257		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
    258			apply_surf_reloc(qdev, &reloc_info[i]);
    259	}
    260
    261	qxl_release_fence_buffer_objects(release);
    262	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
    263
    264out_free_bos:
    265out_free_release:
    266	if (ret)
    267		qxl_release_free(qdev, release);
    268out_free_reloc:
    269	kfree(reloc_info);
    270	return ret;
    271}
    272
    273int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
    274{
    275	struct qxl_device *qdev = to_qxl(dev);
    276	struct drm_qxl_execbuffer *execbuffer = data;
    277	struct drm_qxl_command user_cmd;
    278	int cmd_num;
    279	int ret;
    280
    281	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
    282
    283		struct drm_qxl_command __user *commands =
    284			u64_to_user_ptr(execbuffer->commands);
    285
    286		if (copy_from_user(&user_cmd, commands + cmd_num,
    287				       sizeof(user_cmd)))
    288			return -EFAULT;
    289
    290		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
    291		if (ret)
    292			return ret;
    293	}
    294	return 0;
    295}
    296
    297int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
    298{
    299	struct qxl_device *qdev = to_qxl(dev);
    300	struct drm_qxl_update_area *update_area = data;
    301	struct qxl_rect area = {.left = update_area->left,
    302				.top = update_area->top,
    303				.right = update_area->right,
    304				.bottom = update_area->bottom};
    305	int ret;
    306	struct drm_gem_object *gobj = NULL;
    307	struct qxl_bo *qobj = NULL;
    308	struct ttm_operation_ctx ctx = { true, false };
    309
    310	if (update_area->left >= update_area->right ||
    311	    update_area->top >= update_area->bottom)
    312		return -EINVAL;
    313
    314	gobj = drm_gem_object_lookup(file, update_area->handle);
    315	if (gobj == NULL)
    316		return -ENOENT;
    317
    318	qobj = gem_to_qxl_bo(gobj);
    319
    320	ret = qxl_bo_reserve(qobj);
    321	if (ret)
    322		goto out;
    323
    324	if (!qobj->tbo.pin_count) {
    325		qxl_ttm_placement_from_domain(qobj, qobj->type);
    326		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
    327		if (unlikely(ret))
    328			goto out;
    329	}
    330
    331	ret = qxl_bo_check_id(qdev, qobj);
    332	if (ret)
    333		goto out2;
    334	if (!qobj->surface_id)
    335		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
    336	ret = qxl_io_update_area(qdev, qobj, &area);
    337
    338out2:
    339	qxl_bo_unreserve(qobj);
    340
    341out:
    342	drm_gem_object_put(gobj);
    343	return ret;
    344}
    345
    346int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
    347{
    348	struct qxl_device *qdev = to_qxl(dev);
    349	struct drm_qxl_getparam *param = data;
    350
    351	switch (param->param) {
    352	case QXL_PARAM_NUM_SURFACES:
    353		param->value = qdev->rom->n_surfaces;
    354		break;
    355	case QXL_PARAM_MAX_RELOCS:
    356		param->value = QXL_MAX_RES;
    357		break;
    358	default:
    359		return -EINVAL;
    360	}
    361	return 0;
    362}
    363
    364int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
    365{
    366	struct qxl_device *qdev = to_qxl(dev);
    367	struct pci_dev *pdev = to_pci_dev(dev->dev);
    368	struct drm_qxl_clientcap *param = data;
    369	int byte, idx;
    370
    371	byte = param->index / 8;
    372	idx = param->index % 8;
    373
    374	if (pdev->revision < 4)
    375		return -ENOSYS;
    376
    377	if (byte >= 58)
    378		return -ENOSYS;
    379
    380	if (qdev->rom->client_capabilities[byte] & (1 << idx))
    381		return 0;
    382	return -ENOSYS;
    383}
    384
    385int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
    386{
    387	struct qxl_device *qdev = to_qxl(dev);
    388	struct drm_qxl_alloc_surf *param = data;
    389	struct qxl_bo *qobj;
    390	int handle;
    391	int ret;
    392	int size, actual_stride;
    393	struct qxl_surface surf;
    394
    395	/* work out size allocate bo with handle */
    396	actual_stride = param->stride < 0 ? -param->stride : param->stride;
    397	size = actual_stride * param->height + actual_stride;
    398
    399	surf.format = param->format;
    400	surf.width = param->width;
    401	surf.height = param->height;
    402	surf.stride = param->stride;
    403	surf.data = 0;
    404
    405	ret = qxl_gem_object_create_with_handle(qdev, file,
    406						QXL_GEM_DOMAIN_SURFACE,
    407						size,
    408						&surf,
    409						&qobj, &handle);
    410	if (ret) {
    411		DRM_ERROR("%s: failed to create gem ret=%d\n",
    412			  __func__, ret);
    413		return -ENOMEM;
    414	} else
    415		param->handle = handle;
    416	return ret;
    417}