cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qxl_cmd.c (16590B)


/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include <linux/delay.h>

#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

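/* Layout of a ring in device-visible memory: a qxl_ring_header shared
   with the host, followed by a flexible array of fixed-size elements. */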
struct ring {
	struct qxl_ring_header      header;
	uint8_t                     elements[];
};

struct qxl_ring {
	struct ring	       *ring;
	int			element_size;
	int			n_elements;
	int			prod_notify;
	wait_queue_head_t      *push_event;
	spinlock_t             lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}

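/* Wrap a ring header that already lives in device-visible memory.
   Nothing is allocated for the ring itself, only the driver-side
   bookkeeping structure. */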
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}

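/* Return nonzero if the ring has room for another element; if it is
   full, ask the device to notify us once the consumer advances. */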
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

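/* Copy an element into the ring and bump the producer index.  If the
   ring is full, wait for space: busy-wait when we cannot sleep,
   otherwise sleep on push_event.  The device is kicked through its
   notify port once prod reaches notify_on_prod. */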
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

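/* Pop one element from the ring.  When the ring is empty, arm
   notify_on_prod so the device raises an interrupt as soon as it
   produces the next element, and return false. */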
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		schedule_work(&qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

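/* Drain the release ring.  Each id popped from the ring heads a chain
   of releases linked through info->next; free every release in the
   chain and return how many were reclaimed. */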
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		DRM_DEBUG_DRIVER("popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
					 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	wake_up_all(&qdev->release_event);
	DRM_DEBUG_DRIVER("%d\n", i);

	return i;
}

int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

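/* Issue an asynchronous io command and wait for the device to ack it.
   If a previous command is still outstanding, wait for its interrupt
   first; then write val to the port and wait (up to 5s) for the irq
   handler to bump irq_received_io_cmd. */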
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 means timeout; just bail, the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

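/* Ask the device to update (flush) a rectangle of a surface.  The area
   is validated against the surface dimensions, written into the ram
   header and submitted via QXL_IO_UPDATE_AREA_ASYNC. */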
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
			const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_bo->is_primary = false;
	drm_gem_object_put(&qdev->primary_bo->tbo.base);
	qdev->primary_bo = NULL;
}

void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	if (WARN_ON(qdev->primary_bo))
		return;

	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, 0);

	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_bo = bo;
	qdev->primary_bo->is_primary = true;
	drm_gem_object_get(&qdev->primary_bo->tbo.base);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

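/* Allocate a surface id from the idr.  Ids beyond the device's
   n_surfaces limit are released again; in that case reap some hw
   surfaces and retry. */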
int qxl_surface_id_alloc(struct qxl_device *qdev,
		      struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

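/* Tell the hardware about a surface: build a QXL_SURFACE_CMD_CREATE
   through a release and push it on the command ring.  The idr slot is
   only pointed at the surface once the command has been queued. */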
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

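/* Counterpart of qxl_hw_surface_alloc: queue a QXL_SURFACE_CMD_DESTROY.
   The idr entry is cleared immediately, but the surface id itself
   stays reserved until the destroy release comes back. */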
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	return 0;
}

static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

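/* Evict a single surface.  When stalling is allowed, drop the evict
   mutex while waiting for the bo to go idle so other reapers can
   make progress. */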
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

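/* Free up surface ids by evicting hw surfaces, scanning the idr
   starting just after the most recently allocated id.  If nothing
   could be reaped without stalling, retry once and allow stalls. */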
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted halfway - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}