cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qxl_release.c (11740B)


/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
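
/*
 * The two arrays above are indexed by the bucket index (cur_idx) chosen
 * in qxl_alloc_release_reserved() below: 0 for QXL_RELEASE_DRAWABLE,
 * 1 for QXL_RELEASE_SURFACE_CMD and 2 for QXL_RELEASE_CURSOR_CMD.
 * Assuming the common 4 KiB PAGE_SIZE, that works out to
 * 4096 / 256 = 16 drawable releases per bo and 4096 / 128 = 32
 * surface-cmd releases per bo.
 */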

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);

	if (!wait_event_timeout(qdev->release_event,
				(dma_fence_is_signaled(fence) ||
				 (qxl_io_notify_oom(qdev), 0)),
				timeout))
		return 0;

	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};
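
/*
 * Note on the custom .wait above: each pass through wait_event_timeout()
 * either observes the fence as signaled or, via the comma expression
 * (qxl_io_notify_oom(qdev), 0), pokes the host to flush its release ring
 * and keeps waiting. On timeout 0 is returned; otherwise the remaining
 * jiffies, as the dma_fence_ops contract expects.
 */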
     82
     83static int
     84qxl_release_alloc(struct qxl_device *qdev, int type,
     85		  struct qxl_release **ret)
     86{
     87	struct qxl_release *release;
     88	int handle;
     89	size_t size = sizeof(*release);
     90
     91	release = kmalloc(size, GFP_KERNEL);
     92	if (!release) {
     93		DRM_ERROR("Out of memory\n");
     94		return -ENOMEM;
     95	}
     96	release->base.ops = NULL;
     97	release->type = type;
     98	release->release_offset = 0;
     99	release->surface_release_id = 0;
    100	INIT_LIST_HEAD(&release->bos);
    101
    102	idr_preload(GFP_KERNEL);
    103	spin_lock(&qdev->release_idr_lock);
    104	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
    105	release->base.seqno = ++qdev->release_seqno;
    106	spin_unlock(&qdev->release_idr_lock);
    107	idr_preload_end();
    108	if (handle < 0) {
    109		kfree(release);
    110		*ret = NULL;
    111		return handle;
    112	}
    113	*ret = release;
    114	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
    115	release->id = handle;
    116	return handle;
    117}
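
/*
 * qxl_release_alloc() hands back the IDR handle (always >= 1, since
 * idr_alloc() is called with start = 1 and end = 0, meaning no upper
 * bound), so callers can treat any negative return as an error. The
 * fence seqno is taken from qdev->release_seqno under release_idr_lock,
 * and base.ops stays NULL until the release is actually fenced in
 * qxl_release_fence_buffer_objects().
 */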

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}
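
/*
 * The two branches in qxl_release_free() mirror the two lifetimes a
 * release can have: once fenced (base.ops set by dma_fence_init()), the
 * embedded fence is signaled and the final dma_fence_put() is expected
 * to free the containing struct; a release that never got fenced has no
 * outstanding fence references and can simply be kfree()d here.
 */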

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release bo's - they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}
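
/*
 * qxl_release_list_add() is idempotent per bo: the initial scan returns
 * early if the ttm buffer object is already tracked, so a bo referenced
 * by several commands in the same release is refcounted and fenced only
 * once.
 */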

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}
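
/*
 * Validation does three things in order: place the bo if it is not
 * already pinned (the { true, false } ttm_operation_ctx is
 * interruptible and allows waiting on the GPU), reserve one dma-resv
 * fence slot for the fence that qxl_release_fence_buffer_objects() adds
 * later, and ensure surface bos have a surface id allocated via
 * qxl_bo_check_id().
 */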

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release, it is the release
	   itself; since these objects are pinned, there is no need to
	   reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}
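
/*
 * Reservation uses the ww-mutex execbuf helpers: ttm_eu_reserve_buffers()
 * locks every bo on the list under release->ticket (deadlock-avoiding),
 * and any validation failure rolls the whole set back with
 * ttm_eu_backoff_reservation() so no lock is leaked to the caller.
 */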

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release, it is the release
	   itself; since these objects are pinned, there is no need to
	   reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					 QXL_RELEASE_SURFACE_CMD, release, NULL);
}
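
/*
 * A QXL_SURFACE_CMD_DESTROY release is stashed in the same bo as its
 * create command, 64 bytes past the create release's offset. Together
 * with the "round up to 128" comment above, this reads as: each
 * 128-byte surface slot holds the create command at +0 and the destroy
 * command at +64, keeping the create/destroy pair in one page.
 */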

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
				       int type, struct qxl_release **release,
				       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}
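
/*
 * Allocation flow, under release_mutex: once the bucket's offset
 * reaches the last slot (the offset + 1 >= releases_per_bo check), the
 * current bo is retired - unpinned and unreferenced only after the
 * mutex is dropped - and a fresh PAGE_SIZE bo is created on demand.
 * The new release gets the slot at offset * release_size_per_bo, and
 * its idr handle is written into the mapped qxl_release_info so the
 * release can be looked up again when the host hands the id back.
 */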

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
						   uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
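
/*
 * qxl_release_map()/qxl_release_unmap() are a strict pair:
 * release_offset & PAGE_MASK selects which page of the release bo to
 * kmap, and release_offset & ~PAGE_MASK is the slot's byte offset
 * within that page - added on map, and subtracted again on unmap to
 * recover the page-aligned pointer to hand back.
 */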

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if there is only one object on the release, it is the release
	   itself; since these objects are pinned, there is no need to
	   reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	ww_acquire_fini(&release->ticket);
}
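
/*
 * This is the unlock side of qxl_release_reserve_list(): the fence is
 * initialized with a fake context (release->id | 0xf0000000, per the
 * comment above) and attached to every bo's reservation object with
 * DMA_RESV_USAGE_READ; each bo is then bumped to the LRU tail, its resv
 * lock dropped, and finally the ww acquire ticket is finished.
 */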