cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmwgfx_cmd.c (18382B)


// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"

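/**
 * vmw_supports_3d - Determine whether the device supports 3D commands.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Checks the SVGA 3D capability and, depending on whether the device
 * uses guest-backed objects or the extended FIFO, queries the
 * corresponding 3D device cap or hardware version.
 *
 * Returns true if 3D is supported, false otherwise.
 */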
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	BUG_ON(vmw_is_svga_v3(dev_priv));

	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_fifo_mem_read(dev_priv,
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
					       SVGA_FIFO_3D_HWVERSION_REVISED :
					       SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

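/**
 * vmw_fifo_have_pitchlock - Check for FIFO pitchlock support.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the extended FIFO advertises the
 * SVGA_FIFO_CAP_PITCHLOCK capability.
 */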
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

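/**
 * vmw_fifo_create - Allocate and initialize the device FIFO state.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Allocates the FIFO state structure and its static bounce buffer,
 * programs the FIFO min/max/next-cmd/stop registers and signals the
 * device that configuration is done.
 *
 * Returns the new FIFO state, NULL if the device has no FIFO memory,
 * or an ERR_PTR on failure.
 */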
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo;
	uint32_t max;
	uint32_t min;

	if (!dev_priv->fifo_mem)
		return NULL;

	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return ERR_PTR(-ENOMEM);
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL)) {
		kfree(fifo);
		return ERR_PTR(-ENOMEM);
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
	wmb();
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);

	drm_info(&dev_priv->drm,
		 "Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	if (unlikely(min >= max)) {
		drm_warn(&dev_priv->drm,
			 "FIFO memory is not usable. Driver failed to initialize.");
		/* Don't leak the state allocated above on this error path. */
		vfree(fifo->static_buffer);
		kfree(fifo);
		return ERR_PTR(-ENXIO);
	}

	return fifo;
}

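/**
 * vmw_fifo_ping_host - Wake up the device command processor.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: SVGA_SYNC_* reason code for the sync register write.
 *
 * Sets the FIFO busy flag and writes the sync register, but only if
 * the flag was not already set, avoiding redundant register writes.
 */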
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;

	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}

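/**
 * vmw_fifo_destroy - Free the FIFO state and its bounce buffers.
 *
 * @dev_priv: Pointer to device private structure.
 */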
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!fifo)
		return;

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
	kfree(fifo);
	dev_priv->fifo = NULL;
}

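/**
 * vmw_fifo_is_full - Check whether @bytes of FIFO space are unavailable.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes needed.
 *
 * Returns true if the free space between the producer (next_cmd) and
 * the consumer (stop) pointers is at most @bytes.
 */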
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

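/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Used when the device lacks SVGA_CAP_IRQMASK. Returns 0 on success,
 * -EBUSY on timeout and -ERESTARTSYS if interrupted by a signal.
 */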
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

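/**
 * vmw_fifo_wait - Wait for FIFO space to become available.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host and waits, using the FIFO progress interrupt if the
 * device supports it and polling otherwise. Returns 0 on success,
 * -EBUSY on timeout and -ERESTARTSYS if interrupted by a signal.
 */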
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

/*
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * On success the fifo_mutex is held and is only released again by
 * vmw_local_fifo_commit().
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32  *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_fifo_mem_write(dev_priv,
							   SVGA_FIFO_RESERVED,
							   bytes);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

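/**
 * vmw_cmd_ctx_reserve - Reserve command space for a given context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: Hardware context id, or SVGA3D_INVALID_ID for none.
 *
 * Reserves space through the command buffer manager when one exists and
 * falls back to the local FIFO otherwise. A successful reservation must
 * be paired with a vmw_cmd_commit() (or vmw_cmd_commit_flush()) call,
 * as in the emit-dummy-query helpers below:
 *
 *	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in *cmd ...
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 *
 * Returns a pointer to the reserved command space, or NULL on failure.
 */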
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret))
		return NULL;

	return ret;
}

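/**
 * vmw_fifo_res_copy - Copy a bounce buffer into reserveable FIFO memory.
 *
 * @fifo_state: The FIFO state.
 * @vmw: Pointer to device private structure.
 * @next_cmd: Current value of SVGA_FIFO_NEXT_CMD.
 * @max: Current value of SVGA_FIFO_MAX.
 * @min: Current value of SVGA_FIFO_MIN.
 * @bytes: Number of bytes to copy.
 *
 * Marks the region reserved, then copies in at most two chunks,
 * wrapping around from @max to @min if needed.
 */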
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

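/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO word by word.
 *
 * @fifo_state: The FIFO state.
 * @vmw: Pointer to device private structure.
 * @next_cmd: Current value of SVGA_FIFO_NEXT_CMD.
 * @max: Current value of SVGA_FIFO_MAX.
 * @min: Current value of SVGA_FIFO_MIN.
 * @bytes: Number of bytes to copy.
 *
 * Fallback for FIFOs without SVGA_FIFO_CAP_RESERVE: each 32-bit word
 * is written and published by updating SVGA_FIFO_NEXT_CMD before the
 * next word is copied.
 */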
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

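/**
 * vmw_local_fifo_commit - Commit space previously reserved in the FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Copies out any bounce buffer, advances SVGA_FIFO_NEXT_CMD with
 * wraparound, clears the reservation, pings the host and releases the
 * fifo_mutex taken by vmw_local_fifo_reserve().
 */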
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

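/**
 * vmw_cmd_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */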
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_cmd_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

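/**
 * vmw_cmd_send_fence - Write a fence command to the command stream.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Assigned the fence sequence number that was used.
 *
 * Reserves space, picks the next nonzero sequence number and emits an
 * SVGA_CMD_FENCE command. If the device cannot signal fences, the
 * reservation is committed empty and the irq code emulates the fence.
 *
 * Returns 0 on success, -ENOMEM if FIFO space could not be reserved.
 */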
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}

/**
 * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->resource->mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
	} else {
		cmd->body.guestResult.gmrId = bo->resource->start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
				       uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->resource->start;
	cmd->body.offset = 0;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);

	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}

/**
 * vmw_cmd_supported - returns true if the given device supports
 * command queues.
 *
 * @vmw: The device private structure.
 *
 * Returns true if we can issue commands.
 */
bool vmw_cmd_supported(struct vmw_private *vmw)
{
	bool has_cmdbufs =
		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				      SVGA_CAP_CMD_BUFFERS_2)) != 0;
	if (vmw_is_svga_v3(vmw))
		return (has_cmdbufs &&
			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
	/*
	 * We have FIFO commands.
	 */
	return has_cmdbufs || vmw->fifo_mem != NULL;
}