cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmwgfx_mob.c (17794B)


// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "vmwgfx_drv.h"

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT_2
#endif
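
/*
 * VMW_PPN_SIZE is the size in bytes of a single page table entry (the
 * page frame number written by vmw_mob_assign_ppn() below): 8 bytes on
 * 64-bit builds, 4 bytes otherwise. The VMW_MOBFMT_PTDEPTH_* aliases
 * select the matching 64-bit or 32-bit SVGA3D page table format for
 * each indirection depth.
 */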

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo:           Buffer object holding the page table pages.
 * @num_pages:       Number of pages that make up the page table.
 * @pt_level:        The indirection level of the page table. 0-2.
 * @pt_root_page:    DMA address of the level 0 page of the page table.
 * @id:              Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 * @enabled:        Whether the table is enabled on this device.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true},
	{VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
	{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
	 NULL, true}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true},
	{VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
	{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
	 NULL, true},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

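/*
 * vmw_bo_unpin_unlocked - Reserve a buffer object, unpin it and
 * unreserve it again. The caller must not already hold the reservation.
 */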
static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
}


/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable_bo:      Buffer object holding the object table backing
 * @offset:         Start of table offset into dev_priv::otable_bo
 * @otable:         Pointer to otable metadata.
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
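		/*
		 * vmw_mob_pt_setup() leaves the raw indirection depth
		 * (1 or 2) in pt_level. Adding the difference between
		 * the VMW_MOBFMT_PTDEPTH_* aliases and the 32-bit
		 * SVGA3D_MOBFMT_PT_* values converts that depth into
		 * the page table format enum for this build (a no-op
		 * on 32-bit, where the alias is the enum itself).
		 */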
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, but the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable:         Pointer to otable metadata.
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

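/*
 * vmw_otable_batch_setup - Set up a batch of object tables
 *
 * @dev_priv:       Pointer to a device private structure
 * @batch:          Pointer to the batch of otables to set up
 *
 * Packs all enabled otables of the batch into a single buffer object at
 * consecutive page-aligned offsets, then issues a base setup command for
 * each of them. On failure, tables that were already set up are taken
 * down again and the buffer object is released.
 */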
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	SVGAOTableType i;
	int ret;

	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size = PFN_ALIGN(otables[i].size);
		bo_size += otables[i].size;
	}

	ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
	if (unlikely(ret != 0))
		return ret;

	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	vmw_bo_unpin_unlocked(batch->otable_bo);
	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (has_sm4_context(dev_priv)) {
		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
				   GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

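/*
 * vmw_otable_batch_takedown - Take down a batch of object tables
 *
 * @dev_priv:       Pointer to a device private structure
 * @batch:          Pointer to the batch of otables to take down
 *
 * Issues takedown commands for all enabled otables of the batch, then
 * fences, unpins and releases the backing buffer object.
 */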
static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
			       struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:  Number of data pages in the memory object buffer.
 */
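/*
 * Each loop iteration below accounts for one page table level: the
 * current level needs one VMW_PPN_SIZE entry per page of the level
 * below it, rounded up to whole pages. For example, assuming 4 KiB
 * pages and VMW_PPN_SIZE == 8, a 1 GiB object (262144 data pages)
 * needs 262144 * 8 B = 512 pages at level 1 and 512 * 8 B = 1 page
 * at level 2, for a total of 513 page table pages.
 */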
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += PFN_ALIGN(data_size);
	}

	return tot_size >> PAGE_SHIFT;
}

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:  Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(!mob))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv:    Pointer to a device private structure
 * @mob:         Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable.
 * Returns -ENOMEM if memory resources aren't sufficient and may
 * cause TTM buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	BUG_ON(mob->pt_bo != NULL);

	return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo);
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Page iterator over the underlying buffer
 *                  object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Page iterator over the buffer object's data
 *                  pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Sets up a multilevel mob page table, building one level per loop
 * iteration, bottom up.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter = {0};
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	BUG_ON(num_data_pages == 0);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
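	/*
	 * Build the levels bottom up: each pass writes the PPNs of the
	 * pages referenced by data_iter into fresh page table pages at
	 * pt_iter, then treats the level just written as the data for
	 * the next pass. The levels thus occupy consecutive ranges of
	 * the page table buffer object, with the single root page
	 * written last; save_pt_iter is left pointing at it.
	 */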
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		vmw_bo_unpin_unlocked(mob->pt_bo);
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (cmd) {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 *                populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the data
 *                  pages of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
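	/* A buffer without data pages needs no device binding. */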
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_no_cmd_space;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up) {
		vmw_bo_unpin_unlocked(mob->pt_bo);
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}
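
/*
 * Typical mob lifecycle as driven by the ttm_tt backend (a sketch of
 * the intended usage, not a call sequence taken verbatim from this
 * file):
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_data_pages);
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_data_pages, mob_id);
 *	...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */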