cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmwgfx_surface.c (60817B)


      1// SPDX-License-Identifier: GPL-2.0 OR MIT
      2/**************************************************************************
      3 *
      4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
      5 *
      6 * Permission is hereby granted, free of charge, to any person obtaining a
      7 * copy of this software and associated documentation files (the
      8 * "Software"), to deal in the Software without restriction, including
      9 * without limitation the rights to use, copy, modify, merge, publish,
     10 * distribute, sub license, and/or sell copies of the Software, and to
     11 * permit persons to whom the Software is furnished to do so, subject to
     12 * the following conditions:
     13 *
     14 * The above copyright notice and this permission notice (including the
     15 * next paragraph) shall be included in all copies or substantial portions
     16 * of the Software.
     17 *
     18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
     25 *
     26 **************************************************************************/
     27
     28#include <drm/ttm/ttm_placement.h>
     29
     30#include "vmwgfx_drv.h"
     31#include "vmwgfx_resource_priv.h"
     32#include "vmwgfx_so.h"
     33#include "vmwgfx_binding.h"
     34#include "vmw_surface_cache.h"
     35#include "device_include/svga3d_surfacedefs.h"
     36
     37#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
     38#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
     39#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
     40	(svga3d_flags & ((uint64_t)U32_MAX))
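        /*
         * Illustrative example of the flag packing above: a driver-internal
         * 64-bit flags value of 0x0000000100000002 splits into
         * SVGA3D_FLAGS_UPPER_32() == 0x1 and SVGA3D_FLAGS_LOWER_32() == 0x2,
         * and SVGA3D_FLAGS_64(0x1, 0x2) reassembles it.
         */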
     41
     42/**
     43 * struct vmw_user_surface - User-space visible surface resource
     44 *
     45 * @prime:          The TTM prime object.
     46 * @base:           The TTM base object handling user-space visibility.
     47 * @srf:            The surface metadata.
     48 * @master:         Master of the creating client. Used for security check.
     49 */
     50struct vmw_user_surface {
     51	struct ttm_prime_object prime;
     52	struct vmw_surface srf;
     53	struct drm_master *master;
     54};
     55
     56/**
     57 * struct vmw_surface_offset - Backing store mip level offset info
     58 *
     59 * @face:           Surface face.
     60 * @mip:            Mip level.
     61 * @bo_offset:      Offset into backing store of this mip level.
     62 *
     63 */
     64struct vmw_surface_offset {
     65	uint32_t face;
     66	uint32_t mip;
     67	uint32_t bo_offset;
     68};
     69
     70/**
     71 * struct vmw_surface_dirty - Surface dirty-tracker
     72 * @cache: Cached layout information of the surface.
     73 * @num_subres: Number of subresources.
     74 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
     75 */
     76struct vmw_surface_dirty {
     77	struct vmw_surface_cache cache;
     78	u32 num_subres;
     79	SVGA3dBox boxes[];
     80};
     81
     82static void vmw_user_surface_free(struct vmw_resource *res);
     83static struct vmw_resource *
     84vmw_user_surface_base_to_res(struct ttm_base_object *base);
     85static int vmw_legacy_srf_bind(struct vmw_resource *res,
     86			       struct ttm_validate_buffer *val_buf);
     87static int vmw_legacy_srf_unbind(struct vmw_resource *res,
     88				 bool readback,
     89				 struct ttm_validate_buffer *val_buf);
     90static int vmw_legacy_srf_create(struct vmw_resource *res);
     91static int vmw_legacy_srf_destroy(struct vmw_resource *res);
     92static int vmw_gb_surface_create(struct vmw_resource *res);
     93static int vmw_gb_surface_bind(struct vmw_resource *res,
     94			       struct ttm_validate_buffer *val_buf);
     95static int vmw_gb_surface_unbind(struct vmw_resource *res,
     96				 bool readback,
     97				 struct ttm_validate_buffer *val_buf);
     98static int vmw_gb_surface_destroy(struct vmw_resource *res);
     99static int
    100vmw_gb_surface_define_internal(struct drm_device *dev,
    101			       struct drm_vmw_gb_surface_create_ext_req *req,
    102			       struct drm_vmw_gb_surface_create_rep *rep,
    103			       struct drm_file *file_priv);
    104static int
    105vmw_gb_surface_reference_internal(struct drm_device *dev,
    106				  struct drm_vmw_surface_arg *req,
    107				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
    108				  struct drm_file *file_priv);
    109
    110static void vmw_surface_dirty_free(struct vmw_resource *res);
    111static int vmw_surface_dirty_alloc(struct vmw_resource *res);
    112static int vmw_surface_dirty_sync(struct vmw_resource *res);
    113static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
    114					size_t end);
    115static int vmw_surface_clean(struct vmw_resource *res);
    116
    117static const struct vmw_user_resource_conv user_surface_conv = {
    118	.object_type = VMW_RES_SURFACE,
    119	.base_obj_to_res = vmw_user_surface_base_to_res,
    120	.res_free = vmw_user_surface_free
    121};
    122
    123const struct vmw_user_resource_conv *user_surface_converter =
    124	&user_surface_conv;
    125
    126static const struct vmw_res_func vmw_legacy_surface_func = {
    127	.res_type = vmw_res_surface,
    128	.needs_backup = false,
    129	.may_evict = true,
    130	.prio = 1,
    131	.dirty_prio = 1,
    132	.type_name = "legacy surfaces",
    133	.backup_placement = &vmw_srf_placement,
    134	.create = &vmw_legacy_srf_create,
    135	.destroy = &vmw_legacy_srf_destroy,
    136	.bind = &vmw_legacy_srf_bind,
    137	.unbind = &vmw_legacy_srf_unbind
    138};
    139
    140static const struct vmw_res_func vmw_gb_surface_func = {
    141	.res_type = vmw_res_surface,
    142	.needs_backup = true,
    143	.may_evict = true,
    144	.prio = 1,
    145	.dirty_prio = 2,
    146	.type_name = "guest backed surfaces",
    147	.backup_placement = &vmw_mob_placement,
    148	.create = vmw_gb_surface_create,
    149	.destroy = vmw_gb_surface_destroy,
    150	.bind = vmw_gb_surface_bind,
    151	.unbind = vmw_gb_surface_unbind,
    152	.dirty_alloc = vmw_surface_dirty_alloc,
    153	.dirty_free = vmw_surface_dirty_free,
    154	.dirty_sync = vmw_surface_dirty_sync,
    155	.dirty_range_add = vmw_surface_dirty_range_add,
    156	.clean = vmw_surface_clean,
    157};
    158
    159/*
    160 * struct vmw_surface_dma - SVGA3D DMA command
    161 */
    162struct vmw_surface_dma {
    163	SVGA3dCmdHeader header;
    164	SVGA3dCmdSurfaceDMA body;
    165	SVGA3dCopyBox cb;
    166	SVGA3dCmdSurfaceDMASuffix suffix;
    167};
    168
    169/*
    170 * struct vmw_surface_define - SVGA3D Surface Define command
    171 */
    172struct vmw_surface_define {
    173	SVGA3dCmdHeader header;
    174	SVGA3dCmdDefineSurface body;
    175};
    176
    177/*
    178 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
    179 */
    180struct vmw_surface_destroy {
    181	SVGA3dCmdHeader header;
    182	SVGA3dCmdDestroySurface body;
    183};
    184
    185
    186/**
    187 * vmw_surface_dma_size - Compute fifo size for a dma command.
    188 *
    189 * @srf: Pointer to a struct vmw_surface
    190 *
    191 * Computes the required size for a surface dma command for backup or
    192 * restoration of the surface represented by @srf.
    193 */
    194static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
    195{
    196	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
    197}
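        /*
         * Worked example (illustrative): a cube-map surface with six faces
         * and one mip level per face has num_sizes == 6, so a backup or
         * restore needs room for six struct vmw_surface_dma commands.
         */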
    198
    199
    200/**
    201 * vmw_surface_define_size - Compute fifo size for a surface define command.
    202 *
    203 * @srf: Pointer to a struct vmw_surface
    204 *
    205 * Computes the required size for a surface define command for the definition
    206 * of the surface represented by @srf.
    207 */
    208static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
    209{
    210	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
    211		sizeof(SVGA3dSize);
    212}
    213
    214
    215/**
    216 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
    217 *
    218 * Computes the required size for a surface destroy command for the destruction
    219 * of a hw surface.
    220 */
    221static inline uint32_t vmw_surface_destroy_size(void)
    222{
    223	return sizeof(struct vmw_surface_destroy);
    224}
    225
    226/**
    227 * vmw_surface_destroy_encode - Encode a surface_destroy command.
    228 *
    229 * @id: The surface id
    230 * @cmd_space: Pointer to memory area in which the commands should be encoded.
    231 */
    232static void vmw_surface_destroy_encode(uint32_t id,
    233				       void *cmd_space)
    234{
    235	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
    236		cmd_space;
    237
    238	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
    239	cmd->header.size = sizeof(cmd->body);
    240	cmd->body.sid = id;
    241}
    242
    243/**
    244 * vmw_surface_define_encode - Encode a surface_define command.
    245 *
    246 * @srf: Pointer to a struct vmw_surface object.
    247 * @cmd_space: Pointer to memory area in which the commands should be encoded.
    248 */
    249static void vmw_surface_define_encode(const struct vmw_surface *srf,
    250				      void *cmd_space)
    251{
    252	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
    253		cmd_space;
    254	struct drm_vmw_size *src_size;
    255	SVGA3dSize *cmd_size;
    256	uint32_t cmd_len;
    257	int i;
    258
    259	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
    260		sizeof(SVGA3dSize);
    261
    262	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
    263	cmd->header.size = cmd_len;
    264	cmd->body.sid = srf->res.id;
    265	/*
     266	 * Downcast of surfaceFlags, which was upcast when received from
     267	 * user-space, since the driver internally stores it as 64 bit.
     268	 * The legacy surface define supports only 32-bit flags.
    269	 */
    270	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
    271	cmd->body.format = srf->metadata.format;
    272	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
    273		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
    274
    275	cmd += 1;
    276	cmd_size = (SVGA3dSize *) cmd;
    277	src_size = srf->metadata.sizes;
    278
    279	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
    280		cmd_size->width = src_size->width;
    281		cmd_size->height = src_size->height;
    282		cmd_size->depth = src_size->depth;
    283	}
    284}
    285
    286/**
    287 * vmw_surface_dma_encode - Encode a surface_dma command.
    288 *
    289 * @srf: Pointer to a struct vmw_surface object.
    290 * @cmd_space: Pointer to memory area in which the commands should be encoded.
    291 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
    292 * should be placed or read from.
    293 * @to_surface: Boolean whether to DMA to the surface or from the surface.
    294 */
    295static void vmw_surface_dma_encode(struct vmw_surface *srf,
    296				   void *cmd_space,
    297				   const SVGAGuestPtr *ptr,
    298				   bool to_surface)
    299{
    300	uint32_t i;
    301	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
    302	const struct SVGA3dSurfaceDesc *desc =
    303		vmw_surface_get_desc(srf->metadata.format);
    304
    305	for (i = 0; i < srf->metadata.num_sizes; ++i) {
    306		SVGA3dCmdHeader *header = &cmd->header;
    307		SVGA3dCmdSurfaceDMA *body = &cmd->body;
    308		SVGA3dCopyBox *cb = &cmd->cb;
    309		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
    310		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
    311		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];
    312
    313		header->id = SVGA_3D_CMD_SURFACE_DMA;
    314		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
    315
    316		body->guest.ptr = *ptr;
    317		body->guest.ptr.offset += cur_offset->bo_offset;
    318		body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
    319		body->host.sid = srf->res.id;
    320		body->host.face = cur_offset->face;
    321		body->host.mipmap = cur_offset->mip;
    322		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
    323				  SVGA3D_READ_HOST_VRAM);
    324		cb->x = 0;
    325		cb->y = 0;
    326		cb->z = 0;
    327		cb->srcx = 0;
    328		cb->srcy = 0;
    329		cb->srcz = 0;
    330		cb->w = cur_size->width;
    331		cb->h = cur_size->height;
    332		cb->d = cur_size->depth;
    333
    334		suffix->suffixSize = sizeof(*suffix);
    335		suffix->maximumOffset =
    336			vmw_surface_get_image_buffer_size(desc, cur_size,
    337							    body->guest.pitch);
    338		suffix->flags.discard = 0;
    339		suffix->flags.unsynchronized = 0;
    340		suffix->flags.reserved = 0;
    341		++cmd;
    342	}
     343}
    344
    345
    346/**
    347 * vmw_hw_surface_destroy - destroy a Device surface
    348 *
    349 * @res:        Pointer to a struct vmw_resource embedded in a struct
    350 *              vmw_surface.
    351 *
     352 * Destroys the device surface associated with a struct vmw_surface, if
     353 * any, and adjusts the resource count accordingly.
    354 */
    355static void vmw_hw_surface_destroy(struct vmw_resource *res)
    356{
    357
    358	struct vmw_private *dev_priv = res->dev_priv;
    359	void *cmd;
    360
    361	if (res->func->destroy == vmw_gb_surface_destroy) {
    362		(void) vmw_gb_surface_destroy(res);
    363		return;
    364	}
    365
    366	if (res->id != -1) {
    367
    368		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
    369		if (unlikely(!cmd))
    370			return;
    371
    372		vmw_surface_destroy_encode(res->id, cmd);
    373		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
    374
    375		/*
     376		 * TODO: Use a used_memory_size_atomic counter, or a separate
     377		 * lock, to avoid taking dev_priv::cmdbuf_mutex in the
     378		 * destroy path.
    379		 */
    380
    381		mutex_lock(&dev_priv->cmdbuf_mutex);
    382		dev_priv->used_memory_size -= res->backup_size;
    383		mutex_unlock(&dev_priv->cmdbuf_mutex);
    384	}
    385}
    386
    387/**
    388 * vmw_legacy_srf_create - Create a device surface as part of the
    389 * resource validation process.
    390 *
    391 * @res: Pointer to a struct vmw_surface.
    392 *
     393 * If the surface doesn't have a hw id, allocates one and queues a surface define command.
     394 *
     395 * Returns -EBUSY if there weren't sufficient device resources to
     396 * complete the validation. Retry after freeing up resources.
    397 *
    398 * May return other errors if the kernel is out of guest resources.
    399 */
    400static int vmw_legacy_srf_create(struct vmw_resource *res)
    401{
    402	struct vmw_private *dev_priv = res->dev_priv;
    403	struct vmw_surface *srf;
    404	uint32_t submit_size;
    405	uint8_t *cmd;
    406	int ret;
    407
    408	if (likely(res->id != -1))
    409		return 0;
    410
    411	srf = vmw_res_to_srf(res);
    412	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
    413		     dev_priv->memory_size))
    414		return -EBUSY;
    415
    416	/*
    417	 * Alloc id for the resource.
    418	 */
    419
    420	ret = vmw_resource_alloc_id(res);
    421	if (unlikely(ret != 0)) {
    422		DRM_ERROR("Failed to allocate a surface id.\n");
    423		goto out_no_id;
    424	}
    425
    426	if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
    427		ret = -EBUSY;
    428		goto out_no_fifo;
    429	}
    430
    431	/*
     432	 * Encode the surface define commands.
    433	 */
    434
    435	submit_size = vmw_surface_define_size(srf);
    436	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
    437	if (unlikely(!cmd)) {
    438		ret = -ENOMEM;
    439		goto out_no_fifo;
    440	}
    441
    442	vmw_surface_define_encode(srf, cmd);
    443	vmw_cmd_commit(dev_priv, submit_size);
    444	vmw_fifo_resource_inc(dev_priv);
    445
    446	/*
    447	 * Surface memory usage accounting.
    448	 */
    449
    450	dev_priv->used_memory_size += res->backup_size;
    451	return 0;
    452
    453out_no_fifo:
    454	vmw_resource_release_id(res);
    455out_no_id:
    456	return ret;
    457}
    458
    459/**
    460 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
    461 *
    462 * @res:            Pointer to a struct vmw_res embedded in a struct
    463 *                  vmw_surface.
    464 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
    465 *                  information about the backup buffer.
     466 * @bind:           Boolean whether to DMA to the surface.
    467 *
    468 * Transfer backup data to or from a legacy surface as part of the
    469 * validation process.
    470 * May return other errors if the kernel is out of guest resources.
    471 * The backup buffer will be fenced or idle upon successful completion,
    472 * and if the surface needs persistent backup storage, the backup buffer
    473 * will also be returned reserved iff @bind is true.
    474 */
    475static int vmw_legacy_srf_dma(struct vmw_resource *res,
    476			      struct ttm_validate_buffer *val_buf,
    477			      bool bind)
    478{
    479	SVGAGuestPtr ptr;
    480	struct vmw_fence_obj *fence;
    481	uint32_t submit_size;
    482	struct vmw_surface *srf = vmw_res_to_srf(res);
    483	uint8_t *cmd;
    484	struct vmw_private *dev_priv = res->dev_priv;
    485
    486	BUG_ON(!val_buf->bo);
    487	submit_size = vmw_surface_dma_size(srf);
    488	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
    489	if (unlikely(!cmd))
    490		return -ENOMEM;
    491
    492	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
    493	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
    494
    495	vmw_cmd_commit(dev_priv, submit_size);
    496
    497	/*
    498	 * Create a fence object and fence the backup buffer.
    499	 */
    500
    501	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
    502					  &fence, NULL);
    503
    504	vmw_bo_fence_single(val_buf->bo, fence);
    505
    506	if (likely(fence != NULL))
    507		vmw_fence_obj_unreference(&fence);
    508
    509	return 0;
    510}
    511
    512/**
    513 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
    514 *                       surface validation process.
    515 *
    516 * @res:            Pointer to a struct vmw_res embedded in a struct
    517 *                  vmw_surface.
    518 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
    519 *                  information about the backup buffer.
    520 *
    521 * This function will copy backup data to the surface if the
    522 * backup buffer is dirty.
    523 */
    524static int vmw_legacy_srf_bind(struct vmw_resource *res,
    525			       struct ttm_validate_buffer *val_buf)
    526{
    527	if (!res->backup_dirty)
    528		return 0;
    529
    530	return vmw_legacy_srf_dma(res, val_buf, true);
    531}
    532
    533
    534/**
    535 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
    536 *                         surface eviction process.
    537 *
    538 * @res:            Pointer to a struct vmw_res embedded in a struct
    539 *                  vmw_surface.
    540 * @readback:       Readback - only true if dirty
    541 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
    542 *                  information about the backup buffer.
    543 *
    544 * This function will copy backup data from the surface.
    545 */
    546static int vmw_legacy_srf_unbind(struct vmw_resource *res,
    547				 bool readback,
    548				 struct ttm_validate_buffer *val_buf)
    549{
    550	if (unlikely(readback))
    551		return vmw_legacy_srf_dma(res, val_buf, false);
    552	return 0;
    553}
    554
    555/**
    556 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
    557 *                          resource eviction process.
    558 *
    559 * @res:            Pointer to a struct vmw_res embedded in a struct
    560 *                  vmw_surface.
    561 */
    562static int vmw_legacy_srf_destroy(struct vmw_resource *res)
    563{
    564	struct vmw_private *dev_priv = res->dev_priv;
    565	uint32_t submit_size;
    566	uint8_t *cmd;
    567
    568	BUG_ON(res->id == -1);
    569
    570	/*
    571	 * Encode the dma- and surface destroy commands.
    572	 */
    573
    574	submit_size = vmw_surface_destroy_size();
    575	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
    576	if (unlikely(!cmd))
    577		return -ENOMEM;
    578
    579	vmw_surface_destroy_encode(res->id, cmd);
    580	vmw_cmd_commit(dev_priv, submit_size);
    581
    582	/*
    583	 * Surface memory usage accounting.
    584	 */
    585
    586	dev_priv->used_memory_size -= res->backup_size;
    587
    588	/*
    589	 * Release the surface ID.
    590	 */
    591
    592	vmw_resource_release_id(res);
    593	vmw_fifo_resource_dec(dev_priv);
    594
    595	return 0;
    596}
    597
    598
    599/**
    600 * vmw_surface_init - initialize a struct vmw_surface
    601 *
    602 * @dev_priv:       Pointer to a device private struct.
    603 * @srf:            Pointer to the struct vmw_surface to initialize.
    604 * @res_free:       Pointer to a resource destructor used to free
    605 *                  the object.
    606 */
    607static int vmw_surface_init(struct vmw_private *dev_priv,
    608			    struct vmw_surface *srf,
    609			    void (*res_free) (struct vmw_resource *res))
    610{
    611	int ret;
    612	struct vmw_resource *res = &srf->res;
    613
    614	BUG_ON(!res_free);
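        	/*
        	 * Devices with MOB (guest-backed object) support use the
        	 * guest-backed surface backend; older devices fall back to the
        	 * legacy surface backend defined above.
        	 */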
    615	ret = vmw_resource_init(dev_priv, res, true, res_free,
    616				(dev_priv->has_mob) ? &vmw_gb_surface_func :
    617				&vmw_legacy_surface_func);
    618
    619	if (unlikely(ret != 0)) {
    620		res_free(res);
    621		return ret;
    622	}
    623
    624	/*
    625	 * The surface won't be visible to hardware until a
    626	 * surface validate.
    627	 */
    628
    629	INIT_LIST_HEAD(&srf->view_list);
    630	res->hw_destroy = vmw_hw_surface_destroy;
    631	return ret;
    632}
    633
    634/**
    635 * vmw_user_surface_base_to_res - TTM base object to resource converter for
    636 *                                user visible surfaces
    637 *
    638 * @base:           Pointer to a TTM base object
    639 *
    640 * Returns the struct vmw_resource embedded in a struct vmw_surface
    641 * for the user-visible object identified by the TTM base object @base.
    642 */
    643static struct vmw_resource *
    644vmw_user_surface_base_to_res(struct ttm_base_object *base)
    645{
    646	return &(container_of(base, struct vmw_user_surface,
    647			      prime.base)->srf.res);
    648}
    649
    650/**
    651 * vmw_user_surface_free - User visible surface resource destructor
    652 *
    653 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
    654 */
    655static void vmw_user_surface_free(struct vmw_resource *res)
    656{
    657	struct vmw_surface *srf = vmw_res_to_srf(res);
    658	struct vmw_user_surface *user_srf =
    659	    container_of(srf, struct vmw_user_surface, srf);
    660
    661	WARN_ON_ONCE(res->dirty);
    662	if (user_srf->master)
    663		drm_master_put(&user_srf->master);
    664	kfree(srf->offsets);
    665	kfree(srf->metadata.sizes);
    666	kfree(srf->snooper.image);
    667	ttm_prime_object_kfree(user_srf, prime);
    668}
    669
    670/**
    671 * vmw_user_surface_base_release - User visible surface TTM base object destructor
    672 *
    673 * @p_base:         Pointer to a pointer to a TTM base object
    674 *                  embedded in a struct vmw_user_surface.
    675 *
     676 * Drops the base object's reference on its resource and sets the
     677 * pointer pointed to by *p_base to NULL.
    678 */
    679static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
    680{
    681	struct ttm_base_object *base = *p_base;
    682	struct vmw_user_surface *user_srf =
    683	    container_of(base, struct vmw_user_surface, prime.base);
    684	struct vmw_resource *res = &user_srf->srf.res;
    685
    686	if (base->shareable && res && res->backup)
    687		drm_gem_object_put(&res->backup->base.base);
    688
    689	*p_base = NULL;
    690	vmw_resource_unreference(&res);
    691}
    692
    693/**
    694 * vmw_surface_destroy_ioctl - Ioctl function implementing
    695 *                                  the user surface destroy functionality.
    696 *
    697 * @dev:            Pointer to a struct drm_device.
    698 * @data:           Pointer to data copied from / to user-space.
    699 * @file_priv:      Pointer to a drm file private structure.
    700 */
    701int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
    702			      struct drm_file *file_priv)
    703{
    704	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
    705	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    706
    707	return ttm_ref_object_base_unref(tfile, arg->sid);
    708}
    709
    710/**
    711 * vmw_surface_define_ioctl - Ioctl function implementing
    712 *                                  the user surface define functionality.
    713 *
    714 * @dev:            Pointer to a struct drm_device.
    715 * @data:           Pointer to data copied from / to user-space.
    716 * @file_priv:      Pointer to a drm file private structure.
    717 */
    718int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
    719			     struct drm_file *file_priv)
    720{
    721	struct vmw_private *dev_priv = vmw_priv(dev);
    722	struct vmw_user_surface *user_srf;
    723	struct vmw_surface *srf;
    724	struct vmw_surface_metadata *metadata;
    725	struct vmw_resource *res;
    726	struct vmw_resource *tmp;
    727	union drm_vmw_surface_create_arg *arg =
    728	    (union drm_vmw_surface_create_arg *)data;
    729	struct drm_vmw_surface_create_req *req = &arg->req;
    730	struct drm_vmw_surface_arg *rep = &arg->rep;
    731	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    732	int ret;
    733	int i, j;
    734	uint32_t cur_bo_offset;
    735	struct drm_vmw_size *cur_size;
    736	struct vmw_surface_offset *cur_offset;
    737	uint32_t num_sizes;
    738	const SVGA3dSurfaceDesc *desc;
    739
    740	num_sizes = 0;
    741	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
    742		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
    743			return -EINVAL;
    744		num_sizes += req->mip_levels[i];
    745	}
    746
    747	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
    748	    num_sizes == 0)
    749		return -EINVAL;
    750
    751	desc = vmw_surface_get_desc(req->format);
    752	if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
    753		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
    754			       req->format);
    755		return -EINVAL;
    756	}
    757
    758	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
    759	if (unlikely(!user_srf)) {
    760		ret = -ENOMEM;
    761		goto out_unlock;
    762	}
    763
    764	srf = &user_srf->srf;
    765	metadata = &srf->metadata;
    766	res = &srf->res;
    767
    768	/* Driver internally stores as 64-bit flags */
    769	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
    770	metadata->format = req->format;
    771	metadata->scanout = req->scanout;
    772
    773	memcpy(metadata->mip_levels, req->mip_levels,
    774	       sizeof(metadata->mip_levels));
    775	metadata->num_sizes = num_sizes;
    776	metadata->sizes =
    777		memdup_user((struct drm_vmw_size __user *)(unsigned long)
    778			    req->size_addr,
    779			    sizeof(*metadata->sizes) * metadata->num_sizes);
    780	if (IS_ERR(metadata->sizes)) {
    781		ret = PTR_ERR(metadata->sizes);
    782		goto out_no_sizes;
    783	}
    784	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
    785				     GFP_KERNEL);
    786	if (unlikely(!srf->offsets)) {
    787		ret = -ENOMEM;
    788		goto out_no_offsets;
    789	}
    790
    791	metadata->base_size = *srf->metadata.sizes;
    792	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
    793	metadata->multisample_count = 0;
    794	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
    795	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;
    796
    797	cur_bo_offset = 0;
    798	cur_offset = srf->offsets;
    799	cur_size = metadata->sizes;
    800
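        	/*
        	 * Walk every face and mip level, recording each subresource's
        	 * offset into the backing store and accumulating the total
        	 * backup buffer size.
        	 */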
    801	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
    802		for (j = 0; j < metadata->mip_levels[i]; ++j) {
    803			uint32_t stride = vmw_surface_calculate_pitch(
    804						  desc, cur_size);
    805
    806			cur_offset->face = i;
    807			cur_offset->mip = j;
    808			cur_offset->bo_offset = cur_bo_offset;
    809			cur_bo_offset += vmw_surface_get_image_buffer_size
    810				(desc, cur_size, stride);
    811			++cur_offset;
    812			++cur_size;
    813		}
    814	}
    815	res->backup_size = cur_bo_offset;
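        	/*
        	 * Only a 64x64 A8R8G8B8 scanout surface (the legacy cursor
        	 * layout) gets a snooper image, so that cursor contents written
        	 * via surface DMA can be tracked by the cursor-snooping code.
        	 */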
    816	if (metadata->scanout &&
    817	    metadata->num_sizes == 1 &&
    818	    metadata->sizes[0].width == 64 &&
    819	    metadata->sizes[0].height == 64 &&
    820	    metadata->format == SVGA3D_A8R8G8B8) {
    821
    822		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
    823		if (!srf->snooper.image) {
    824			DRM_ERROR("Failed to allocate cursor_image\n");
    825			ret = -ENOMEM;
    826			goto out_no_copy;
    827		}
    828	} else {
    829		srf->snooper.image = NULL;
    830	}
    831
    832	user_srf->prime.base.shareable = false;
    833	user_srf->prime.base.tfile = NULL;
    834	if (drm_is_primary_client(file_priv))
    835		user_srf->master = drm_file_get_master(file_priv);
    836
     837	/*
    838	 * From this point, the generic resource management functions
    839	 * destroy the object on failure.
    840	 */
    841
    842	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
    843	if (unlikely(ret != 0))
    844		goto out_unlock;
    845
    846	/*
    847	 * A gb-aware client referencing a shared surface will
    848	 * expect a backup buffer to be present.
    849	 */
    850	if (dev_priv->has_mob && req->shareable) {
    851		uint32_t backup_handle;
    852
    853		ret = vmw_gem_object_create_with_handle(dev_priv,
    854							file_priv,
    855							res->backup_size,
    856							&backup_handle,
    857							&res->backup);
    858		if (unlikely(ret != 0)) {
    859			vmw_resource_unreference(&res);
    860			goto out_unlock;
    861		}
    862		vmw_bo_reference(res->backup);
    863		drm_gem_object_get(&res->backup->base.base);
    864	}
    865
    866	tmp = vmw_resource_reference(&srf->res);
    867	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
    868				    req->shareable, VMW_RES_SURFACE,
    869				    &vmw_user_surface_base_release);
    870
    871	if (unlikely(ret != 0)) {
    872		vmw_resource_unreference(&tmp);
    873		vmw_resource_unreference(&res);
    874		goto out_unlock;
    875	}
    876
    877	rep->sid = user_srf->prime.base.handle;
    878	vmw_resource_unreference(&res);
    879
    880	return 0;
    881out_no_copy:
    882	kfree(srf->offsets);
    883out_no_offsets:
    884	kfree(metadata->sizes);
    885out_no_sizes:
    886	ttm_prime_object_kfree(user_srf, prime);
    887out_unlock:
    888	return ret;
    889}
    890
    891
    892static int
    893vmw_surface_handle_reference(struct vmw_private *dev_priv,
    894			     struct drm_file *file_priv,
    895			     uint32_t u_handle,
    896			     enum drm_vmw_handle_type handle_type,
    897			     struct ttm_base_object **base_p)
    898{
    899	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    900	struct vmw_user_surface *user_srf;
    901	uint32_t handle;
    902	struct ttm_base_object *base;
    903	int ret;
    904
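        	/*
        	 * For DRM_VMW_HANDLE_PRIME, @u_handle is a prime file
        	 * descriptor that must first be converted to a TTM object
        	 * handle; otherwise it already is one.
        	 */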
    905	if (handle_type == DRM_VMW_HANDLE_PRIME) {
    906		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
    907		if (unlikely(ret != 0))
    908			return ret;
    909	} else {
    910		handle = u_handle;
    911	}
    912
    913	ret = -EINVAL;
    914	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
    915	if (unlikely(!base)) {
    916		VMW_DEBUG_USER("Could not find surface to reference.\n");
    917		goto out_no_lookup;
    918	}
    919
    920	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
    921		VMW_DEBUG_USER("Referenced object is not a surface.\n");
    922		goto out_bad_resource;
    923	}
    924	if (handle_type != DRM_VMW_HANDLE_PRIME) {
    925		bool require_exist = false;
    926
    927		user_srf = container_of(base, struct vmw_user_surface,
    928					prime.base);
    929
     930		/* Error out if we are an unauthenticated primary client */
    931		if (drm_is_primary_client(file_priv) &&
    932		    !file_priv->authenticated) {
    933			ret = -EACCES;
    934			goto out_bad_resource;
    935		}
    936
    937		/*
    938		 * Make sure the surface creator has the same
    939		 * authenticating master, or is already registered with us.
    940		 */
    941		if (drm_is_primary_client(file_priv) &&
    942		    user_srf->master != file_priv->master)
    943			require_exist = true;
    944
    945		if (unlikely(drm_is_render_client(file_priv)))
    946			require_exist = true;
    947
    948		ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
    949		if (unlikely(ret != 0)) {
    950			DRM_ERROR("Could not add a reference to a surface.\n");
    951			goto out_bad_resource;
    952		}
    953	}
    954
    955	*base_p = base;
    956	return 0;
    957
    958out_bad_resource:
    959	ttm_base_object_unref(&base);
    960out_no_lookup:
    961	if (handle_type == DRM_VMW_HANDLE_PRIME)
    962		(void) ttm_ref_object_base_unref(tfile, handle);
    963
    964	return ret;
    965}
    966
    967/**
    968 * vmw_surface_reference_ioctl - Ioctl function implementing
    969 *                                  the user surface reference functionality.
    970 *
    971 * @dev:            Pointer to a struct drm_device.
    972 * @data:           Pointer to data copied from / to user-space.
    973 * @file_priv:      Pointer to a drm file private structure.
    974 */
    975int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
    976				struct drm_file *file_priv)
    977{
    978	struct vmw_private *dev_priv = vmw_priv(dev);
    979	union drm_vmw_surface_reference_arg *arg =
    980	    (union drm_vmw_surface_reference_arg *)data;
    981	struct drm_vmw_surface_arg *req = &arg->req;
    982	struct drm_vmw_surface_create_req *rep = &arg->rep;
    983	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    984	struct vmw_surface *srf;
    985	struct vmw_user_surface *user_srf;
    986	struct drm_vmw_size __user *user_sizes;
    987	struct ttm_base_object *base;
    988	int ret;
    989
    990	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
    991					   req->handle_type, &base);
    992	if (unlikely(ret != 0))
    993		return ret;
    994
    995	user_srf = container_of(base, struct vmw_user_surface, prime.base);
    996	srf = &user_srf->srf;
    997
    998	/* Downcast of flags when sending back to user space */
    999	rep->flags = (uint32_t)srf->metadata.flags;
   1000	rep->format = srf->metadata.format;
   1001	memcpy(rep->mip_levels, srf->metadata.mip_levels,
   1002	       sizeof(srf->metadata.mip_levels));
   1003	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
   1004	    rep->size_addr;
   1005
   1006	if (user_sizes)
   1007		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
   1008				   sizeof(srf->metadata.base_size));
   1009	if (unlikely(ret != 0)) {
   1010		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
   1011			       srf->metadata.num_sizes);
   1012		ttm_ref_object_base_unref(tfile, base->handle);
   1013		ret = -EFAULT;
   1014	}
   1015
   1016	ttm_base_object_unref(&base);
   1017
   1018	return ret;
   1019}
   1020
   1021/**
    1022 * vmw_gb_surface_create - Encode and submit a surface define command.
   1023 *
   1024 * @res:        Pointer to a struct vmw_resource embedded in a struct
   1025 *              vmw_surface.
   1026 */
   1027static int vmw_gb_surface_create(struct vmw_resource *res)
   1028{
   1029	struct vmw_private *dev_priv = res->dev_priv;
   1030	struct vmw_surface *srf = vmw_res_to_srf(res);
   1031	struct vmw_surface_metadata *metadata = &srf->metadata;
   1032	uint32_t cmd_len, cmd_id, submit_len;
   1033	int ret;
   1034	struct {
   1035		SVGA3dCmdHeader header;
   1036		SVGA3dCmdDefineGBSurface body;
   1037	} *cmd;
   1038	struct {
   1039		SVGA3dCmdHeader header;
   1040		SVGA3dCmdDefineGBSurface_v2 body;
   1041	} *cmd2;
   1042	struct {
   1043		SVGA3dCmdHeader header;
   1044		SVGA3dCmdDefineGBSurface_v3 body;
   1045	} *cmd3;
   1046	struct {
   1047		SVGA3dCmdHeader header;
   1048		SVGA3dCmdDefineGBSurface_v4 body;
   1049	} *cmd4;
   1050
   1051	if (likely(res->id != -1))
   1052		return 0;
   1053
   1054	vmw_fifo_resource_inc(dev_priv);
   1055	ret = vmw_resource_alloc_id(res);
   1056	if (unlikely(ret != 0)) {
   1057		DRM_ERROR("Failed to allocate a surface id.\n");
   1058		goto out_no_id;
   1059	}
   1060
   1061	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
   1062		ret = -EBUSY;
   1063		goto out_no_fifo;
   1064	}
   1065
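        	/*
        	 * Pick the define-command variant: V4 for SM5 array surfaces,
        	 * V3 for SM4.1 array surfaces, V2 for other array surfaces,
        	 * and the base DEFINE_GB_SURFACE command otherwise.
        	 */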
   1066	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
   1067		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
   1068		cmd_len = sizeof(cmd4->body);
   1069		submit_len = sizeof(*cmd4);
   1070	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
   1071		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
   1072		cmd_len = sizeof(cmd3->body);
   1073		submit_len = sizeof(*cmd3);
   1074	} else if (metadata->array_size > 0) {
   1075		/* VMW_SM_4 support verified at creation time. */
   1076		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
   1077		cmd_len = sizeof(cmd2->body);
   1078		submit_len = sizeof(*cmd2);
   1079	} else {
   1080		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
   1081		cmd_len = sizeof(cmd->body);
   1082		submit_len = sizeof(*cmd);
   1083	}
   1084
   1085	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
   1086	cmd2 = (typeof(cmd2))cmd;
   1087	cmd3 = (typeof(cmd3))cmd;
   1088	cmd4 = (typeof(cmd4))cmd;
   1089	if (unlikely(!cmd)) {
   1090		ret = -ENOMEM;
   1091		goto out_no_fifo;
   1092	}
   1093
   1094	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
   1095		cmd4->header.id = cmd_id;
   1096		cmd4->header.size = cmd_len;
   1097		cmd4->body.sid = srf->res.id;
   1098		cmd4->body.surfaceFlags = metadata->flags;
   1099		cmd4->body.format = metadata->format;
   1100		cmd4->body.numMipLevels = metadata->mip_levels[0];
   1101		cmd4->body.multisampleCount = metadata->multisample_count;
   1102		cmd4->body.multisamplePattern = metadata->multisample_pattern;
   1103		cmd4->body.qualityLevel = metadata->quality_level;
   1104		cmd4->body.autogenFilter = metadata->autogen_filter;
   1105		cmd4->body.size.width = metadata->base_size.width;
   1106		cmd4->body.size.height = metadata->base_size.height;
   1107		cmd4->body.size.depth = metadata->base_size.depth;
   1108		cmd4->body.arraySize = metadata->array_size;
   1109		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
   1110	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
   1111		cmd3->header.id = cmd_id;
   1112		cmd3->header.size = cmd_len;
   1113		cmd3->body.sid = srf->res.id;
   1114		cmd3->body.surfaceFlags = metadata->flags;
   1115		cmd3->body.format = metadata->format;
   1116		cmd3->body.numMipLevels = metadata->mip_levels[0];
   1117		cmd3->body.multisampleCount = metadata->multisample_count;
   1118		cmd3->body.multisamplePattern = metadata->multisample_pattern;
   1119		cmd3->body.qualityLevel = metadata->quality_level;
   1120		cmd3->body.autogenFilter = metadata->autogen_filter;
   1121		cmd3->body.size.width = metadata->base_size.width;
   1122		cmd3->body.size.height = metadata->base_size.height;
   1123		cmd3->body.size.depth = metadata->base_size.depth;
   1124		cmd3->body.arraySize = metadata->array_size;
   1125	} else if (metadata->array_size > 0) {
   1126		cmd2->header.id = cmd_id;
   1127		cmd2->header.size = cmd_len;
   1128		cmd2->body.sid = srf->res.id;
   1129		cmd2->body.surfaceFlags = metadata->flags;
   1130		cmd2->body.format = metadata->format;
   1131		cmd2->body.numMipLevels = metadata->mip_levels[0];
   1132		cmd2->body.multisampleCount = metadata->multisample_count;
   1133		cmd2->body.autogenFilter = metadata->autogen_filter;
   1134		cmd2->body.size.width = metadata->base_size.width;
   1135		cmd2->body.size.height = metadata->base_size.height;
   1136		cmd2->body.size.depth = metadata->base_size.depth;
   1137		cmd2->body.arraySize = metadata->array_size;
   1138	} else {
   1139		cmd->header.id = cmd_id;
   1140		cmd->header.size = cmd_len;
   1141		cmd->body.sid = srf->res.id;
   1142		cmd->body.surfaceFlags = metadata->flags;
   1143		cmd->body.format = metadata->format;
   1144		cmd->body.numMipLevels = metadata->mip_levels[0];
   1145		cmd->body.multisampleCount = metadata->multisample_count;
   1146		cmd->body.autogenFilter = metadata->autogen_filter;
   1147		cmd->body.size.width = metadata->base_size.width;
   1148		cmd->body.size.height = metadata->base_size.height;
   1149		cmd->body.size.depth = metadata->base_size.depth;
   1150	}
   1151
   1152	vmw_cmd_commit(dev_priv, submit_len);
   1153
   1154	return 0;
   1155
   1156out_no_fifo:
   1157	vmw_resource_release_id(res);
   1158out_no_id:
   1159	vmw_fifo_resource_dec(dev_priv);
   1160	return ret;
   1161}
   1162
   1163
   1164static int vmw_gb_surface_bind(struct vmw_resource *res,
   1165			       struct ttm_validate_buffer *val_buf)
   1166{
   1167	struct vmw_private *dev_priv = res->dev_priv;
   1168	struct {
   1169		SVGA3dCmdHeader header;
   1170		SVGA3dCmdBindGBSurface body;
   1171	} *cmd1;
   1172	struct {
   1173		SVGA3dCmdHeader header;
   1174		SVGA3dCmdUpdateGBSurface body;
   1175	} *cmd2;
   1176	uint32_t submit_size;
   1177	struct ttm_buffer_object *bo = val_buf->bo;
   1178
   1179	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
   1180
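        	/*
        	 * A bind always emits SVGA_3D_CMD_BIND_GB_SURFACE; if the
        	 * backup buffer holds data the surface doesn't yet have, an
        	 * UPDATE_GB_SURFACE command is appended to upload it.
        	 */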
   1181	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
   1182
   1183	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
   1184	if (unlikely(!cmd1))
   1185		return -ENOMEM;
   1186
   1187	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
   1188	cmd1->header.size = sizeof(cmd1->body);
   1189	cmd1->body.sid = res->id;
   1190	cmd1->body.mobid = bo->resource->start;
   1191	if (res->backup_dirty) {
   1192		cmd2 = (void *) &cmd1[1];
   1193		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
   1194		cmd2->header.size = sizeof(cmd2->body);
   1195		cmd2->body.sid = res->id;
   1196	}
   1197	vmw_cmd_commit(dev_priv, submit_size);
   1198
   1199	if (res->backup->dirty && res->backup_dirty) {
    1200		/* We've just made a full upload. Clear dirty regions. */
   1201		vmw_bo_dirty_clear_res(res);
   1202	}
   1203
   1204	res->backup_dirty = false;
   1205
   1206	return 0;
   1207}
   1208
   1209static int vmw_gb_surface_unbind(struct vmw_resource *res,
   1210				 bool readback,
   1211				 struct ttm_validate_buffer *val_buf)
   1212{
   1213	struct vmw_private *dev_priv = res->dev_priv;
   1214	struct ttm_buffer_object *bo = val_buf->bo;
   1215	struct vmw_fence_obj *fence;
   1216
   1217	struct {
   1218		SVGA3dCmdHeader header;
   1219		SVGA3dCmdReadbackGBSurface body;
   1220	} *cmd1;
   1221	struct {
   1222		SVGA3dCmdHeader header;
   1223		SVGA3dCmdInvalidateGBSurface body;
   1224	} *cmd2;
   1225	struct {
   1226		SVGA3dCmdHeader header;
   1227		SVGA3dCmdBindGBSurface body;
   1228	} *cmd3;
   1229	uint32_t submit_size;
   1230	uint8_t *cmd;
   1231
   1232
   1233	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
   1234
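        	/*
        	 * An unbind emits either a READBACK_GB_SURFACE (when @readback
        	 * is set, preserving the contents in the backup buffer) or an
        	 * INVALIDATE_GB_SURFACE, followed in both cases by a bind to
        	 * SVGA3D_INVALID_ID that detaches the MOB.
        	 */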
   1235	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
   1236	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
   1237	if (unlikely(!cmd))
   1238		return -ENOMEM;
   1239
   1240	if (readback) {
   1241		cmd1 = (void *) cmd;
   1242		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
   1243		cmd1->header.size = sizeof(cmd1->body);
   1244		cmd1->body.sid = res->id;
   1245		cmd3 = (void *) &cmd1[1];
   1246	} else {
   1247		cmd2 = (void *) cmd;
   1248		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
   1249		cmd2->header.size = sizeof(cmd2->body);
   1250		cmd2->body.sid = res->id;
   1251		cmd3 = (void *) &cmd2[1];
   1252	}
   1253
   1254	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
   1255	cmd3->header.size = sizeof(cmd3->body);
   1256	cmd3->body.sid = res->id;
   1257	cmd3->body.mobid = SVGA3D_INVALID_ID;
   1258
   1259	vmw_cmd_commit(dev_priv, submit_size);
   1260
   1261	/*
   1262	 * Create a fence object and fence the backup buffer.
   1263	 */
   1264
   1265	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
   1266					  &fence, NULL);
   1267
   1268	vmw_bo_fence_single(val_buf->bo, fence);
   1269
   1270	if (likely(fence != NULL))
   1271		vmw_fence_obj_unreference(&fence);
   1272
   1273	return 0;
   1274}
   1275
   1276static int vmw_gb_surface_destroy(struct vmw_resource *res)
   1277{
   1278	struct vmw_private *dev_priv = res->dev_priv;
   1279	struct vmw_surface *srf = vmw_res_to_srf(res);
   1280	struct {
   1281		SVGA3dCmdHeader header;
   1282		SVGA3dCmdDestroyGBSurface body;
   1283	} *cmd;
   1284
   1285	if (likely(res->id == -1))
   1286		return 0;
   1287
   1288	mutex_lock(&dev_priv->binding_mutex);
   1289	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
   1290	vmw_binding_res_list_scrub(&res->binding_head);
   1291
   1292	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
   1293	if (unlikely(!cmd)) {
   1294		mutex_unlock(&dev_priv->binding_mutex);
   1295		return -ENOMEM;
   1296	}
   1297
   1298	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
   1299	cmd->header.size = sizeof(cmd->body);
   1300	cmd->body.sid = res->id;
   1301	vmw_cmd_commit(dev_priv, sizeof(*cmd));
   1302	mutex_unlock(&dev_priv->binding_mutex);
   1303	vmw_resource_release_id(res);
   1304	vmw_fifo_resource_dec(dev_priv);
   1305
   1306	return 0;
   1307}
   1308
   1309/**
   1310 * vmw_gb_surface_define_ioctl - Ioctl function implementing
   1311 * the user surface define functionality.
   1312 *
   1313 * @dev: Pointer to a struct drm_device.
   1314 * @data: Pointer to data copied from / to user-space.
   1315 * @file_priv: Pointer to a drm file private structure.
   1316 */
   1317int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
   1318				struct drm_file *file_priv)
   1319{
   1320	union drm_vmw_gb_surface_create_arg *arg =
   1321	    (union drm_vmw_gb_surface_create_arg *)data;
   1322	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
   1323	struct drm_vmw_gb_surface_create_ext_req req_ext;
   1324
   1325	req_ext.base = arg->req;
   1326	req_ext.version = drm_vmw_gb_surface_v1;
   1327	req_ext.svga3d_flags_upper_32_bits = 0;
   1328	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
   1329	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
   1330	req_ext.buffer_byte_stride = 0;
   1331	req_ext.must_be_zero = 0;
   1332
   1333	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
   1334}
   1335
   1336/**
   1337 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
   1338 * the user surface reference functionality.
   1339 *
   1340 * @dev: Pointer to a struct drm_device.
   1341 * @data: Pointer to data copied from / to user-space.
   1342 * @file_priv: Pointer to a drm file private structure.
   1343 */
   1344int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
   1345				   struct drm_file *file_priv)
   1346{
   1347	union drm_vmw_gb_surface_reference_arg *arg =
   1348	    (union drm_vmw_gb_surface_reference_arg *)data;
   1349	struct drm_vmw_surface_arg *req = &arg->req;
   1350	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
   1351	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
   1352	int ret;
   1353
   1354	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
   1355
   1356	if (unlikely(ret != 0))
   1357		return ret;
   1358
   1359	rep->creq = rep_ext.creq.base;
   1360	rep->crep = rep_ext.crep;
   1361
   1362	return ret;
   1363}
   1364
   1365/**
   1366 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
   1367 * the user surface define functionality.
   1368 *
   1369 * @dev: Pointer to a struct drm_device.
   1370 * @data: Pointer to data copied from / to user-space.
   1371 * @file_priv: Pointer to a drm file private structure.
   1372 */
   1373int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
   1374				struct drm_file *file_priv)
   1375{
   1376	union drm_vmw_gb_surface_create_ext_arg *arg =
   1377	    (union drm_vmw_gb_surface_create_ext_arg *)data;
   1378	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
   1379	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
   1380
   1381	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
   1382}
   1383
   1384/**
   1385 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
   1386 * the user surface reference functionality.
   1387 *
   1388 * @dev: Pointer to a struct drm_device.
   1389 * @data: Pointer to data copied from / to user-space.
   1390 * @file_priv: Pointer to a drm file private structure.
   1391 */
   1392int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
   1393				   struct drm_file *file_priv)
   1394{
   1395	union drm_vmw_gb_surface_reference_ext_arg *arg =
   1396	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
   1397	struct drm_vmw_surface_arg *req = &arg->req;
   1398	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
   1399
   1400	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
   1401}
   1402
   1403/**
   1404 * vmw_gb_surface_define_internal - Ioctl function implementing
   1405 * the user surface define functionality.
   1406 *
   1407 * @dev: Pointer to a struct drm_device.
   1408 * @req: Request argument from user-space.
   1409 * @rep: Response argument to user-space.
   1410 * @file_priv: Pointer to a drm file private structure.
   1411 */
   1412static int
   1413vmw_gb_surface_define_internal(struct drm_device *dev,
   1414			       struct drm_vmw_gb_surface_create_ext_req *req,
   1415			       struct drm_vmw_gb_surface_create_rep *rep,
   1416			       struct drm_file *file_priv)
   1417{
   1418	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
   1419	struct vmw_private *dev_priv = vmw_priv(dev);
   1420	struct vmw_user_surface *user_srf;
   1421	struct vmw_surface_metadata metadata = {0};
   1422	struct vmw_surface *srf;
   1423	struct vmw_resource *res;
   1424	struct vmw_resource *tmp;
   1425	int ret = 0;
   1426	uint32_t backup_handle = 0;
   1427	SVGA3dSurfaceAllFlags svga3d_flags_64 =
   1428		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
   1429				req->base.svga3d_flags);
   1430
    1431	/* array_size must be zero for non-GL3 hosts. */
   1432	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
   1433		VMW_DEBUG_USER("SM4 surface not supported.\n");
   1434		return -EINVAL;
   1435	}
   1436
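        	/*
        	 * The extended (v1) request fields are only meaningful on
        	 * SM4.1 capable devices; reject non-default values otherwise.
        	 */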
   1437	if (!has_sm4_1_context(dev_priv)) {
   1438		if (req->svga3d_flags_upper_32_bits != 0)
   1439			ret = -EINVAL;
   1440
   1441		if (req->base.multisample_count != 0)
   1442			ret = -EINVAL;
   1443
   1444		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
   1445			ret = -EINVAL;
   1446
   1447		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
   1448			ret = -EINVAL;
   1449
   1450		if (ret) {
   1451			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
   1452			return ret;
   1453		}
   1454	}
   1455
   1456	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
   1457		VMW_DEBUG_USER("SM5 surface not supported.\n");
   1458		return -EINVAL;
   1459	}
   1460
   1461	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
   1462	    req->base.multisample_count == 0) {
   1463		VMW_DEBUG_USER("Invalid sample count.\n");
   1464		return -EINVAL;
   1465	}
   1466
   1467	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
   1468		VMW_DEBUG_USER("Invalid mip level.\n");
   1469		return -EINVAL;
   1470	}
   1471
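        	/*
        	 * Assemble the driver-internal surface metadata from the
        	 * validated user-space request before handing it to
        	 * vmw_gb_surface_define().
        	 */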
   1472	metadata.flags = svga3d_flags_64;
   1473	metadata.format = req->base.format;
   1474	metadata.mip_levels[0] = req->base.mip_levels;
   1475	metadata.multisample_count = req->base.multisample_count;
   1476	metadata.multisample_pattern = req->multisample_pattern;
   1477	metadata.quality_level = req->quality_level;
   1478	metadata.array_size = req->base.array_size;
   1479	metadata.buffer_byte_stride = req->buffer_byte_stride;
   1480	metadata.num_sizes = 1;
   1481	metadata.base_size = req->base.base_size;
   1482	metadata.scanout = req->base.drm_surface_flags &
   1483		drm_vmw_surface_flag_scanout;
   1484
   1485	/* Define a surface based on the parameters. */
   1486	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
   1487	if (ret != 0) {
   1488		VMW_DEBUG_USER("Failed to define surface.\n");
   1489		return ret;
   1490	}
   1491
   1492	user_srf = container_of(srf, struct vmw_user_surface, srf);
   1493	if (drm_is_primary_client(file_priv))
   1494		user_srf->master = drm_file_get_master(file_priv);
   1495
   1496	res = &user_srf->srf.res;
   1497
   1498	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
   1499		ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
   1500					 &res->backup);
   1501		if (ret == 0) {
   1502			if (res->backup->base.base.size < res->backup_size) {
   1503				VMW_DEBUG_USER("Surface backup buffer too small.\n");
   1504				vmw_bo_unreference(&res->backup);
   1505				ret = -EINVAL;
   1506				goto out_unlock;
   1507			} else {
   1508				backup_handle = req->base.buffer_handle;
   1509			}
   1510		}
   1511	} else if (req->base.drm_surface_flags &
   1512		   (drm_vmw_surface_flag_create_buffer |
   1513		    drm_vmw_surface_flag_coherent)) {
   1514		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
   1515							res->backup_size,
   1516							&backup_handle,
   1517							&res->backup);
   1518		if (ret == 0)
   1519			vmw_bo_reference(res->backup);
   1520	}
   1521
   1522	if (unlikely(ret != 0)) {
   1523		vmw_resource_unreference(&res);
   1524		goto out_unlock;
   1525	}
   1526
   1527	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
   1528		struct vmw_buffer_object *backup = res->backup;
   1529
   1530		ttm_bo_reserve(&backup->base, false, false, NULL);
   1531		if (!res->func->dirty_alloc)
   1532			ret = -EINVAL;
   1533		if (!ret)
   1534			ret = vmw_bo_dirty_add(backup);
   1535		if (!ret) {
   1536			res->coherent = true;
   1537			ret = res->func->dirty_alloc(res);
   1538		}
   1539		ttm_bo_unreserve(&backup->base);
   1540		if (ret) {
   1541			vmw_resource_unreference(&res);
   1542			goto out_unlock;
   1543		}
   1544
   1545	}
   1546
   1547	tmp = vmw_resource_reference(res);
   1548	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
   1549				    req->base.drm_surface_flags &
   1550				    drm_vmw_surface_flag_shareable,
   1551				    VMW_RES_SURFACE,
   1552				    &vmw_user_surface_base_release);
   1553
   1554	if (unlikely(ret != 0)) {
   1555		vmw_resource_unreference(&tmp);
   1556		vmw_resource_unreference(&res);
   1557		goto out_unlock;
   1558	}
   1559
   1560	rep->handle      = user_srf->prime.base.handle;
   1561	rep->backup_size = res->backup_size;
   1562	if (res->backup) {
   1563		rep->buffer_map_handle =
   1564			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
   1565		rep->buffer_size = res->backup->base.base.size;
   1566		rep->buffer_handle = backup_handle;
   1567		if (user_srf->prime.base.shareable)
   1568			drm_gem_object_get(&res->backup->base.base);
   1569	} else {
   1570		rep->buffer_map_handle = 0;
   1571		rep->buffer_size = 0;
   1572		rep->buffer_handle = SVGA3D_INVALID_ID;
   1573	}
   1574	vmw_resource_unreference(&res);
   1575
   1576out_unlock:
   1577	return ret;
   1578}
   1579
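        /*
         * Illustrative note (sketch, not part of the driver): the reply filled
         * in above is consumed by user space roughly as follows.
         * rep->buffer_map_handle is the fake offset returned by
         * drm_vma_node_offset_addr(), so a client can map the backing store by
         * mmap()ing the DRM device fd at that offset, e.g.
         *
         *   void *map = mmap(NULL, rep.buffer_size, PROT_READ | PROT_WRITE,
         *                    MAP_SHARED, drm_fd, rep.buffer_map_handle);
         *
         * rep.handle and rep.buffer_handle are the surface handle and the GEM
         * handle of the backing buffer, usable in subsequent ioctls.
         */
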
   1580/**
    1581 * vmw_gb_surface_reference_internal - Internal function implementing
    1582 * the user surface reference ioctls.
   1583 *
   1584 * @dev: Pointer to a struct drm_device.
   1585 * @req: Pointer to user-space request surface arg.
   1586 * @rep: Pointer to response to user-space.
   1587 * @file_priv: Pointer to a drm file private structure.
   1588 */
   1589static int
   1590vmw_gb_surface_reference_internal(struct drm_device *dev,
   1591				  struct drm_vmw_surface_arg *req,
   1592				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
   1593				  struct drm_file *file_priv)
   1594{
   1595	struct vmw_private *dev_priv = vmw_priv(dev);
   1596	struct vmw_surface *srf;
   1597	struct vmw_user_surface *user_srf;
   1598	struct vmw_surface_metadata *metadata;
   1599	struct ttm_base_object *base;
   1600	u32 backup_handle;
   1601	int ret;
   1602
   1603	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
   1604					   req->handle_type, &base);
   1605	if (unlikely(ret != 0))
   1606		return ret;
   1607
   1608	user_srf = container_of(base, struct vmw_user_surface, prime.base);
   1609	srf = &user_srf->srf;
    1610	if (!srf->res.backup) {
    1611		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
        		ret = -EINVAL;
    1612		goto out_bad_resource;
    1613	}
   1614	metadata = &srf->metadata;
   1615
   1616	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
   1617	ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
   1618				    &backup_handle);
   1619	mutex_unlock(&dev_priv->cmdbuf_mutex);
   1620	if (ret != 0) {
   1621		drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
   1622			req->sid);
   1623		goto out_bad_resource;
   1624	}
   1625
   1626	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
   1627	rep->creq.base.format = metadata->format;
   1628	rep->creq.base.mip_levels = metadata->mip_levels[0];
   1629	rep->creq.base.drm_surface_flags = 0;
   1630	rep->creq.base.multisample_count = metadata->multisample_count;
   1631	rep->creq.base.autogen_filter = metadata->autogen_filter;
   1632	rep->creq.base.array_size = metadata->array_size;
   1633	rep->creq.base.buffer_handle = backup_handle;
   1634	rep->creq.base.base_size = metadata->base_size;
   1635	rep->crep.handle = user_srf->prime.base.handle;
   1636	rep->crep.backup_size = srf->res.backup_size;
   1637	rep->crep.buffer_handle = backup_handle;
   1638	rep->crep.buffer_map_handle =
   1639		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
   1640	rep->crep.buffer_size = srf->res.backup->base.base.size;
   1641
   1642	rep->creq.version = drm_vmw_gb_surface_v1;
   1643	rep->creq.svga3d_flags_upper_32_bits =
   1644		SVGA3D_FLAGS_UPPER_32(metadata->flags);
   1645	rep->creq.multisample_pattern = metadata->multisample_pattern;
   1646	rep->creq.quality_level = metadata->quality_level;
   1647	rep->creq.must_be_zero = 0;
   1648
   1649out_bad_resource:
   1650	ttm_base_object_unref(&base);
   1651
   1652	return ret;
   1653}
   1654
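        /*
         * Illustrative note (not part of the driver): backup_handle above is a
         * fresh GEM handle created in the referencing client's file, giving the
         * caller its own handle to the backing buffer, while rep->creq echoes
         * the original creation parameters so the client can reconstruct the
         * surface layout.
         */
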
   1655/**
   1656 * vmw_subres_dirty_add - Add a dirty region to a subresource
    1657 * @dirty: The surface's dirty tracker.
   1658 * @loc_start: The location corresponding to the start of the region.
   1659 * @loc_end: The location corresponding to the end of the region.
   1660 *
   1661 * As we are assuming that @loc_start and @loc_end represent a sequential
   1662 * range of backing store memory, if the region spans multiple lines then
   1663 * regardless of the x coordinate, the full lines are dirtied.
   1664 * Correspondingly if the region spans multiple z slices, then full rather
   1665 * than partial z slices are dirtied.
   1666 */
   1667static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
   1668				 const struct vmw_surface_loc *loc_start,
   1669				 const struct vmw_surface_loc *loc_end)
   1670{
   1671	const struct vmw_surface_cache *cache = &dirty->cache;
   1672	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
   1673	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
   1674	const struct drm_vmw_size *size = &cache->mip[mip].size;
   1675	u32 box_c2 = box->z + box->d;
   1676
   1677	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
   1678		return;
   1679
   1680	if (box->d == 0 || box->z > loc_start->z)
   1681		box->z = loc_start->z;
   1682	if (box_c2 < loc_end->z)
   1683		box->d = loc_end->z - box->z;
   1684
   1685	if (loc_start->z + 1 == loc_end->z) {
   1686		box_c2 = box->y + box->h;
   1687		if (box->h == 0 || box->y > loc_start->y)
   1688			box->y = loc_start->y;
   1689		if (box_c2 < loc_end->y)
   1690			box->h = loc_end->y - box->y;
   1691
   1692		if (loc_start->y + 1 == loc_end->y) {
   1693			box_c2 = box->x + box->w;
   1694			if (box->w == 0 || box->x > loc_start->x)
   1695				box->x = loc_start->x;
   1696			if (box_c2 < loc_end->x)
   1697				box->w = loc_end->x - box->x;
   1698		} else {
   1699			box->x = 0;
   1700			box->w = size->width;
   1701		}
   1702	} else {
   1703		box->y = 0;
   1704		box->h = size->height;
   1705		box->x = 0;
   1706		box->w = size->width;
   1707	}
   1708}
   1709
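        /*
         * Worked example for vmw_subres_dirty_add() (illustrative numbers): on
         * a 64x64 mip level with an empty box, adding a range from
         * loc_start = (x 8, y 2, z 0) to loc_end = (x 4, y 5, z 1) spans
         * several lines, so whole lines are dirtied and the box becomes
         * x 0, w 64, y 2, h 3, z 0, d 1 (rows 2..4 across the full width).
         */
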
   1710/**
   1711 * vmw_subres_dirty_full - Mark a full subresource as dirty
   1712 * @dirty: The surface's dirty tracker.
   1713 * @subres: The subresource
   1714 */
   1715static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
   1716{
   1717	const struct vmw_surface_cache *cache = &dirty->cache;
   1718	u32 mip = subres % cache->num_mip_levels;
   1719	const struct drm_vmw_size *size = &cache->mip[mip].size;
   1720	SVGA3dBox *box = &dirty->boxes[subres];
   1721
   1722	box->x = 0;
   1723	box->y = 0;
   1724	box->z = 0;
   1725	box->w = size->width;
   1726	box->h = size->height;
   1727	box->d = size->depth;
   1728}
   1729
   1730/*
    1731 * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
   1732 * surfaces.
   1733 */
   1734static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
   1735					    size_t start, size_t end)
   1736{
   1737	struct vmw_surface_dirty *dirty =
   1738		(struct vmw_surface_dirty *) res->dirty;
   1739	size_t backup_end = res->backup_offset + res->backup_size;
   1740	struct vmw_surface_loc loc1, loc2;
   1741	const struct vmw_surface_cache *cache;
   1742
   1743	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
   1744	end = min(end, backup_end) - res->backup_offset;
   1745	cache = &dirty->cache;
   1746	vmw_surface_get_loc(cache, &loc1, start);
   1747	vmw_surface_get_loc(cache, &loc2, end - 1);
   1748	vmw_surface_inc_loc(cache, &loc2);
   1749
   1750	if (loc1.sheet != loc2.sheet) {
   1751		u32 sub_res;
   1752
   1753		/*
    1754		 * The range spans multiple multisample sheets. An optimized
    1755		 * implementation would compute the dirty region for each sheet
    1756		 * and take the union of the results. Since this is not a common
    1757		 * case, just dirty the whole surface instead.
   1758		 */
   1759		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
   1760			vmw_subres_dirty_full(dirty, sub_res);
   1761		return;
   1762	}
   1763	if (loc1.sub_resource + 1 == loc2.sub_resource) {
   1764		/* Dirty range covers a single sub-resource */
   1765		vmw_subres_dirty_add(dirty, &loc1, &loc2);
   1766	} else {
   1767		/* Dirty range covers multiple sub-resources */
   1768		struct vmw_surface_loc loc_min, loc_max;
   1769		u32 sub_res;
   1770
   1771		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
   1772		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
   1773		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
   1774		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
   1775		for (sub_res = loc1.sub_resource + 1;
   1776		     sub_res < loc2.sub_resource - 1; ++sub_res)
   1777			vmw_subres_dirty_full(dirty, sub_res);
   1778	}
   1779}
   1780
   1781/*
    1782 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
   1783 * surfaces.
   1784 */
   1785static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
   1786					    size_t start, size_t end)
   1787{
   1788	struct vmw_surface_dirty *dirty =
   1789		(struct vmw_surface_dirty *) res->dirty;
   1790	const struct vmw_surface_cache *cache = &dirty->cache;
   1791	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
   1792	SVGA3dBox *box = &dirty->boxes[0];
   1793	u32 box_c2;
   1794
   1795	box->h = box->d = 1;
   1796	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
   1797	end = min(end, backup_end) - res->backup_offset;
   1798	box_c2 = box->x + box->w;
   1799	if (box->w == 0 || box->x > start)
   1800		box->x = start;
   1801	if (box_c2 < end)
   1802		box->w = end - box->x;
   1803}
   1804
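        /*
         * Worked example (illustrative, assuming backup_offset == 0 and an
         * initially empty box): a dirty byte range [100, 4196) on a buffer
         * surface yields a one-dimensional box with x = 100, w = 4096 and
         * h = d = 1.
         */
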
   1805/*
    1806 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
   1807 */
   1808static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
   1809					size_t end)
   1810{
   1811	struct vmw_surface *srf = vmw_res_to_srf(res);
   1812
   1813	if (WARN_ON(end <= res->backup_offset ||
   1814		    start >= res->backup_offset + res->backup_size))
   1815		return;
   1816
   1817	if (srf->metadata.format == SVGA3D_BUFFER)
   1818		vmw_surface_buf_dirty_range_add(res, start, end);
   1819	else
   1820		vmw_surface_tex_dirty_range_add(res, start, end);
   1821}
   1822
   1823/*
   1824 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
   1825 */
   1826static int vmw_surface_dirty_sync(struct vmw_resource *res)
   1827{
   1828	struct vmw_private *dev_priv = res->dev_priv;
   1829	u32 i, num_dirty;
   1830	struct vmw_surface_dirty *dirty =
   1831		(struct vmw_surface_dirty *) res->dirty;
   1832	size_t alloc_size;
   1833	const struct vmw_surface_cache *cache = &dirty->cache;
   1834	struct {
   1835		SVGA3dCmdHeader header;
   1836		SVGA3dCmdDXUpdateSubResource body;
   1837	} *cmd1;
   1838	struct {
   1839		SVGA3dCmdHeader header;
   1840		SVGA3dCmdUpdateGBImage body;
   1841	} *cmd2;
   1842	void *cmd;
   1843
   1844	num_dirty = 0;
   1845	for (i = 0; i < dirty->num_subres; ++i) {
   1846		const SVGA3dBox *box = &dirty->boxes[i];
   1847
   1848		if (box->d)
   1849			num_dirty++;
   1850	}
   1851
   1852	if (!num_dirty)
   1853		goto out;
   1854
   1855	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
   1856	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
   1857	if (!cmd)
   1858		return -ENOMEM;
   1859
   1860	cmd1 = cmd;
   1861	cmd2 = cmd;
   1862
   1863	for (i = 0; i < dirty->num_subres; ++i) {
   1864		const SVGA3dBox *box = &dirty->boxes[i];
   1865
   1866		if (!box->d)
   1867			continue;
   1868
   1869		/*
   1870		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
   1871		 * UPDATE_GB_IMAGE is not.
   1872		 */
   1873		if (has_sm4_context(dev_priv)) {
   1874			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
   1875			cmd1->header.size = sizeof(cmd1->body);
   1876			cmd1->body.sid = res->id;
   1877			cmd1->body.subResource = i;
   1878			cmd1->body.box = *box;
   1879			cmd1++;
   1880		} else {
   1881			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
   1882			cmd2->header.size = sizeof(cmd2->body);
   1883			cmd2->body.image.sid = res->id;
   1884			cmd2->body.image.face = i / cache->num_mip_levels;
   1885			cmd2->body.image.mipmap = i -
   1886				(cache->num_mip_levels * cmd2->body.image.face);
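        			/*
        			 * e.g. with 3 mip levels, subresource i == 7
        			 * decomposes to face 2, mipmap 1 (illustrative).
        			 */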
   1887			cmd2->body.box = *box;
   1888			cmd2++;
   1889		}
   1890
   1891	}
   1892	vmw_cmd_commit(dev_priv, alloc_size);
   1893 out:
   1894	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
   1895	       dirty->num_subres);
   1896
   1897	return 0;
   1898}
   1899
   1900/*
   1901 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
   1902 */
   1903static int vmw_surface_dirty_alloc(struct vmw_resource *res)
   1904{
   1905	struct vmw_surface *srf = vmw_res_to_srf(res);
   1906	const struct vmw_surface_metadata *metadata = &srf->metadata;
   1907	struct vmw_surface_dirty *dirty;
   1908	u32 num_layers = 1;
   1909	u32 num_mip;
   1910	u32 num_subres;
   1911	u32 num_samples;
   1912	size_t dirty_size;
   1913	int ret;
   1914
   1915	if (metadata->array_size)
   1916		num_layers = metadata->array_size;
   1917	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
   1918		num_layers *= SVGA3D_MAX_SURFACE_FACES;
   1919
   1920	num_mip = metadata->mip_levels[0];
   1921	if (!num_mip)
   1922		num_mip = 1;
   1923
   1924	num_subres = num_layers * num_mip;
   1925	dirty_size = struct_size(dirty, boxes, num_subres);
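        	/*
        	 * e.g. a cubemap (6 faces) with 10 mip levels gives 60 subresources
        	 * and hence 60 dirty boxes (illustrative).
        	 */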
   1926
   1927	dirty = kvzalloc(dirty_size, GFP_KERNEL);
   1928	if (!dirty) {
   1929		ret = -ENOMEM;
   1930		goto out_no_dirty;
   1931	}
   1932
   1933	num_samples = max_t(u32, 1, metadata->multisample_count);
   1934	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
   1935				      num_mip, num_layers, num_samples,
   1936				      &dirty->cache);
   1937	if (ret)
   1938		goto out_no_cache;
   1939
   1940	dirty->num_subres = num_subres;
   1941	res->dirty = (struct vmw_resource_dirty *) dirty;
   1942
   1943	return 0;
   1944
   1945out_no_cache:
   1946	kvfree(dirty);
   1947out_no_dirty:
   1948	return ret;
   1949}
   1950
   1951/*
   1952 * vmw_surface_dirty_free - The surface's dirty_free callback
   1953 */
   1954static void vmw_surface_dirty_free(struct vmw_resource *res)
   1955{
   1956	struct vmw_surface_dirty *dirty =
   1957		(struct vmw_surface_dirty *) res->dirty;
   1958
   1959	kvfree(dirty);
   1960	res->dirty = NULL;
   1961}
   1962
   1963/*
   1964 * vmw_surface_clean - The surface's clean callback
   1965 */
   1966static int vmw_surface_clean(struct vmw_resource *res)
   1967{
   1968	struct vmw_private *dev_priv = res->dev_priv;
   1969	size_t alloc_size;
   1970	struct {
   1971		SVGA3dCmdHeader header;
   1972		SVGA3dCmdReadbackGBSurface body;
   1973	} *cmd;
   1974
   1975	alloc_size = sizeof(*cmd);
   1976	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
   1977	if (!cmd)
   1978		return -ENOMEM;
   1979
   1980	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
   1981	cmd->header.size = sizeof(cmd->body);
   1982	cmd->body.sid = res->id;
   1983	vmw_cmd_commit(dev_priv, alloc_size);
   1984
   1985	return 0;
   1986}
   1987
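        /*
         * Illustrative note: SVGA_3D_CMD_READBACK_GB_SURFACE asks the device to
         * copy the surface contents back into its guest-backed backing store,
         * so the CPU-visible backing pages reflect device-side rendering.
         */
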
    1988/**
    1989 * vmw_gb_surface_define - Define a private GB surface
    1990 *
    1991 * @dev_priv: Pointer to a device private.
    1992 * @req: Metadata representing the surface to create.
    1993 * @srf_out: Pointer to the created surface; set to NULL on failure.
   1994 *
   1995 * GB surfaces allocated by this function will not have a user mode handle, and
   1996 * thus will only be visible to vmwgfx.  For optimization reasons the
   1997 * surface may later be given a user mode handle by another function to make
   1998 * it available to user mode drivers.
   1999 */
   2000int vmw_gb_surface_define(struct vmw_private *dev_priv,
   2001			  const struct vmw_surface_metadata *req,
   2002			  struct vmw_surface **srf_out)
   2003{
   2004	struct vmw_surface_metadata *metadata;
   2005	struct vmw_user_surface *user_srf;
   2006	struct vmw_surface *srf;
   2007	u32 sample_count = 1;
   2008	u32 num_layers = 1;
   2009	int ret;
   2010
   2011	*srf_out = NULL;
   2012
   2013	if (req->scanout) {
   2014		if (!vmw_surface_is_screen_target_format(req->format)) {
   2015			VMW_DEBUG_USER("Invalid Screen Target surface format.");
   2016			return -EINVAL;
   2017		}
   2018
   2019		if (req->base_size.width > dev_priv->texture_max_width ||
   2020		    req->base_size.height > dev_priv->texture_max_height) {
    2021			VMW_DEBUG_USER("%ux%u exceeds max surface size %ux%u\n",
   2022				       req->base_size.width,
   2023				       req->base_size.height,
   2024				       dev_priv->texture_max_width,
   2025				       dev_priv->texture_max_height);
   2026			return -EINVAL;
   2027		}
   2028	} else {
   2029		const SVGA3dSurfaceDesc *desc =
   2030			vmw_surface_get_desc(req->format);
   2031
   2032		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
   2033			VMW_DEBUG_USER("Invalid surface format.\n");
   2034			return -EINVAL;
   2035		}
   2036	}
   2037
   2038	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
   2039		return -EINVAL;
   2040
   2041	if (req->num_sizes != 1)
   2042		return -EINVAL;
   2043
   2044	if (req->sizes != NULL)
   2045		return -EINVAL;
   2046
   2047	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
   2048	if (unlikely(!user_srf)) {
   2049		ret = -ENOMEM;
   2050		goto out_unlock;
   2051	}
   2052
   2053	*srf_out  = &user_srf->srf;
   2054	user_srf->prime.base.shareable = false;
   2055	user_srf->prime.base.tfile = NULL;
   2056
   2057	srf = &user_srf->srf;
   2058	srf->metadata = *req;
   2059	srf->offsets = NULL;
   2060
   2061	metadata = &srf->metadata;
   2062
   2063	if (metadata->array_size)
   2064		num_layers = req->array_size;
   2065	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
   2066		num_layers = SVGA3D_MAX_SURFACE_FACES;
   2067
   2068	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
   2069		sample_count = metadata->multisample_count;
   2070
   2071	srf->res.backup_size =
   2072		vmw_surface_get_serialized_size_extended(
   2073				metadata->format,
   2074				metadata->base_size,
   2075				metadata->mip_levels[0],
   2076				num_layers,
   2077				sample_count);
   2078
   2079	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
   2080		srf->res.backup_size += sizeof(SVGA3dDXSOState);
   2081
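        	/*
        	 * Rough example (assumption, not derived from this file): for an
        	 * uncompressed 4-byte format such as SVGA3D_R8G8B8A8_UNORM at
        	 * 64x64x1 with one mip level, one layer and one sample, the
        	 * serialized size is on the order of 64 * 64 * 4 = 16384 bytes.
        	 */
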
   2082	/*
    2083	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
    2084	 * whose size exceeds the STDU maximum width/height. This is a workaround
    2085	 * to support big framebuffers requested by some user-space drivers to
    2086	 * span the whole display topology. Such a framebuffer is never actually
    2087	 * bound to a screen target: during prepare_fb a separate surface is
    2088	 * created, so it is safe to omit the SVGA3D_SURFACE_SCREENTARGET flag.
   2089	 */
   2090	if (dev_priv->active_display_unit == vmw_du_screen_target &&
   2091	    metadata->scanout &&
   2092	    metadata->base_size.width <= dev_priv->stdu_max_width &&
   2093	    metadata->base_size.height <= dev_priv->stdu_max_height)
   2094		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
   2095
   2096	/*
   2097	 * From this point, the generic resource management functions
   2098	 * destroy the object on failure.
   2099	 */
   2100	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
   2101
   2102	return ret;
   2103
   2104out_unlock:
   2105	return ret;
   2106}