cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmwgfx_drv.c (48484B)


      1// SPDX-License-Identifier: GPL-2.0 OR MIT
      2/**************************************************************************
      3 *
      4 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
      5 *
      6 * Permission is hereby granted, free of charge, to any person obtaining a
      7 * copy of this software and associated documentation files (the
      8 * "Software"), to deal in the Software without restriction, including
      9 * without limitation the rights to use, copy, modify, merge, publish,
     10 * distribute, sub license, and/or sell copies of the Software, and to
     11 * permit persons to whom the Software is furnished to do so, subject to
     12 * the following conditions:
     13 *
     14 * The above copyright notice and this permission notice (including the
     15 * next paragraph) shall be included in all copies or substantial portions
     16 * of the Software.
     17 *
     18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
     25 *
     26 **************************************************************************/
     27
     28#include <linux/dma-mapping.h>
     29#include <linux/module.h>
     30#include <linux/pci.h>
     31#include <linux/cc_platform.h>
     32
     33#include <drm/drm_aperture.h>
     34#include <drm/drm_drv.h>
     35#include <drm/drm_gem_ttm_helper.h>
     36#include <drm/drm_ioctl.h>
     37#include <drm/drm_module.h>
     38#include <drm/drm_sysfs.h>
     39#include <drm/ttm/ttm_bo_driver.h>
     40#include <drm/ttm/ttm_range_manager.h>
     41#include <drm/ttm/ttm_placement.h>
     42#include <generated/utsrelease.h>
     43
     44#include "ttm_object.h"
     45#include "vmwgfx_binding.h"
     46#include "vmwgfx_devcaps.h"
     47#include "vmwgfx_drv.h"
     48#include "vmwgfx_mksstat.h"
     49
     50#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
     51
     52#define VMW_MIN_INITIAL_WIDTH 800
     53#define VMW_MIN_INITIAL_HEIGHT 600
     54
     55/*
     56 * Fully encoded drm commands. Might move to vmw_drm.h
     57 */
     58
     59#define DRM_IOCTL_VMW_GET_PARAM					\
     60	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
     61		 struct drm_vmw_getparam_arg)
     62#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
     63	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
     64		union drm_vmw_alloc_dmabuf_arg)
     65#define DRM_IOCTL_VMW_UNREF_DMABUF				\
     66	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
     67		struct drm_vmw_unref_dmabuf_arg)
     68#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
     69	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
     70		 struct drm_vmw_cursor_bypass_arg)
     71
     72#define DRM_IOCTL_VMW_CONTROL_STREAM				\
     73	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
     74		 struct drm_vmw_control_stream_arg)
     75#define DRM_IOCTL_VMW_CLAIM_STREAM				\
     76	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
     77		 struct drm_vmw_stream_arg)
     78#define DRM_IOCTL_VMW_UNREF_STREAM				\
     79	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
     80		 struct drm_vmw_stream_arg)
     81
     82#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
     83	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
     84		struct drm_vmw_context_arg)
     85#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
     86	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
     87		struct drm_vmw_context_arg)
     88#define DRM_IOCTL_VMW_CREATE_SURFACE				\
     89	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
     90		 union drm_vmw_surface_create_arg)
     91#define DRM_IOCTL_VMW_UNREF_SURFACE				\
     92	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
     93		 struct drm_vmw_surface_arg)
     94#define DRM_IOCTL_VMW_REF_SURFACE				\
     95	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
     96		 union drm_vmw_surface_reference_arg)
     97#define DRM_IOCTL_VMW_EXECBUF					\
     98	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
     99		struct drm_vmw_execbuf_arg)
    100#define DRM_IOCTL_VMW_GET_3D_CAP				\
    101	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
    102		 struct drm_vmw_get_3d_cap_arg)
    103#define DRM_IOCTL_VMW_FENCE_WAIT				\
    104	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
    105		 struct drm_vmw_fence_wait_arg)
    106#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
    107	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
    108		 struct drm_vmw_fence_signaled_arg)
    109#define DRM_IOCTL_VMW_FENCE_UNREF				\
    110	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
    111		 struct drm_vmw_fence_arg)
    112#define DRM_IOCTL_VMW_FENCE_EVENT				\
    113	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
    114		 struct drm_vmw_fence_event_arg)
    115#define DRM_IOCTL_VMW_PRESENT					\
    116	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
    117		 struct drm_vmw_present_arg)
    118#define DRM_IOCTL_VMW_PRESENT_READBACK				\
    119	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
    120		 struct drm_vmw_present_readback_arg)
    121#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
    122	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
    123		 struct drm_vmw_update_layout_arg)
    124#define DRM_IOCTL_VMW_CREATE_SHADER				\
    125	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
    126		 struct drm_vmw_shader_create_arg)
    127#define DRM_IOCTL_VMW_UNREF_SHADER				\
    128	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
    129		 struct drm_vmw_shader_arg)
    130#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
    131	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
    132		 union drm_vmw_gb_surface_create_arg)
    133#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
    134	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
    135		 union drm_vmw_gb_surface_reference_arg)
    136#define DRM_IOCTL_VMW_SYNCCPU					\
    137	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
    138		 struct drm_vmw_synccpu_arg)
    139#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
    140	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
    141		struct drm_vmw_context_arg)
    142#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
    143	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
    144		union drm_vmw_gb_surface_create_ext_arg)
    145#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
    146	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
    147		union drm_vmw_gb_surface_reference_ext_arg)
    148#define DRM_IOCTL_VMW_MSG						\
    149	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
    150		struct drm_vmw_msg_arg)
    151#define DRM_IOCTL_VMW_MKSSTAT_RESET				\
    152	DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET)
    153#define DRM_IOCTL_VMW_MKSSTAT_ADD				\
    154	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD,	\
    155		struct drm_vmw_mksstat_add_arg)
    156#define DRM_IOCTL_VMW_MKSSTAT_REMOVE				\
    157	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE,	\
    158		struct drm_vmw_mksstat_remove_arg)
    159
    160/*
    161 * Ioctl definitions.
    162 */
    163
    164static const struct drm_ioctl_desc vmw_ioctls[] = {
    165	DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
    166			  DRM_RENDER_ALLOW),
    167	DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
    168			  DRM_RENDER_ALLOW),
    169	DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
    170			  DRM_RENDER_ALLOW),
    171	DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS,
    172			  vmw_kms_cursor_bypass_ioctl,
    173			  DRM_MASTER),
    174
    175	DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
    176			  DRM_MASTER),
    177	DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
    178			  DRM_MASTER),
    179	DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
    180			  DRM_MASTER),
    181
    182	DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
    183			  DRM_RENDER_ALLOW),
    184	DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
    185			  DRM_RENDER_ALLOW),
    186	DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
    187			  DRM_RENDER_ALLOW),
    188	DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
    189			  DRM_RENDER_ALLOW),
    190	DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
    191			  DRM_RENDER_ALLOW),
    192	DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl,
    193			  DRM_RENDER_ALLOW),
    194	DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
    195			  DRM_RENDER_ALLOW),
    196	DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED,
    197			  vmw_fence_obj_signaled_ioctl,
    198			  DRM_RENDER_ALLOW),
    199	DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
    200			  DRM_RENDER_ALLOW),
    201	DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
    202			  DRM_RENDER_ALLOW),
    203	DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
    204			  DRM_RENDER_ALLOW),
    205
     206	/* these allow direct access to the framebuffers; mark as master only */
    207	DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl,
    208			  DRM_MASTER | DRM_AUTH),
    209	DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK,
    210			  vmw_present_readback_ioctl,
    211			  DRM_MASTER | DRM_AUTH),
    212	/*
    213	 * The permissions of the below ioctl are overridden in
    214	 * vmw_generic_ioctl(). We require either
    215	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
    216	 */
    217	DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT,
    218			  vmw_kms_update_layout_ioctl,
    219			  DRM_RENDER_ALLOW),
    220	DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER,
    221			  vmw_shader_define_ioctl,
    222			  DRM_RENDER_ALLOW),
    223	DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER,
    224			  vmw_shader_destroy_ioctl,
    225			  DRM_RENDER_ALLOW),
    226	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE,
    227			  vmw_gb_surface_define_ioctl,
    228			  DRM_RENDER_ALLOW),
    229	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF,
    230			  vmw_gb_surface_reference_ioctl,
    231			  DRM_RENDER_ALLOW),
    232	DRM_IOCTL_DEF_DRV(VMW_SYNCCPU,
    233			  vmw_user_bo_synccpu_ioctl,
    234			  DRM_RENDER_ALLOW),
    235	DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT,
    236			  vmw_extended_context_define_ioctl,
    237			  DRM_RENDER_ALLOW),
    238	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT,
    239			  vmw_gb_surface_define_ext_ioctl,
    240			  DRM_RENDER_ALLOW),
    241	DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT,
    242			  vmw_gb_surface_reference_ext_ioctl,
    243			  DRM_RENDER_ALLOW),
    244	DRM_IOCTL_DEF_DRV(VMW_MSG,
    245			  vmw_msg_ioctl,
    246			  DRM_RENDER_ALLOW),
    247	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET,
    248			  vmw_mksstat_reset_ioctl,
    249			  DRM_RENDER_ALLOW),
    250	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD,
    251			  vmw_mksstat_add_ioctl,
    252			  DRM_RENDER_ALLOW),
    253	DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE,
    254			  vmw_mksstat_remove_ioctl,
    255			  DRM_RENDER_ALLOW),
    256};
    257
    258static const struct pci_device_id vmw_pci_id_list[] = {
    259	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
    260	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
    261	{ }
    262};
    263MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
    264
    265static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
    266static int vmw_restrict_iommu;
    267static int vmw_force_coherent;
    268static int vmw_restrict_dma_mask;
    269static int vmw_assume_16bpp;
    270
    271static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
    272static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
    273			      void *ptr);
    274
    275MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
    276module_param_named(enable_fbdev, enable_fbdev, int, 0600);
    277MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
    278module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
    279MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
    280module_param_named(force_coherent, vmw_force_coherent, int, 0600);
    281MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
    282module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
    283MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
    284module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
    285
    286
    287struct bitmap_name {
    288	uint32 value;
    289	const char *name;
    290};
    291
    292static const struct bitmap_name cap1_names[] = {
    293	{ SVGA_CAP_RECT_COPY, "rect copy" },
    294	{ SVGA_CAP_CURSOR, "cursor" },
    295	{ SVGA_CAP_CURSOR_BYPASS, "cursor bypass" },
    296	{ SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" },
    297	{ SVGA_CAP_8BIT_EMULATION, "8bit emulation" },
    298	{ SVGA_CAP_ALPHA_CURSOR, "alpha cursor" },
    299	{ SVGA_CAP_3D, "3D" },
    300	{ SVGA_CAP_EXTENDED_FIFO, "extended fifo" },
    301	{ SVGA_CAP_MULTIMON, "multimon" },
    302	{ SVGA_CAP_PITCHLOCK, "pitchlock" },
    303	{ SVGA_CAP_IRQMASK, "irq mask" },
    304	{ SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" },
    305	{ SVGA_CAP_GMR, "gmr" },
    306	{ SVGA_CAP_TRACES, "traces" },
    307	{ SVGA_CAP_GMR2, "gmr2" },
    308	{ SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" },
    309	{ SVGA_CAP_COMMAND_BUFFERS, "command buffers" },
    310	{ SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" },
    311	{ SVGA_CAP_GBOBJECTS, "gbobject" },
    312	{ SVGA_CAP_DX, "dx" },
    313	{ SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" },
    314	{ SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" },
    315	{ SVGA_CAP_CAP2_REGISTER, "cap2 register" },
    316};
    317
    318
    319static const struct bitmap_name cap2_names[] = {
    320	{ SVGA_CAP2_GROW_OTABLE, "grow otable" },
    321	{ SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" },
    322	{ SVGA_CAP2_DX2, "dx2" },
    323	{ SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" },
    324	{ SVGA_CAP2_SCREENDMA_REG, "screendma reg" },
    325	{ SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" },
    326	{ SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" },
    327	{ SVGA_CAP2_CURSOR_MOB, "cursor mob" },
    328	{ SVGA_CAP2_MSHINT, "mshint" },
    329	{ SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" },
    330	{ SVGA_CAP2_DX3, "dx3" },
    331	{ SVGA_CAP2_FRAME_TYPE, "frame type" },
    332	{ SVGA_CAP2_COTABLE_COPY, "cotable copy" },
    333	{ SVGA_CAP2_TRACE_FULL_FB, "trace full fb" },
    334	{ SVGA_CAP2_EXTRA_REGS, "extra regs" },
    335	{ SVGA_CAP2_LO_STAGING, "lo staging" },
    336};
    337
    338static void vmw_print_bitmap(struct drm_device *drm,
    339			     const char *prefix, uint32_t bitmap,
    340			     const struct bitmap_name *bnames,
    341			     uint32_t num_names)
    342{
    343	char buf[512];
    344	uint32_t i;
    345	uint32_t offset = 0;
    346	for (i = 0; i < num_names; ++i) {
    347		if ((bitmap & bnames[i].value) != 0) {
    348			offset += snprintf(buf + offset,
    349					   ARRAY_SIZE(buf) - offset,
    350					   "%s, ", bnames[i].name);
    351			bitmap &= ~bnames[i].value;
    352		}
    353	}
    354
    355	drm_info(drm, "%s: %s\n", prefix, buf);
    356	if (bitmap != 0)
    357		drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap);
    358}
    359
    360
    361static void vmw_print_sm_type(struct vmw_private *dev_priv)
    362{
    363	static const char *names[] = {
    364		[VMW_SM_LEGACY] = "Legacy",
    365		[VMW_SM_4] = "SM4",
    366		[VMW_SM_4_1] = "SM4_1",
    367		[VMW_SM_5] = "SM_5",
    368		[VMW_SM_5_1X] = "SM_5_1X",
    369		[VMW_SM_MAX] = "Invalid"
    370	};
    371	BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
    372	drm_info(&dev_priv->drm, "Available shader model: %s.\n",
    373		 names[dev_priv->sm_type]);
    374}
    375
    376/**
    377 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
    378 *
    379 * @dev_priv: A device private structure.
    380 *
    381 * This function creates a small buffer object that holds the query
    382 * result for dummy queries emitted as query barriers.
    383 * The function will then map the first page and initialize a pending
     384 * occlusion query result structure. Finally, it will unmap the buffer.
    385 * No interruptible waits are done within this function.
    386 *
    387 * Returns an error if bo creation or initialization fails.
    388 */
    389static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
    390{
    391	int ret;
    392	struct vmw_buffer_object *vbo;
    393	struct ttm_bo_kmap_obj map;
    394	volatile SVGA3dQueryResult *result;
    395	bool dummy;
    396
    397	/*
    398	 * Create the vbo as pinned, so that a tryreserve will
    399	 * immediately succeed. This is because we're the only
    400	 * user of the bo currently.
    401	 */
    402	ret = vmw_bo_create(dev_priv, PAGE_SIZE,
    403			    &vmw_sys_placement, false, true,
    404			    &vmw_bo_bo_free, &vbo);
    405	if (unlikely(ret != 0))
    406		return ret;
    407
    408	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
    409	BUG_ON(ret != 0);
    410	vmw_bo_pin_reserved(vbo, true);
    411
    412	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
    413	if (likely(ret == 0)) {
    414		result = ttm_kmap_obj_virtual(&map, &dummy);
    415		result->totalSize = sizeof(*result);
    416		result->state = SVGA3D_QUERYSTATE_PENDING;
    417		result->result32 = 0xff;
    418		ttm_bo_kunmap(&map);
    419	}
    420	vmw_bo_pin_reserved(vbo, false);
    421	ttm_bo_unreserve(&vbo->base);
    422
    423	if (unlikely(ret != 0)) {
    424		DRM_ERROR("Dummy query buffer map failed.\n");
    425		vmw_bo_unreference(&vbo);
    426	} else
    427		dev_priv->dummy_query_bo = vbo;
    428
    429	return ret;
    430}
    431
    432static int vmw_device_init(struct vmw_private *dev_priv)
    433{
    434	bool uses_fb_traces = false;
    435
    436	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
    437	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
    438	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
    439
    440	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
    441		  SVGA_REG_ENABLE_HIDE);
    442
    443	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
    444			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;
    445
    446	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
    447	dev_priv->fifo = vmw_fifo_create(dev_priv);
    448	if (IS_ERR(dev_priv->fifo)) {
    449		int err = PTR_ERR(dev_priv->fifo);
    450		dev_priv->fifo = NULL;
    451		return err;
    452	} else if (!dev_priv->fifo) {
    453		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
    454	}
    455
    456	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
    457	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
    458	return 0;
    459}
    460
    461static void vmw_device_fini(struct vmw_private *vmw)
    462{
    463	/*
    464	 * Legacy sync
    465	 */
    466	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
    467	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
    468		;
    469
    470	vmw->last_read_seqno = vmw_fence_read(vmw);
    471
    472	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
    473		  vmw->config_done_state);
    474	vmw_write(vmw, SVGA_REG_ENABLE,
    475		  vmw->enable_state);
    476	vmw_write(vmw, SVGA_REG_TRACES,
    477		  vmw->traces_state);
    478
    479	vmw_fifo_destroy(vmw);
    480}
    481
    482/**
    483 * vmw_request_device_late - Perform late device setup
    484 *
    485 * @dev_priv: Pointer to device private.
    486 *
    487 * This function performs setup of otables and enables large command
    488 * buffer submission. These tasks are split out to a separate function
    489 * because it reverts vmw_release_device_early and is intended to be used
    490 * by an error path in the hibernation code.
    491 */
    492static int vmw_request_device_late(struct vmw_private *dev_priv)
    493{
    494	int ret;
    495
    496	if (dev_priv->has_mob) {
    497		ret = vmw_otables_setup(dev_priv);
    498		if (unlikely(ret != 0)) {
    499			DRM_ERROR("Unable to initialize "
    500				  "guest Memory OBjects.\n");
    501			return ret;
    502		}
    503	}
    504
    505	if (dev_priv->cman) {
    506		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
    507		if (ret) {
    508			struct vmw_cmdbuf_man *man = dev_priv->cman;
    509
    510			dev_priv->cman = NULL;
    511			vmw_cmdbuf_man_destroy(man);
    512		}
    513	}
    514
    515	return 0;
    516}
    517
    518static int vmw_request_device(struct vmw_private *dev_priv)
    519{
    520	int ret;
    521
    522	ret = vmw_device_init(dev_priv);
    523	if (unlikely(ret != 0)) {
    524		DRM_ERROR("Unable to initialize the device.\n");
    525		return ret;
    526	}
    527	vmw_fence_fifo_up(dev_priv->fman);
    528	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
    529	if (IS_ERR(dev_priv->cman)) {
    530		dev_priv->cman = NULL;
    531		dev_priv->sm_type = VMW_SM_LEGACY;
    532	}
    533
    534	ret = vmw_request_device_late(dev_priv);
    535	if (ret)
    536		goto out_no_mob;
    537
    538	ret = vmw_dummy_query_bo_create(dev_priv);
    539	if (unlikely(ret != 0))
    540		goto out_no_query_bo;
    541
    542	return 0;
    543
    544out_no_query_bo:
    545	if (dev_priv->cman)
    546		vmw_cmdbuf_remove_pool(dev_priv->cman);
    547	if (dev_priv->has_mob) {
    548		struct ttm_resource_manager *man;
    549
    550		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
    551		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
    552		vmw_otables_takedown(dev_priv);
    553	}
    554	if (dev_priv->cman)
    555		vmw_cmdbuf_man_destroy(dev_priv->cman);
    556out_no_mob:
    557	vmw_fence_fifo_down(dev_priv->fman);
    558	vmw_device_fini(dev_priv);
    559	return ret;
    560}
    561
    562/**
    563 * vmw_release_device_early - Early part of fifo takedown.
    564 *
    565 * @dev_priv: Pointer to device private struct.
    566 *
    567 * This is the first part of command submission takedown, to be called before
    568 * buffer management is taken down.
    569 */
    570static void vmw_release_device_early(struct vmw_private *dev_priv)
    571{
    572	/*
    573	 * Previous destructions should've released
    574	 * the pinned bo.
    575	 */
    576
    577	BUG_ON(dev_priv->pinned_bo != NULL);
    578
    579	vmw_bo_unreference(&dev_priv->dummy_query_bo);
    580	if (dev_priv->cman)
    581		vmw_cmdbuf_remove_pool(dev_priv->cman);
    582
    583	if (dev_priv->has_mob) {
    584		struct ttm_resource_manager *man;
    585
    586		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
    587		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
    588		vmw_otables_takedown(dev_priv);
    589	}
    590}
    591
    592/**
    593 * vmw_release_device_late - Late part of fifo takedown.
    594 *
    595 * @dev_priv: Pointer to device private struct.
    596 *
    597 * This is the last part of the command submission takedown, to be called when
    598 * command submission is no longer needed. It may wait on pending fences.
    599 */
    600static void vmw_release_device_late(struct vmw_private *dev_priv)
    601{
    602	vmw_fence_fifo_down(dev_priv->fman);
    603	if (dev_priv->cman)
    604		vmw_cmdbuf_man_destroy(dev_priv->cman);
    605
    606	vmw_device_fini(dev_priv);
    607}
    608
    609/*
    610 * Sets the initial_[width|height] fields on the given vmw_private.
    611 *
    612 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
    613 * clamping the value to fb_max_[width|height] fields and the
    614 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
     615 * If the values appear to be invalid, they are set to
    616 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
    617 */
    618static void vmw_get_initial_size(struct vmw_private *dev_priv)
    619{
    620	uint32_t width;
    621	uint32_t height;
    622
    623	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
    624	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
    625
    626	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
    627	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
    628
    629	if (width > dev_priv->fb_max_width ||
    630	    height > dev_priv->fb_max_height) {
    631
    632		/*
    633		 * This is a host error and shouldn't occur.
    634		 */
    635
    636		width = VMW_MIN_INITIAL_WIDTH;
    637		height = VMW_MIN_INITIAL_HEIGHT;
    638	}
    639
    640	dev_priv->initial_width = width;
    641	dev_priv->initial_height = height;
    642}
    643
    644/**
    645 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
    646 * system.
    647 *
    648 * @dev_priv: Pointer to a struct vmw_private
    649 *
     650 * This function tries to determine what actions need to be taken by the
    651 * driver to make system pages visible to the device.
    652 * If this function decides that DMA is not possible, it returns -EINVAL.
    653 * The driver may then try to disable features of the device that require
    654 * DMA.
    655 */
    656static int vmw_dma_select_mode(struct vmw_private *dev_priv)
    657{
    658	static const char *names[vmw_dma_map_max] = {
    659		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
    660		[vmw_dma_map_populate] = "Caching DMA mappings.",
    661		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
    662
    663	/* TTM currently doesn't fully support SEV encryption. */
    664	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
    665		return -EINVAL;
    666
    667	if (vmw_force_coherent)
    668		dev_priv->map_mode = vmw_dma_alloc_coherent;
    669	else if (vmw_restrict_iommu)
    670		dev_priv->map_mode = vmw_dma_map_bind;
    671	else
    672		dev_priv->map_mode = vmw_dma_map_populate;
    673
    674	drm_info(&dev_priv->drm,
    675		 "DMA map mode: %s\n", names[dev_priv->map_mode]);
    676	return 0;
    677}
    678
    679/**
    680 * vmw_dma_masks - set required page- and dma masks
    681 *
     682 * @dev_priv: Pointer to a struct vmw_private
    683 *
    684 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
    685 * restriction also for 64-bit systems.
    686 */
    687static int vmw_dma_masks(struct vmw_private *dev_priv)
    688{
    689	struct drm_device *dev = &dev_priv->drm;
    690	int ret = 0;
    691
    692	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
    693	if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
    694		drm_info(&dev_priv->drm,
    695			 "Restricting DMA addresses to 44 bits.\n");
    696		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
    697	}
    698
    699	return ret;
    700}
    701
    702static int vmw_vram_manager_init(struct vmw_private *dev_priv)
    703{
    704	int ret;
    705	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
    706				 dev_priv->vram_size >> PAGE_SHIFT);
    707	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
    708	return ret;
    709}
    710
    711static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
    712{
    713	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
    714}
    715
    716static int vmw_setup_pci_resources(struct vmw_private *dev,
    717				   u32 pci_id)
    718{
    719	resource_size_t rmmio_start;
    720	resource_size_t rmmio_size;
    721	resource_size_t fifo_start;
    722	resource_size_t fifo_size;
    723	int ret;
    724	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);
    725
    726	pci_set_master(pdev);
    727
    728	ret = pci_request_regions(pdev, "vmwgfx probe");
    729	if (ret)
    730		return ret;
    731
    732	dev->pci_id = pci_id;
    733	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
    734		rmmio_start = pci_resource_start(pdev, 0);
    735		rmmio_size = pci_resource_len(pdev, 0);
    736		dev->vram_start = pci_resource_start(pdev, 2);
    737		dev->vram_size = pci_resource_len(pdev, 2);
    738
    739		drm_info(&dev->drm,
    740			"Register MMIO at 0x%pa size is %llu kiB\n",
    741			 &rmmio_start, (uint64_t)rmmio_size / 1024);
    742		dev->rmmio = devm_ioremap(dev->drm.dev,
    743					  rmmio_start,
    744					  rmmio_size);
    745		if (!dev->rmmio) {
    746			drm_err(&dev->drm,
    747				"Failed mapping registers mmio memory.\n");
    748			pci_release_regions(pdev);
    749			return -ENOMEM;
    750		}
    751	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
    752		dev->io_start = pci_resource_start(pdev, 0);
    753		dev->vram_start = pci_resource_start(pdev, 1);
    754		dev->vram_size = pci_resource_len(pdev, 1);
    755		fifo_start = pci_resource_start(pdev, 2);
    756		fifo_size = pci_resource_len(pdev, 2);
    757
    758		drm_info(&dev->drm,
    759			 "FIFO at %pa size is %llu kiB\n",
    760			 &fifo_start, (uint64_t)fifo_size / 1024);
    761		dev->fifo_mem = devm_memremap(dev->drm.dev,
    762					      fifo_start,
    763					      fifo_size,
    764					      MEMREMAP_WB);
    765
    766		if (IS_ERR(dev->fifo_mem)) {
    767			drm_err(&dev->drm,
    768				  "Failed mapping FIFO memory.\n");
    769			pci_release_regions(pdev);
    770			return PTR_ERR(dev->fifo_mem);
    771		}
    772	} else {
    773		pci_release_regions(pdev);
    774		return -EINVAL;
    775	}
    776
    777	/*
     778	 * This is the approximate size of the vram; the exact size will only
    779	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
    780	 * size will be equal to or bigger than the size reported by
    781	 * SVGA_REG_VRAM_SIZE.
    782	 */
    783	drm_info(&dev->drm,
    784		 "VRAM at %pa size is %llu kiB\n",
    785		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);
    786
    787	return 0;
    788}
    789
    790static int vmw_detect_version(struct vmw_private *dev)
    791{
    792	uint32_t svga_id;
    793
    794	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
    795			  SVGA_ID_3 : SVGA_ID_2);
    796	svga_id = vmw_read(dev, SVGA_REG_ID);
    797	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
    798		drm_err(&dev->drm,
    799			"Unsupported SVGA ID 0x%x on chipset 0x%x\n",
    800			svga_id, dev->pci_id);
    801		return -ENOSYS;
    802	}
    803	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
    804	drm_info(&dev->drm,
    805		 "Running on SVGA version %d.\n", (svga_id & 0xff));
    806	return 0;
    807}
    808
    809static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
    810{
    811	int ret;
    812	enum vmw_res_type i;
    813	bool refuse_dma = false;
    814	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
    815
    816	dev_priv->drm.dev_private = dev_priv;
    817
    818	mutex_init(&dev_priv->cmdbuf_mutex);
    819	mutex_init(&dev_priv->binding_mutex);
    820	spin_lock_init(&dev_priv->resource_lock);
    821	spin_lock_init(&dev_priv->hw_lock);
    822	spin_lock_init(&dev_priv->waiter_lock);
    823	spin_lock_init(&dev_priv->cursor_lock);
    824
    825	ret = vmw_setup_pci_resources(dev_priv, pci_id);
    826	if (ret)
    827		return ret;
    828	ret = vmw_detect_version(dev_priv);
    829	if (ret)
    830		goto out_no_pci_or_version;
    831
    832
    833	for (i = vmw_res_context; i < vmw_res_max; ++i) {
    834		idr_init_base(&dev_priv->res_idr[i], 1);
    835		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
    836	}
    837
    838	init_waitqueue_head(&dev_priv->fence_queue);
    839	init_waitqueue_head(&dev_priv->fifo_queue);
    840	dev_priv->fence_queue_waiters = 0;
    841	dev_priv->fifo_queue_waiters = 0;
    842
    843	dev_priv->used_memory_size = 0;
    844
    845	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
    846
    847	dev_priv->enable_fb = enable_fbdev;
    848
    849
    850	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
    851	vmw_print_bitmap(&dev_priv->drm, "Capabilities",
    852			 dev_priv->capabilities,
    853			 cap1_names, ARRAY_SIZE(cap1_names));
    854	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
    855		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
    856		vmw_print_bitmap(&dev_priv->drm, "Capabilities2",
    857				 dev_priv->capabilities2,
    858				 cap2_names, ARRAY_SIZE(cap2_names));
    859	}
    860
    861	ret = vmw_dma_select_mode(dev_priv);
    862	if (unlikely(ret != 0)) {
    863		drm_info(&dev_priv->drm,
    864			 "Restricting capabilities since DMA not available.\n");
    865		refuse_dma = true;
    866		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
    867			drm_info(&dev_priv->drm,
    868				 "Disabling 3D acceleration.\n");
    869	}
    870
    871	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
    872	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
    873	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
    874	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
    875
    876	vmw_get_initial_size(dev_priv);
    877
    878	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
    879		dev_priv->max_gmr_ids =
    880			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
    881		dev_priv->max_gmr_pages =
    882			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
    883		dev_priv->memory_size =
    884			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
    885		dev_priv->memory_size -= dev_priv->vram_size;
    886	} else {
    887		/*
    888		 * An arbitrary limit of 512MiB on surface
    889		 * memory. But all HWV8 hardware supports GMR2.
    890		 */
    891		dev_priv->memory_size = 512*1024*1024;
    892	}
    893	dev_priv->max_mob_pages = 0;
    894	dev_priv->max_mob_size = 0;
    895	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
    896		uint64_t mem_size;
    897
    898		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
    899			mem_size = vmw_read(dev_priv,
    900					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
    901		else
    902			mem_size =
    903				vmw_read(dev_priv,
    904					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
    905
    906		/*
    907		 * Workaround for low memory 2D VMs to compensate for the
    908		 * allocation taken by fbdev
    909		 */
    910		if (!(dev_priv->capabilities & SVGA_CAP_3D))
    911			mem_size *= 3;
    912
    913		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
    914		dev_priv->max_primary_mem =
    915			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
    916		dev_priv->max_mob_size =
    917			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
    918		dev_priv->stdu_max_width =
    919			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
    920		dev_priv->stdu_max_height =
    921			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
    922
    923		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
    924			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
    925		dev_priv->texture_max_width = vmw_read(dev_priv,
    926						       SVGA_REG_DEV_CAP);
    927		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
    928			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
    929		dev_priv->texture_max_height = vmw_read(dev_priv,
    930							SVGA_REG_DEV_CAP);
    931	} else {
    932		dev_priv->texture_max_width = 8192;
    933		dev_priv->texture_max_height = 8192;
    934		dev_priv->max_primary_mem = dev_priv->vram_size;
    935	}
    936	drm_info(&dev_priv->drm,
    937		 "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
    938		 (u64)dev_priv->vram_size / 1024,
    939		 (u64)dev_priv->fifo_mem_size / 1024,
    940		 dev_priv->memory_size / 1024);
    941
    942	drm_info(&dev_priv->drm,
    943		 "MOB limits: max mob size = %u kB, max mob pages = %u\n",
    944		 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
    945
    946	ret = vmw_dma_masks(dev_priv);
    947	if (unlikely(ret != 0))
    948		goto out_err0;
    949
    950	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
    951
    952	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
    953		drm_info(&dev_priv->drm,
    954			 "Max GMR ids is %u\n",
    955			 (unsigned)dev_priv->max_gmr_ids);
    956		drm_info(&dev_priv->drm,
    957			 "Max number of GMR pages is %u\n",
    958			 (unsigned)dev_priv->max_gmr_pages);
    959	}
    960	drm_info(&dev_priv->drm,
    961		 "Maximum display memory size is %llu kiB\n",
    962		 (uint64_t)dev_priv->max_primary_mem / 1024);
    963
    964	/* Need mmio memory to check for fifo pitchlock cap. */
    965	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
    966	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
    967	    !vmw_fifo_have_pitchlock(dev_priv)) {
    968		ret = -ENOSYS;
    969		DRM_ERROR("Hardware has no pitchlock\n");
    970		goto out_err0;
    971	}
    972
    973	dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
    974
    975	if (unlikely(dev_priv->tdev == NULL)) {
    976		drm_err(&dev_priv->drm,
    977			"Unable to initialize TTM object management.\n");
    978		ret = -ENOMEM;
    979		goto out_err0;
    980	}
    981
    982	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
    983		ret = vmw_irq_install(dev_priv);
    984		if (ret != 0) {
    985			drm_err(&dev_priv->drm,
    986				"Failed installing irq: %d\n", ret);
    987			goto out_no_irq;
    988		}
    989	}
    990
    991	dev_priv->fman = vmw_fence_manager_init(dev_priv);
    992	if (unlikely(dev_priv->fman == NULL)) {
    993		ret = -ENOMEM;
    994		goto out_no_fman;
    995	}
    996
    997	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
    998			      dev_priv->drm.dev,
    999			      dev_priv->drm.anon_inode->i_mapping,
   1000			      dev_priv->drm.vma_offset_manager,
   1001			      dev_priv->map_mode == vmw_dma_alloc_coherent,
   1002			      false);
   1003	if (unlikely(ret != 0)) {
   1004		drm_err(&dev_priv->drm,
   1005			"Failed initializing TTM buffer object driver.\n");
   1006		goto out_no_bdev;
   1007	}
   1008
   1009	/*
   1010	 * Enable VRAM, but initially don't use it until SVGA is enabled and
   1011	 * unhidden.
   1012	 */
   1013
   1014	ret = vmw_vram_manager_init(dev_priv);
   1015	if (unlikely(ret != 0)) {
   1016		drm_err(&dev_priv->drm,
   1017			"Failed initializing memory manager for VRAM.\n");
   1018		goto out_no_vram;
   1019	}
   1020
   1021	ret = vmw_devcaps_create(dev_priv);
   1022	if (unlikely(ret != 0)) {
   1023		drm_err(&dev_priv->drm,
   1024			"Failed initializing device caps.\n");
   1025		goto out_no_vram;
   1026	}
   1027
   1028	/*
    1029	 * "Guest Memory Regions" is an aperture-like feature with
    1030	 *  one slot per bo. There is an upper limit on the number of
    1031	 *  slots as well as on the bo size.
   1032	 */
   1033	dev_priv->has_gmr = true;
   1034	/* TODO: This is most likely not correct */
   1035	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
   1036	    refuse_dma ||
   1037	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
   1038		drm_info(&dev_priv->drm,
   1039			  "No GMR memory available. "
   1040			 "Graphics memory resources are very limited.\n");
   1041		dev_priv->has_gmr = false;
   1042	}
   1043
   1044	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
   1045		dev_priv->has_mob = true;
   1046
   1047		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
   1048			drm_info(&dev_priv->drm,
   1049				 "No MOB memory available. "
   1050				 "3D will be disabled.\n");
   1051			dev_priv->has_mob = false;
   1052		}
   1053		if (vmw_sys_man_init(dev_priv) != 0) {
   1054			drm_info(&dev_priv->drm,
   1055				 "No MOB page table memory available. "
   1056				 "3D will be disabled.\n");
   1057			dev_priv->has_mob = false;
   1058		}
   1059	}
   1060
   1061	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
   1062		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
   1063			dev_priv->sm_type = VMW_SM_4;
   1064	}
   1065
   1066	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
   1067	if (has_sm4_context(dev_priv) &&
   1068	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
   1069		if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
   1070			dev_priv->sm_type = VMW_SM_4_1;
   1071		if (has_sm4_1_context(dev_priv) &&
   1072				(dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
   1073			if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
   1074				dev_priv->sm_type = VMW_SM_5;
   1075				if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
   1076					dev_priv->sm_type = VMW_SM_5_1X;
   1077			}
   1078		}
   1079	}
   1080
   1081	ret = vmw_kms_init(dev_priv);
   1082	if (unlikely(ret != 0))
   1083		goto out_no_kms;
   1084	vmw_overlay_init(dev_priv);
   1085
   1086	ret = vmw_request_device(dev_priv);
   1087	if (ret)
   1088		goto out_no_fifo;
   1089
   1090	vmw_print_sm_type(dev_priv);
   1091	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
   1092			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
   1093			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
   1094
   1095	if (dev_priv->enable_fb) {
   1096		vmw_fifo_resource_inc(dev_priv);
   1097		vmw_svga_enable(dev_priv);
   1098		vmw_fb_init(dev_priv);
   1099	}
   1100
   1101	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
   1102	register_pm_notifier(&dev_priv->pm_nb);
   1103
   1104	return 0;
   1105
   1106out_no_fifo:
   1107	vmw_overlay_close(dev_priv);
   1108	vmw_kms_close(dev_priv);
   1109out_no_kms:
   1110	if (dev_priv->has_mob) {
   1111		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
   1112		vmw_sys_man_fini(dev_priv);
   1113	}
   1114	if (dev_priv->has_gmr)
   1115		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
   1116	vmw_devcaps_destroy(dev_priv);
   1117	vmw_vram_manager_fini(dev_priv);
   1118out_no_vram:
   1119	ttm_device_fini(&dev_priv->bdev);
   1120out_no_bdev:
   1121	vmw_fence_manager_takedown(dev_priv->fman);
   1122out_no_fman:
   1123	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
   1124		vmw_irq_uninstall(&dev_priv->drm);
   1125out_no_irq:
   1126	ttm_object_device_release(&dev_priv->tdev);
   1127out_err0:
   1128	for (i = vmw_res_context; i < vmw_res_max; ++i)
   1129		idr_destroy(&dev_priv->res_idr[i]);
   1130
   1131	if (dev_priv->ctx.staged_bindings)
   1132		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
   1133out_no_pci_or_version:
   1134	pci_release_regions(pdev);
   1135	return ret;
   1136}
   1137
   1138static void vmw_driver_unload(struct drm_device *dev)
   1139{
   1140	struct vmw_private *dev_priv = vmw_priv(dev);
   1141	struct pci_dev *pdev = to_pci_dev(dev->dev);
   1142	enum vmw_res_type i;
   1143
   1144	unregister_pm_notifier(&dev_priv->pm_nb);
   1145
   1146	if (dev_priv->ctx.res_ht_initialized)
   1147		vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
   1148	vfree(dev_priv->ctx.cmd_bounce);
   1149	if (dev_priv->enable_fb) {
   1150		vmw_fb_off(dev_priv);
   1151		vmw_fb_close(dev_priv);
   1152		vmw_fifo_resource_dec(dev_priv);
   1153		vmw_svga_disable(dev_priv);
   1154	}
   1155
   1156	vmw_kms_close(dev_priv);
   1157	vmw_overlay_close(dev_priv);
   1158
   1159	if (dev_priv->has_gmr)
   1160		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
   1161
   1162	vmw_release_device_early(dev_priv);
   1163	if (dev_priv->has_mob) {
   1164		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
   1165		vmw_sys_man_fini(dev_priv);
   1166	}
   1167	vmw_devcaps_destroy(dev_priv);
   1168	vmw_vram_manager_fini(dev_priv);
   1169	ttm_device_fini(&dev_priv->bdev);
   1170	vmw_release_device_late(dev_priv);
   1171	vmw_fence_manager_takedown(dev_priv->fman);
   1172	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
   1173		vmw_irq_uninstall(&dev_priv->drm);
   1174
   1175	ttm_object_device_release(&dev_priv->tdev);
   1176	if (dev_priv->ctx.staged_bindings)
   1177		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
   1178
   1179	for (i = vmw_res_context; i < vmw_res_max; ++i)
   1180		idr_destroy(&dev_priv->res_idr[i]);
   1181
   1182	vmw_mksstat_remove_all(dev_priv);
   1183
   1184	pci_release_regions(pdev);
   1185}
   1186
   1187static void vmw_postclose(struct drm_device *dev,
   1188			 struct drm_file *file_priv)
   1189{
   1190	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
   1191
   1192	ttm_object_file_release(&vmw_fp->tfile);
   1193	kfree(vmw_fp);
   1194}
   1195
   1196static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
   1197{
   1198	struct vmw_private *dev_priv = vmw_priv(dev);
   1199	struct vmw_fpriv *vmw_fp;
   1200	int ret = -ENOMEM;
   1201
   1202	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
   1203	if (unlikely(!vmw_fp))
   1204		return ret;
   1205
   1206	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
   1207	if (unlikely(vmw_fp->tfile == NULL))
   1208		goto out_no_tfile;
   1209
   1210	file_priv->driver_priv = vmw_fp;
   1211
   1212	return 0;
   1213
   1214out_no_tfile:
   1215	kfree(vmw_fp);
   1216	return ret;
   1217}
   1218
   1219static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
   1220			      unsigned long arg,
   1221			      long (*ioctl_func)(struct file *, unsigned int,
   1222						 unsigned long))
   1223{
   1224	struct drm_file *file_priv = filp->private_data;
   1225	struct drm_device *dev = file_priv->minor->dev;
   1226	unsigned int nr = DRM_IOCTL_NR(cmd);
   1227	unsigned int flags;
   1228
   1229	/*
   1230	 * Do extra checking on driver private ioctls.
   1231	 */
   1232
   1233	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
   1234	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
   1235		const struct drm_ioctl_desc *ioctl =
   1236			&vmw_ioctls[nr - DRM_COMMAND_BASE];
   1237
   1238		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
   1239			return ioctl_func(filp, cmd, arg);
   1240		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
   1241			if (!drm_is_current_master(file_priv) &&
   1242			    !capable(CAP_SYS_ADMIN))
   1243				return -EACCES;
   1244		}
   1245
   1246		if (unlikely(ioctl->cmd != cmd))
   1247			goto out_io_encoding;
   1248
   1249		flags = ioctl->flags;
   1250	} else if (!drm_ioctl_flags(nr, &flags))
   1251		return -EINVAL;
   1252
   1253	return ioctl_func(filp, cmd, arg);
   1254
   1255out_io_encoding:
   1256	DRM_ERROR("Invalid command format, ioctl %d\n",
   1257		  nr - DRM_COMMAND_BASE);
   1258
   1259	return -EINVAL;
   1260}
   1261
   1262static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
   1263			       unsigned long arg)
   1264{
   1265	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
   1266}
   1267
   1268#ifdef CONFIG_COMPAT
   1269static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
   1270			     unsigned long arg)
   1271{
   1272	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
   1273}
   1274#endif
   1275
   1276static void vmw_master_set(struct drm_device *dev,
   1277			   struct drm_file *file_priv,
   1278			   bool from_open)
   1279{
   1280	/*
   1281	 * Inform a new master that the layout may have changed while
   1282	 * it was gone.
   1283	 */
   1284	if (!from_open)
   1285		drm_sysfs_hotplug_event(dev);
   1286}
   1287
   1288static void vmw_master_drop(struct drm_device *dev,
   1289			    struct drm_file *file_priv)
   1290{
   1291	struct vmw_private *dev_priv = vmw_priv(dev);
   1292
   1293	vmw_kms_legacy_hotspot_clear(dev_priv);
   1294	if (!dev_priv->enable_fb)
   1295		vmw_svga_disable(dev_priv);
   1296}
   1297
   1298/**
   1299 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
   1300 *
   1301 * @dev_priv: Pointer to device private struct.
   1302 * Needs the reservation sem to be held in non-exclusive mode.
   1303 */
   1304static void __vmw_svga_enable(struct vmw_private *dev_priv)
   1305{
   1306	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
   1307
   1308	if (!ttm_resource_manager_used(man)) {
   1309		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
   1310		ttm_resource_manager_set_used(man, true);
   1311	}
   1312}
   1313
   1314/**
   1315 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
   1316 *
   1317 * @dev_priv: Pointer to device private struct.
   1318 */
   1319void vmw_svga_enable(struct vmw_private *dev_priv)
   1320{
   1321	__vmw_svga_enable(dev_priv);
   1322}
   1323
   1324/**
   1325 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
   1326 *
   1327 * @dev_priv: Pointer to device private struct.
   1328 * Needs the reservation sem to be held in exclusive mode.
   1329 * Will not empty VRAM. VRAM must be emptied by caller.
   1330 */
   1331static void __vmw_svga_disable(struct vmw_private *dev_priv)
   1332{
   1333	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
   1334
   1335	if (ttm_resource_manager_used(man)) {
   1336		ttm_resource_manager_set_used(man, false);
   1337		vmw_write(dev_priv, SVGA_REG_ENABLE,
   1338			  SVGA_REG_ENABLE_HIDE |
   1339			  SVGA_REG_ENABLE_ENABLE);
   1340	}
   1341}
   1342
   1343/**
   1344 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
   1345 * running.
   1346 *
   1347 * @dev_priv: Pointer to device private struct.
   1348 * Will empty VRAM.
   1349 */
   1350void vmw_svga_disable(struct vmw_private *dev_priv)
   1351{
   1352	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
   1353	/*
   1354	 * Disabling SVGA will turn off device modesetting capabilities, so
   1355	 * notify KMS about that so that it doesn't cache atomic state that
   1356	 * isn't valid anymore, for example crtcs turned on.
   1357	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
   1358	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
   1359	 * end up with lock order reversal. Thus, a master may actually perform
   1360	 * a new modeset just after we call vmw_kms_lost_device() and race with
   1361	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
   1362	 * to be inconsistent with the device, causing modesetting problems.
   1363	 *
   1364	 */
   1365	vmw_kms_lost_device(&dev_priv->drm);
   1366	if (ttm_resource_manager_used(man)) {
   1367		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
   1368			DRM_ERROR("Failed evicting VRAM buffers.\n");
   1369		ttm_resource_manager_set_used(man, false);
   1370		vmw_write(dev_priv, SVGA_REG_ENABLE,
   1371			  SVGA_REG_ENABLE_HIDE |
   1372			  SVGA_REG_ENABLE_ENABLE);
   1373	}
   1374}
   1375
   1376static void vmw_remove(struct pci_dev *pdev)
   1377{
   1378	struct drm_device *dev = pci_get_drvdata(pdev);
   1379
   1380	drm_dev_unregister(dev);
   1381	vmw_driver_unload(dev);
   1382}
   1383
   1384static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
   1385{
   1386	struct drm_minor *minor = vmw->drm.primary;
   1387	struct dentry *root = minor->debugfs_root;
   1388
   1389	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
   1390					    root, "system_ttm");
   1391	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
   1392					    root, "vram_ttm");
   1393	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
   1394					    root, "gmr_ttm");
   1395	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
   1396					    root, "mob_ttm");
   1397	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
   1398					    root, "system_mob_ttm");
   1399}
   1400
   1401static unsigned long
   1402vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
   1403		      unsigned long len, unsigned long pgoff,
   1404		      unsigned long flags)
   1405{
   1406	struct drm_file *file_priv = file->private_data;
   1407	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
   1408
   1409	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
   1410				     dev_priv->drm.vma_offset_manager);
   1411}
   1412
   1413static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
   1414			      void *ptr)
   1415{
   1416	struct vmw_private *dev_priv =
   1417		container_of(nb, struct vmw_private, pm_nb);
   1418
   1419	switch (val) {
   1420	case PM_HIBERNATION_PREPARE:
   1421		/*
   1422		 * Take the reservation sem in write mode, which will make sure
   1423		 * there are no other processes holding a buffer object
   1424		 * reservation, meaning we should be able to evict all buffer
   1425		 * objects if needed.
   1426		 * Once user-space processes have been frozen, we can release
   1427		 * the lock again.
   1428		 */
   1429		dev_priv->suspend_locked = true;
   1430		break;
   1431	case PM_POST_HIBERNATION:
   1432	case PM_POST_RESTORE:
   1433		if (READ_ONCE(dev_priv->suspend_locked)) {
   1434			dev_priv->suspend_locked = false;
   1435		}
   1436		break;
   1437	default:
   1438		break;
   1439	}
   1440	return 0;
   1441}
   1442
   1443static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
   1444{
   1445	struct drm_device *dev = pci_get_drvdata(pdev);
   1446	struct vmw_private *dev_priv = vmw_priv(dev);
   1447
   1448	if (dev_priv->refuse_hibernation)
   1449		return -EBUSY;
   1450
   1451	pci_save_state(pdev);
   1452	pci_disable_device(pdev);
   1453	pci_set_power_state(pdev, PCI_D3hot);
   1454	return 0;
   1455}
   1456
   1457static int vmw_pci_resume(struct pci_dev *pdev)
   1458{
   1459	pci_set_power_state(pdev, PCI_D0);
   1460	pci_restore_state(pdev);
   1461	return pci_enable_device(pdev);
   1462}
   1463
   1464static int vmw_pm_suspend(struct device *kdev)
   1465{
   1466	struct pci_dev *pdev = to_pci_dev(kdev);
   1467	struct pm_message dummy;
   1468
   1469	dummy.event = 0;
   1470
   1471	return vmw_pci_suspend(pdev, dummy);
   1472}
   1473
   1474static int vmw_pm_resume(struct device *kdev)
   1475{
   1476	struct pci_dev *pdev = to_pci_dev(kdev);
   1477
   1478	return vmw_pci_resume(pdev);
   1479}
   1480
   1481static int vmw_pm_freeze(struct device *kdev)
   1482{
   1483	struct pci_dev *pdev = to_pci_dev(kdev);
   1484	struct drm_device *dev = pci_get_drvdata(pdev);
   1485	struct vmw_private *dev_priv = vmw_priv(dev);
   1486	struct ttm_operation_ctx ctx = {
   1487		.interruptible = false,
   1488		.no_wait_gpu = false
   1489	};
   1490	int ret;
   1491
   1492	/*
   1493	 * No user-space processes should be running now.
   1494	 */
   1495	ret = vmw_kms_suspend(&dev_priv->drm);
   1496	if (ret) {
   1497		DRM_ERROR("Failed to freeze modesetting.\n");
   1498		return ret;
   1499	}
   1500	if (dev_priv->enable_fb)
   1501		vmw_fb_off(dev_priv);
   1502
   1503	vmw_execbuf_release_pinned_bo(dev_priv);
   1504	vmw_resource_evict_all(dev_priv);
   1505	vmw_release_device_early(dev_priv);
   1506	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
   1507	if (dev_priv->enable_fb)
   1508		vmw_fifo_resource_dec(dev_priv);
   1509	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
   1510		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
   1511		if (dev_priv->enable_fb)
   1512			vmw_fifo_resource_inc(dev_priv);
   1513		WARN_ON(vmw_request_device_late(dev_priv));
   1514		dev_priv->suspend_locked = false;
   1515		if (dev_priv->suspend_state)
   1516			vmw_kms_resume(dev);
   1517		if (dev_priv->enable_fb)
   1518			vmw_fb_on(dev_priv);
   1519		return -EBUSY;
   1520	}
   1521
   1522	vmw_fence_fifo_down(dev_priv->fman);
   1523	__vmw_svga_disable(dev_priv);
   1524
   1525	vmw_release_device_late(dev_priv);
   1526	return 0;
   1527}
   1528
   1529static int vmw_pm_restore(struct device *kdev)
   1530{
   1531	struct pci_dev *pdev = to_pci_dev(kdev);
   1532	struct drm_device *dev = pci_get_drvdata(pdev);
   1533	struct vmw_private *dev_priv = vmw_priv(dev);
   1534	int ret;
   1535
   1536	vmw_detect_version(dev_priv);
   1537
   1538	if (dev_priv->enable_fb)
   1539		vmw_fifo_resource_inc(dev_priv);
   1540
   1541	ret = vmw_request_device(dev_priv);
   1542	if (ret)
   1543		return ret;
   1544
   1545	if (dev_priv->enable_fb)
   1546		__vmw_svga_enable(dev_priv);
   1547
   1548	vmw_fence_fifo_up(dev_priv->fman);
   1549	dev_priv->suspend_locked = false;
   1550	if (dev_priv->suspend_state)
   1551		vmw_kms_resume(&dev_priv->drm);
   1552
   1553	if (dev_priv->enable_fb)
   1554		vmw_fb_on(dev_priv);
   1555
   1556	return 0;
   1557}
   1558
   1559static const struct dev_pm_ops vmw_pm_ops = {
   1560	.freeze = vmw_pm_freeze,
   1561	.thaw = vmw_pm_restore,
   1562	.restore = vmw_pm_restore,
   1563	.suspend = vmw_pm_suspend,
   1564	.resume = vmw_pm_resume,
   1565};
   1566
   1567static const struct file_operations vmwgfx_driver_fops = {
   1568	.owner = THIS_MODULE,
   1569	.open = drm_open,
   1570	.release = drm_release,
   1571	.unlocked_ioctl = vmw_unlocked_ioctl,
   1572	.mmap = vmw_mmap,
   1573	.poll = drm_poll,
   1574	.read = drm_read,
   1575#if defined(CONFIG_COMPAT)
   1576	.compat_ioctl = vmw_compat_ioctl,
   1577#endif
   1578	.llseek = noop_llseek,
   1579	.get_unmapped_area = vmw_get_unmapped_area,
   1580};
   1581
   1582static const struct drm_driver driver = {
   1583	.driver_features =
   1584	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
   1585	.ioctls = vmw_ioctls,
   1586	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
   1587	.master_set = vmw_master_set,
   1588	.master_drop = vmw_master_drop,
   1589	.open = vmw_driver_open,
   1590	.postclose = vmw_postclose,
   1591
   1592	.dumb_create = vmw_dumb_create,
   1593	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
   1594
   1595	.prime_fd_to_handle = vmw_prime_fd_to_handle,
   1596	.prime_handle_to_fd = vmw_prime_handle_to_fd,
   1597
   1598	.fops = &vmwgfx_driver_fops,
   1599	.name = VMWGFX_DRIVER_NAME,
   1600	.desc = VMWGFX_DRIVER_DESC,
   1601	.date = VMWGFX_DRIVER_DATE,
   1602	.major = VMWGFX_DRIVER_MAJOR,
   1603	.minor = VMWGFX_DRIVER_MINOR,
   1604	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
   1605};
   1606
   1607static struct pci_driver vmw_pci_driver = {
   1608	.name = VMWGFX_DRIVER_NAME,
   1609	.id_table = vmw_pci_id_list,
   1610	.probe = vmw_probe,
   1611	.remove = vmw_remove,
   1612	.driver = {
   1613		.pm = &vmw_pm_ops
   1614	}
   1615};
   1616
   1617static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
   1618{
   1619	struct vmw_private *vmw;
   1620	int ret;
   1621
   1622	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
   1623	if (ret)
   1624		goto out_error;
   1625
   1626	ret = pcim_enable_device(pdev);
   1627	if (ret)
   1628		goto out_error;
   1629
   1630	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
   1631				 struct vmw_private, drm);
   1632	if (IS_ERR(vmw)) {
   1633		ret = PTR_ERR(vmw);
   1634		goto out_error;
   1635	}
   1636
   1637	pci_set_drvdata(pdev, &vmw->drm);
   1638
   1639	ret = vmw_driver_load(vmw, ent->device);
   1640	if (ret)
   1641		goto out_error;
   1642
   1643	ret = drm_dev_register(&vmw->drm, 0);
   1644	if (ret)
   1645		goto out_unload;
   1646
   1647	vmw_debugfs_gem_init(vmw);
   1648	vmw_debugfs_resource_managers_init(vmw);
   1649
   1650	return 0;
   1651out_unload:
   1652	vmw_driver_unload(&vmw->drm);
   1653out_error:
   1654	return ret;
   1655}
   1656
   1657drm_module_pci_driver(vmw_pci_driver);
   1658
   1659MODULE_AUTHOR("VMware Inc. and others");
   1660MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
   1661MODULE_LICENSE("GPL and additional rights");
   1662MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
   1663	       __stringify(VMWGFX_DRIVER_MINOR) "."
   1664	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
   1665	       "0");