cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vgpu.c (17829B)


/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
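
/*
 * Editor's illustration (not part of this file): the paravirtualized
 * guest i915 driver reads these fields back from the PVINFO MMIO page
 * and balloons out the graphics memory ranges it must not touch. A
 * minimal hypothetical reader, assuming "pvinfo" is an ioremap'd
 * mapping of the PVINFO page:
 *
 *	u32 base = readl(pvinfo + offsetof(struct vgt_if,
 *					   avail_rs.mappable_gmadr.base));
 *	u32 size = readl(pvinfo + offsetof(struct vgt_if,
 *					   avail_rs.mappable_gmadr.size));
 *
 * Only [base, base + size) of the mappable GM range belongs to this
 * vGPU; everything outside it is reserved by ballooning.
 */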

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

static const struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 gets twice as much GPU time as a vGPU
	 * with a weight of 4 on a contended host. Each vGPU type has its
	 * own weight. Legal weights range from 1 to 16.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	const char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
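
/*
 * Editor's sketch (not in the original file): the weight math above as
 * compile-time checks. Halving the instance count doubles the weight,
 * so a "1"-type vGPU is scheduled for twice the GPU time of a "2"-type
 * vGPU. Assumes the kernel's static_assert() is visible here.
 */
static_assert(VGPU_WEIGHT(8) == 2);
static_assert(VGPU_WEIGHT(4) == 4);
static_assert(VGPU_WEIGHT(2) == 8);
static_assert(VGPU_WEIGHT(1) == 16);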

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* The vGPU type name is defined as GVTg_Vx_y, where x encodes the
	 * physical GPU generation (e.g. V4 for BDW server, V5 for SKL
	 * server).
	 *
	 * Depending on the physical SKU's resources, we might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types
	 * of vGPU can be created on the same physical GPU, subject to the
	 * available resources. Each vGPU type carries an "avail_instance"
	 * count that indicates how many vGPU instances of this type can
	 * still be created.
	 *
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
					vgpu_types[i].weight > VGPU_MAX_WEIGHT)
			return -EINVAL;

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (GRAPHICS_VER(gvt->gt->i915) == 8)
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (GRAPHICS_VER(gvt->gt->i915) == 9)
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
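
/*
 * Worked example (hypothetical numbers, editor's illustration): if a
 * SKU leaves low_avail = 512 MB and high_avail = 2048 MB after the
 * host's share, the "4" type (128 MB low, 512 MB high) gets
 * avail_instance = min(512 / 128, 2048 / 512) = 4, i.e. at most four
 * GVTg_V5_4 instances fit on that SKU.
 */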

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* Need to base this on the maximum hw resource size, but keep the
	 * static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
		       i, gvt->types[i].name,
		       gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
		       gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
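
/*
 * Continuing the hypothetical example above: once one "4"-type vGPU
 * exists, 128 MB low GM, 512 MB high GM and 4 fences have been
 * allocated, so the "4" type drops to
 * avail_instance = min(384 / 128, 1536 / 512, fence_avail / 4) = 3,
 * assuming fences are not the limiting resource.
 */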

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

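	/*
	 * Editor's note: vgpu_lock is dropped across the wait below so
	 * that in-flight workloads can complete while we block.
	 */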
	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all of its runtime information
 * will be destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	vgpu->d3_entered = false;
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;

	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");

	/*
	 * Remove from the idr first so that the later cleanup can tell
	 * whether the service must be stopped because no vGPU is active.
	 */
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_vgpu_detach_regions(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when a user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
			param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
		GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init_base(&vgpu->object_idr, 1);
	intel_vgpu_init_cfg_space(vgpu, param->primary);
	vgpu->d3_entered = false;

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

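/*
 * Error unwinding: each label below undoes the setup steps that
 * succeeded before the failure, in reverse order of initialization.
 */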
out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
				struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX the creation params are currently based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu)) {
		/* recalculate the remaining instance count for each type */
		intel_gvt_update_vgpu_types(gvt);
		intel_gvt_update_reg_whitelist(vgpu);
	}
	mutex_unlock(&gvt->lock);

	return vgpu;
}
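
/*
 * Usage sketch (editor's illustration): a hypervisor backend picks an
 * entry from gvt->types[] and creates an instance, e.g.:
 *
 *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[i]);
 *
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	intel_gvt_activate_vgpu(vgpu);
 *
 * The error-pointer convention mirrors __intel_gvt_create_vgpu() above.
 */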

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * a device model reset or a GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset and brings
 * the whole vGPU back to the default state it had when created. This vGPU
 * function is required both for functionality and for security concerns. The
 * ultimate goal of a vGPU FLR is to allow a vGPU instance to be reused by a
 * virtual machine. When we assign a vGPU to a virtual machine, we must issue
 * such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU
 * Spec. Unlike the FLR, a GT reset only resets particular resources of a vGPU
 * per the reset request. The guest driver can issue a GT reset by programming
 * the virtual GDRST register to reset specific virtual GPU engines or all
 * engines.
 *
 * The parameter @dmlr identifies whether we will do a DMLR or a GT reset. The
 * parameter @engine_mask specifies the engines that need to be reset. If the
 * value ALL_ENGINES is given for @engine_mask, it means the caller requests a
 * full GT reset in which all virtual GPU engines are reset. For an FLR,
 * @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		if (engine_mask == ALL_ENGINES)
			intel_vgpu_invalidate_ppgtt(vgpu);
		/* fence will not be reset during virtual reset */
		if (dmlr) {
			if (!vgpu->d3_entered) {
				intel_vgpu_invalidate_ppgtt(vgpu);
				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
			}
			intel_vgpu_reset_ggtt(vgpu, true);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);

		if (dmlr) {
			intel_vgpu_reset_display(vgpu);
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode during a dmlr reset */
			vgpu->failsafe = false;
			/*
			 * PCI_D0 is set before dmlr, so reset d3_entered here
			 * once we are done with it.
			 */
			if (vgpu->d3_entered)
				vgpu->d3_entered = false;
			else
				vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
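
/*
 * Caller sketch (editor's illustration): a guest-triggered reset of
 * just the render engine would look roughly like
 *
 *	mutex_lock(&vgpu->vgpu_lock);
 *	intel_gvt_reset_vgpu_locked(vgpu, false, BIT(RCS0));
 *	mutex_unlock(&vgpu->vgpu_lock);
 *
 * while intel_gvt_reset_vgpu() below performs the full DMLR variant
 * (dmlr = true, engine_mask ignored).
 */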

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}