cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

handlers.c (97590B)


      1/*
      2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     21 * SOFTWARE.
     22 *
     23 * Authors:
     24 *    Kevin Tian <kevin.tian@intel.com>
     25 *    Eddie Dong <eddie.dong@intel.com>
     26 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
     27 *
     28 * Contributors:
     29 *    Min He <min.he@intel.com>
     30 *    Tina Zhang <tina.zhang@intel.com>
     31 *    Pei Zhang <pei.zhang@intel.com>
     32 *    Niu Bing <bing.niu@intel.com>
     33 *    Ping Gao <ping.a.gao@intel.com>
     34 *    Zhi Wang <zhi.a.wang@intel.com>
     35 *
     36
     37 */
     38
     39#include "i915_drv.h"
     40#include "i915_reg.h"
     41#include "gvt.h"
     42#include "i915_pvinfo.h"
     43#include "intel_mchbar_regs.h"
     44#include "display/intel_display_types.h"
     45#include "display/intel_dmc_regs.h"
     46#include "display/intel_fbc.h"
     47#include "display/vlv_dsi_pll_regs.h"
     48#include "gt/intel_gt_regs.h"
     49
     50/* XXX FIXME i915 has changed PP_XXX definition */
     51#define PCH_PP_STATUS  _MMIO(0xc7200)
     52#define PCH_PP_CONTROL _MMIO(0xc7204)
     53#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
     54#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
     55#define PCH_PP_DIVISOR _MMIO(0xc7210)
     56
     57unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
     58{
     59	struct drm_i915_private *i915 = gvt->gt->i915;
     60
     61	if (IS_BROADWELL(i915))
     62		return D_BDW;
     63	else if (IS_SKYLAKE(i915))
     64		return D_SKL;
     65	else if (IS_KABYLAKE(i915))
     66		return D_KBL;
     67	else if (IS_BROXTON(i915))
     68		return D_BXT;
     69	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
     70		return D_CFL;
     71
     72	return 0;
     73}
     74
     75static bool intel_gvt_match_device(struct intel_gvt *gvt,
     76		unsigned long device)
     77{
     78	return intel_gvt_get_device_type(gvt) & device;
     79}
     80
     81static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
     82	void *p_data, unsigned int bytes)
     83{
     84	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
     85}
     86
     87static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
     88	void *p_data, unsigned int bytes)
     89{
     90	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
     91}
     92
     93struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
     94						  unsigned int offset)
     95{
     96	struct intel_gvt_mmio_info *e;
     97
     98	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
     99		if (e->offset == offset)
    100			return e;
    101	}
    102	return NULL;
    103}
    104
    105static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
    106			   u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
    107			   gvt_mmio_func read, gvt_mmio_func write)
    108{
    109	struct intel_gvt_mmio_info *p;
    110	u32 start, end, i;
    111
    112	if (!intel_gvt_match_device(gvt, device))
    113		return 0;
    114
    115	if (WARN_ON(!IS_ALIGNED(offset, 4)))
    116		return -EINVAL;
    117
    118	start = offset;
    119	end = offset + size;
    120
    121	for (i = start; i < end; i += 4) {
    122		p = intel_gvt_find_mmio_info(gvt, i);
    123		if (!p) {
    124			WARN(1, "assign a handler to a non-tracked mmio %x\n",
    125				i);
    126			return -ENODEV;
    127		}
    128		p->ro_mask = ro_mask;
    129		gvt->mmio.mmio_attribute[i / 4] = flags;
    130		if (read)
    131			p->read = read;
    132		if (write)
    133			p->write = write;
    134	}
    135	return 0;
    136}
    137
    138/**
    139 * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
    140 * @gvt: a GVT device
    141 * @offset: register offset
    142 *
    143 * Returns:
    144 * The engine containing the offset within its mmio page.
    145 */
    146const struct intel_engine_cs *
    147intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
    148{
    149	struct intel_engine_cs *engine;
    150	enum intel_engine_id id;
    151
    152	offset &= ~GENMASK(11, 0);
    153	for_each_engine(engine, gvt->gt, id)
    154		if (engine->mmio_base == offset)
    155			return engine;
    156
    157	return NULL;
    158}
    159
    160#define offset_to_fence_num(offset) \
    161	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
    162
    163#define fence_num_to_offset(num) \
    164	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
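/*
 * Each GEN6+ fence register pair occupies 8 bytes, so fence n lives at
 * FENCE_REG_GEN6_LO(0) + 8 * n and offset_to_fence_num() inverts that mapping
 * with a shift by 3: for example, an access 16 bytes past the first fence
 * register decodes to fence number 2.
 */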
    165
    166
    167void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
    168{
    169	switch (reason) {
    170	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
    171		pr_err("Detected your guest driver doesn't support GVT-g.\n");
    172		break;
    173	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
     174		pr_err("Insufficient graphics resources for the guest\n");
    175		break;
    176	case GVT_FAILSAFE_GUEST_ERR:
     177		pr_err("GVT internal error for the guest\n");
    178		break;
    179	default:
    180		break;
    181	}
    182	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
    183	vgpu->failsafe = true;
    184}
    185
    186static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
    187		unsigned int fence_num, void *p_data, unsigned int bytes)
    188{
    189	unsigned int max_fence = vgpu_fence_sz(vgpu);
    190
    191	if (fence_num >= max_fence) {
    192		gvt_vgpu_err("access oob fence reg %d/%d\n",
    193			     fence_num, max_fence);
    194
     195		/* When a guest accesses oob fence regs without accessing
     196		 * pv_info first, we treat the guest as not supporting GVT
     197		 * and let the vgpu enter failsafe mode.
     198		 */
    199		if (!vgpu->pv_notified)
    200			enter_failsafe_mode(vgpu,
    201					GVT_FAILSAFE_UNSUPPORTED_GUEST);
    202
    203		memset(p_data, 0, bytes);
    204		return -EINVAL;
    205	}
    206	return 0;
    207}
    208
    209static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
    210		unsigned int offset, void *p_data, unsigned int bytes)
    211{
    212	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
    213
    214	if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
    215		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
    216			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
    217		else if (!ips)
    218			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
    219		else {
    220			/* All engines must be enabled together for vGPU,
    221			 * since we don't know which engine the ppgtt will
    222			 * bind to when shadowing.
    223			 */
    224			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
    225				     ips);
    226			return -EINVAL;
    227		}
    228	}
    229
    230	write_vreg(vgpu, offset, p_data, bytes);
    231	return 0;
    232}
    233
    234static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
    235		void *p_data, unsigned int bytes)
    236{
    237	int ret;
    238
    239	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
    240			p_data, bytes);
    241	if (ret)
    242		return ret;
    243	read_vreg(vgpu, off, p_data, bytes);
    244	return 0;
    245}
    246
    247static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
    248		void *p_data, unsigned int bytes)
    249{
    250	struct intel_gvt *gvt = vgpu->gvt;
    251	unsigned int fence_num = offset_to_fence_num(off);
    252	int ret;
    253
    254	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
    255	if (ret)
    256		return ret;
    257	write_vreg(vgpu, off, p_data, bytes);
    258
    259	mmio_hw_access_pre(gvt->gt);
    260	intel_vgpu_write_fence(vgpu, fence_num,
    261			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
    262	mmio_hw_access_post(gvt->gt);
    263	return 0;
    264}
    265
    266#define CALC_MODE_MASK_REG(old, new) \
    267	(((new) & GENMASK(31, 16)) \
    268	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
    269	 | ((new) & ((new) >> 16))))
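/*
 * Worked example of the masked-write convention handled above: the upper 16
 * bits of the guest's write select which of the lower 16 mode bits get
 * updated. With old = 0x0000 and new = 0x00010001 (mask bit 0 set, value
 * bit 0 set), CALC_MODE_MASK_REG() keeps the new upper half and yields
 * 0x00010001, i.e. bit 0 is set; with old = 0x0001 and new = 0x00010000 it
 * yields 0x00010000, clearing bit 0 while leaving the other mode bits alone.
 */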
    270
    271static int mul_force_wake_write(struct intel_vgpu *vgpu,
    272		unsigned int offset, void *p_data, unsigned int bytes)
    273{
    274	u32 old, new;
    275	u32 ack_reg_offset;
    276
    277	old = vgpu_vreg(vgpu, offset);
    278	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
    279
     280	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
    281		switch (offset) {
    282		case FORCEWAKE_RENDER_GEN9_REG:
    283			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
    284			break;
    285		case FORCEWAKE_GT_GEN9_REG:
    286			ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
    287			break;
    288		case FORCEWAKE_MEDIA_GEN9_REG:
    289			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
    290			break;
    291		default:
     292			/* should not hit here */
    293			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
    294			return -EINVAL;
    295		}
    296	} else {
    297		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
    298	}
    299
    300	vgpu_vreg(vgpu, offset) = new;
    301	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
    302	return 0;
    303}
    304
    305static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    306			    void *p_data, unsigned int bytes)
    307{
    308	intel_engine_mask_t engine_mask = 0;
    309	u32 data;
    310
    311	write_vreg(vgpu, offset, p_data, bytes);
    312	data = vgpu_vreg(vgpu, offset);
    313
    314	if (data & GEN6_GRDOM_FULL) {
    315		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
    316		engine_mask = ALL_ENGINES;
    317	} else {
    318		if (data & GEN6_GRDOM_RENDER) {
    319			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
    320			engine_mask |= BIT(RCS0);
    321		}
    322		if (data & GEN6_GRDOM_MEDIA) {
    323			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
    324			engine_mask |= BIT(VCS0);
    325		}
    326		if (data & GEN6_GRDOM_BLT) {
    327			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
    328			engine_mask |= BIT(BCS0);
    329		}
    330		if (data & GEN6_GRDOM_VECS) {
    331			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
    332			engine_mask |= BIT(VECS0);
    333		}
    334		if (data & GEN8_GRDOM_MEDIA2) {
    335			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
    336			engine_mask |= BIT(VCS1);
    337		}
    338		if (data & GEN9_GRDOM_GUC) {
    339			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
    340			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
    341		}
    342		engine_mask &= vgpu->gvt->gt->info.engine_mask;
    343	}
    344
     345	/* vgpu_lock already held by emulate mmio r/w */
    346	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
    347
    348	/* sw will wait for the device to ack the reset request */
    349	vgpu_vreg(vgpu, offset) = 0;
    350
    351	return 0;
    352}
    353
    354static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
    355		void *p_data, unsigned int bytes)
    356{
    357	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
    358}
    359
    360static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    361		void *p_data, unsigned int bytes)
    362{
    363	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
    364}
    365
    366static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
    367		unsigned int offset, void *p_data, unsigned int bytes)
    368{
    369	write_vreg(vgpu, offset, p_data, bytes);
    370
    371	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
    372		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
    373		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
    374		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
    375		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
    376
    377	} else
    378		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
    379			~(PP_ON | PP_SEQUENCE_POWER_DOWN
    380					| PP_CYCLE_DELAY_ACTIVE);
    381	return 0;
    382}
    383
    384static int transconf_mmio_write(struct intel_vgpu *vgpu,
    385		unsigned int offset, void *p_data, unsigned int bytes)
    386{
    387	write_vreg(vgpu, offset, p_data, bytes);
    388
    389	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
    390		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
    391	else
    392		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
    393	return 0;
    394}
    395
    396static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    397		void *p_data, unsigned int bytes)
    398{
    399	write_vreg(vgpu, offset, p_data, bytes);
    400
    401	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
    402		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
    403	else
    404		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
    405
    406	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
    407		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
    408	else
    409		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
    410
    411	return 0;
    412}
    413
    414static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
    415		void *p_data, unsigned int bytes)
    416{
    417	switch (offset) {
    418	case 0xe651c:
    419	case 0xe661c:
    420	case 0xe671c:
    421	case 0xe681c:
    422		vgpu_vreg(vgpu, offset) = 1 << 17;
    423		break;
    424	case 0xe6c04:
    425		vgpu_vreg(vgpu, offset) = 0x3;
    426		break;
    427	case 0xe6e1c:
    428		vgpu_vreg(vgpu, offset) = 0x2f << 16;
    429		break;
    430	default:
    431		return -EINVAL;
    432	}
    433
    434	read_vreg(vgpu, offset, p_data, bytes);
    435	return 0;
    436}
    437
    438/*
     439 * Only PIPE_A is enabled in the current vGPU display, and PIPE_A is tied to
     440 *   TRANSCODER_A in HW. The DDI/PORT could be any PORT_x, depending on
     441 *   setup_virtual_dp_monitor().
     442 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initial
     443 *   enabled DPLL; the guest driver may later pick a different DPLLx at modeset.
     444 * So the correct sequence to find the DP stream clock is:
     445 *   Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
     446 *   Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
     447 * The refresh rate can then be calculated from the following equations:
     448 *   Pixel clock = h_total * v_total * refresh_rate
     449 *   stream clock = Pixel clock
     450 *   ls_clk = DP bitrate
     451 *   Link M/N = strm_clk / ls_clk
    452 */
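/*
 * Illustrative numbers (a hypothetical 1080p60-style mode, not read from any
 * hardware): with (h_total + 1) * (v_total + 1) = 2200 * 1125 = 2,475,000 and
 * a stream clock of 148,500 kHz, the refresh rate works out to
 * 148,500,000 / 2,475,000 = 60 Hz. The stream clock itself is the link symbol
 * clock scaled by the Link M/N ratio the guest programmed, which is what
 * vgpu_update_refresh_rate() below computes.
 */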
    453
    454static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
    455{
    456	u32 dp_br = 0;
    457	u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));
    458
    459	switch (ddi_pll_sel) {
    460	case PORT_CLK_SEL_LCPLL_2700:
    461		dp_br = 270000 * 2;
    462		break;
    463	case PORT_CLK_SEL_LCPLL_1350:
    464		dp_br = 135000 * 2;
    465		break;
    466	case PORT_CLK_SEL_LCPLL_810:
    467		dp_br = 81000 * 2;
    468		break;
    469	case PORT_CLK_SEL_SPLL:
    470	{
    471		switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
    472		case SPLL_FREQ_810MHz:
    473			dp_br = 81000 * 2;
    474			break;
    475		case SPLL_FREQ_1350MHz:
    476			dp_br = 135000 * 2;
    477			break;
    478		case SPLL_FREQ_2700MHz:
    479			dp_br = 270000 * 2;
    480			break;
    481		default:
    482			gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
    483				    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
    484			break;
    485		}
    486		break;
    487	}
    488	case PORT_CLK_SEL_WRPLL1:
    489	case PORT_CLK_SEL_WRPLL2:
    490	{
    491		u32 wrpll_ctl;
    492		int refclk, n, p, r;
    493
    494		if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
    495			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
    496		else
    497			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));
    498
    499		switch (wrpll_ctl & WRPLL_REF_MASK) {
    500		case WRPLL_REF_PCH_SSC:
    501			refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
    502			break;
    503		case WRPLL_REF_LCPLL:
    504			refclk = 2700000;
    505			break;
    506		default:
    507			gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
    508				    vgpu->id, port_name(port), wrpll_ctl);
    509			goto out;
    510		}
    511
    512		r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
    513		p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
    514		n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
    515
    516		dp_br = (refclk * n / 10) / (p * r) * 2;
    517		break;
    518	}
    519	default:
    520		gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
    521			    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
    522		break;
    523	}
    524
    525out:
    526	return dp_br;
    527}
    528
    529static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
    530{
    531	u32 dp_br = 0;
    532	int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
    533	enum dpio_phy phy = DPIO_PHY0;
    534	enum dpio_channel ch = DPIO_CH0;
    535	struct dpll clock = {0};
    536	u32 temp;
    537
    538	/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
    539	switch (port) {
    540	case PORT_A:
    541		phy = DPIO_PHY1;
    542		ch = DPIO_CH0;
    543		break;
    544	case PORT_B:
    545		phy = DPIO_PHY0;
    546		ch = DPIO_CH0;
    547		break;
    548	case PORT_C:
    549		phy = DPIO_PHY0;
    550		ch = DPIO_CH1;
    551		break;
    552	default:
    553		gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
    554		goto out;
    555	}
    556
    557	temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
    558	if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
    559		gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
    560			    vgpu->id, port_name(port), temp);
    561		goto out;
    562	}
    563
    564	clock.m1 = 2;
    565	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
    566				 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
    567	if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
    568		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
    569					  vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
    570	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
    571				vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
    572	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
    573				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
    574	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
    575				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
    576	clock.m = clock.m1 * clock.m2;
    577	clock.p = clock.p1 * clock.p2 * 5;
    578
    579	if (clock.n == 0 || clock.p == 0) {
    580		gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
    581		goto out;
    582	}
    583
    584	clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
    585	clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
    586
    587	dp_br = clock.dot;
    588
    589out:
    590	return dp_br;
    591}
    592
    593static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
    594{
    595	u32 dp_br = 0;
    596	enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;
    597
    598	/* Find the enabled DPLL for the DDI/PORT */
    599	if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
    600	    (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
    601		dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
    602			DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
    603			DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
    604	} else {
    605		gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
    606			    vgpu->id, port_name(port));
    607		return dp_br;
    608	}
    609
     610	/* Find PLL output frequency from the correct DPLL, and get the bit rate */
    611	switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
    612		DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
    613		DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
    614		case DPLL_CTRL1_LINK_RATE_810:
    615			dp_br = 81000 * 2;
    616			break;
    617		case DPLL_CTRL1_LINK_RATE_1080:
    618			dp_br = 108000 * 2;
    619			break;
    620		case DPLL_CTRL1_LINK_RATE_1350:
    621			dp_br = 135000 * 2;
    622			break;
    623		case DPLL_CTRL1_LINK_RATE_1620:
    624			dp_br = 162000 * 2;
    625			break;
    626		case DPLL_CTRL1_LINK_RATE_2160:
    627			dp_br = 216000 * 2;
    628			break;
    629		case DPLL_CTRL1_LINK_RATE_2700:
    630			dp_br = 270000 * 2;
    631			break;
    632		default:
    633			dp_br = 0;
    634			gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
    635				    vgpu->id, port_name(port), dpll_id);
    636	}
    637
    638	return dp_br;
    639}
    640
    641static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
    642{
    643	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
    644	enum port port;
    645	u32 dp_br, link_m, link_n, htotal, vtotal;
    646
    647	/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
    648	port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
    649		TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
    650	if (port != PORT_B && port != PORT_D) {
    651		gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
    652		return;
    653	}
    654
    655	/* Calculate DP bitrate from PLL */
    656	if (IS_BROADWELL(dev_priv))
    657		dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
    658	else if (IS_BROXTON(dev_priv))
    659		dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
    660	else
    661		dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
    662
    663	/* Get DP link symbol clock M/N */
    664	link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
    665	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
    666
    667	/* Get H/V total from transcoder timing */
    668	htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
    669	vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
    670
    671	if (dp_br && link_n && htotal && vtotal) {
    672		u64 pixel_clk = 0;
    673		u32 new_rate = 0;
    674		u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
    675
     676		/* Calculate pixel clock by (ls_clk * M / N) */
    677		pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
    678		pixel_clk *= MSEC_PER_SEC;
    679
     680		/* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
    681		new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
    682
    683		if (*old_rate != new_rate)
    684			*old_rate = new_rate;
    685
    686		gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
    687			    vgpu->id, pipe_name(PIPE_A), new_rate);
    688	}
    689}
    690
    691static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    692		void *p_data, unsigned int bytes)
    693{
    694	u32 data;
    695
    696	write_vreg(vgpu, offset, p_data, bytes);
    697	data = vgpu_vreg(vgpu, offset);
    698
    699	if (data & PIPECONF_ENABLE) {
    700		vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
    701		vgpu_update_refresh_rate(vgpu);
    702		vgpu_update_vblank_emulation(vgpu, true);
    703	} else {
    704		vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
    705		vgpu_update_vblank_emulation(vgpu, false);
    706	}
    707	return 0;
    708}
    709
    710/* sorted in ascending order */
    711static i915_reg_t force_nonpriv_white_list[] = {
    712	_MMIO(0xd80),
    713	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
    714	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
    715	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
    716	PS_INVOCATION_COUNT, //_MMIO(0x2348)
    717	PS_DEPTH_COUNT, //_MMIO(0x2350)
    718	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
    719	_MMIO(0x2690),
    720	_MMIO(0x2694),
    721	_MMIO(0x2698),
    722	_MMIO(0x2754),
    723	_MMIO(0x28a0),
    724	_MMIO(0x4de0),
    725	_MMIO(0x4de4),
    726	_MMIO(0x4dfc),
    727	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
    728	_MMIO(0x7014),
    729	HDC_CHICKEN0,//_MMIO(0x7300)
    730	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
    731	_MMIO(0x7700),
    732	_MMIO(0x7704),
    733	_MMIO(0x7708),
    734	_MMIO(0x770c),
    735	_MMIO(0x83a8),
    736	_MMIO(0xb110),
    737	GEN8_L3SQCREG4,//_MMIO(0xb118)
    738	_MMIO(0xe100),
    739	_MMIO(0xe18c),
    740	_MMIO(0xe48c),
    741	_MMIO(0xe5f4),
    742	_MMIO(0x64844),
    743};
    744
    745/* a simple bsearch */
    746static inline bool in_whitelist(u32 reg)
    747{
    748	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
    749	i915_reg_t *array = force_nonpriv_white_list;
    750
    751	while (left < right) {
    752		int mid = (left + right)/2;
    753
    754		if (reg > array[mid].reg)
    755			left = mid + 1;
    756		else if (reg < array[mid].reg)
    757			right = mid;
    758		else
    759			return true;
    760	}
    761	return false;
    762}
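/*
 * For example, in_whitelist(0x2348) returns true because PS_INVOCATION_COUNT
 * (_MMIO(0x2348)) appears in force_nonpriv_white_list above; the binary
 * search only works while that table stays sorted in ascending offset order.
 */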
    763
    764static int force_nonpriv_write(struct intel_vgpu *vgpu,
    765	unsigned int offset, void *p_data, unsigned int bytes)
    766{
    767	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
    768	const struct intel_engine_cs *engine =
    769		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
    770
    771	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
    772		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
    773			vgpu->id, offset, bytes);
    774		return -EINVAL;
    775	}
    776
    777	if (!in_whitelist(reg_nonpriv) &&
    778	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
    779		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
    780			vgpu->id, reg_nonpriv, offset);
    781	} else
    782		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
    783
    784	return 0;
    785}
    786
    787static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    788		void *p_data, unsigned int bytes)
    789{
    790	write_vreg(vgpu, offset, p_data, bytes);
    791
    792	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
    793		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
    794	} else {
    795		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
    796		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
    797			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
    798				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
    799	}
    800	return 0;
    801}
    802
    803static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
    804		unsigned int offset, void *p_data, unsigned int bytes)
    805{
    806	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
    807	return 0;
    808}
    809
    810#define FDI_LINK_TRAIN_PATTERN1         0
    811#define FDI_LINK_TRAIN_PATTERN2         1
    812
    813static int fdi_auto_training_started(struct intel_vgpu *vgpu)
    814{
    815	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
    816	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
    817	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
    818
    819	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
    820			(rx_ctl & FDI_RX_ENABLE) &&
    821			(rx_ctl & FDI_AUTO_TRAINING) &&
    822			(tx_ctl & DP_TP_CTL_ENABLE) &&
    823			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
    824		return 1;
    825	else
    826		return 0;
    827}
    828
    829static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
    830		enum pipe pipe, unsigned int train_pattern)
    831{
    832	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
    833	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
    834	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
    835	unsigned int fdi_iir_check_bits;
    836
    837	fdi_rx_imr = FDI_RX_IMR(pipe);
    838	fdi_tx_ctl = FDI_TX_CTL(pipe);
    839	fdi_rx_ctl = FDI_RX_CTL(pipe);
    840
    841	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
    842		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
    843		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
    844		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
    845	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
    846		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
    847		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
    848		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
    849	} else {
    850		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
    851		return -EINVAL;
    852	}
    853
    854	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
    855	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
    856
    857	/* If imr bit has been masked */
    858	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
    859		return 0;
    860
    861	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
    862			== fdi_tx_check_bits)
    863		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
    864			== fdi_rx_check_bits))
    865		return 1;
    866	else
    867		return 0;
    868}
    869
    870#define INVALID_INDEX (~0U)
    871
    872static unsigned int calc_index(unsigned int offset, unsigned int start,
    873	unsigned int next, unsigned int end, i915_reg_t i915_end)
    874{
    875	unsigned int range = next - start;
    876
    877	if (!end)
    878		end = i915_mmio_reg_offset(i915_end);
    879	if (offset < start || offset > end)
    880		return INVALID_INDEX;
    881	offset -= start;
    882	return offset / range;
    883}
    884
    885#define FDI_RX_CTL_TO_PIPE(offset) \
    886	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
    887
    888#define FDI_TX_CTL_TO_PIPE(offset) \
    889	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
    890
    891#define FDI_RX_IMR_TO_PIPE(offset) \
    892	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
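/*
 * calc_index() turns an MMIO offset into a pipe/port index by measuring the
 * distance from the PIPE_A/PORT_A instance and dividing by the A-to-B
 * register stride, so FDI_RX_CTL_TO_PIPE(_FDI_RXA_CTL) is 0 (PIPE_A),
 * FDI_RX_CTL_TO_PIPE(_FDI_RXB_CTL) is 1 (PIPE_B), and any offset outside the
 * covered range yields INVALID_INDEX.
 */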
    893
    894static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
    895		unsigned int offset, void *p_data, unsigned int bytes)
    896{
    897	i915_reg_t fdi_rx_iir;
    898	unsigned int index;
    899	int ret;
    900
    901	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
    902		index = FDI_RX_CTL_TO_PIPE(offset);
    903	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
    904		index = FDI_TX_CTL_TO_PIPE(offset);
    905	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
    906		index = FDI_RX_IMR_TO_PIPE(offset);
    907	else {
     908		gvt_vgpu_err("Unsupported register %x\n", offset);
    909		return -EINVAL;
    910	}
    911
    912	write_vreg(vgpu, offset, p_data, bytes);
    913
    914	fdi_rx_iir = FDI_RX_IIR(index);
    915
    916	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
    917	if (ret < 0)
    918		return ret;
    919	if (ret)
    920		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
    921
    922	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
    923	if (ret < 0)
    924		return ret;
    925	if (ret)
    926		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
    927
    928	if (offset == _FDI_RXA_CTL)
    929		if (fdi_auto_training_started(vgpu))
    930			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
    931				DP_TP_STATUS_AUTOTRAIN_DONE;
    932	return 0;
    933}
    934
    935#define DP_TP_CTL_TO_PORT(offset) \
    936	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
    937
    938static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    939		void *p_data, unsigned int bytes)
    940{
    941	i915_reg_t status_reg;
    942	unsigned int index;
    943	u32 data;
    944
    945	write_vreg(vgpu, offset, p_data, bytes);
    946
    947	index = DP_TP_CTL_TO_PORT(offset);
    948	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
    949	if (data == 0x2) {
    950		status_reg = DP_TP_STATUS(index);
    951		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
    952	}
    953	return 0;
    954}
    955
    956static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
    957		unsigned int offset, void *p_data, unsigned int bytes)
    958{
    959	u32 reg_val;
    960	u32 sticky_mask;
    961
    962	reg_val = *((u32 *)p_data);
    963	sticky_mask = GENMASK(27, 26) | (1 << 24);
    964
    965	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
    966		(vgpu_vreg(vgpu, offset) & sticky_mask);
    967	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
    968	return 0;
    969}
    970
    971static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
    972		unsigned int offset, void *p_data, unsigned int bytes)
    973{
    974	u32 data;
    975
    976	write_vreg(vgpu, offset, p_data, bytes);
    977	data = vgpu_vreg(vgpu, offset);
    978
    979	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
    980		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
    981	return 0;
    982}
    983
    984static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
    985		unsigned int offset, void *p_data, unsigned int bytes)
    986{
    987	u32 data;
    988
    989	write_vreg(vgpu, offset, p_data, bytes);
    990	data = vgpu_vreg(vgpu, offset);
    991
    992	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
    993		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
    994	else
    995		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
    996	return 0;
    997}
    998
    999#define DSPSURF_TO_PIPE(offset) \
   1000	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
   1001
   1002static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1003		void *p_data, unsigned int bytes)
   1004{
   1005	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
   1006	u32 pipe = DSPSURF_TO_PIPE(offset);
   1007	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
   1008
   1009	write_vreg(vgpu, offset, p_data, bytes);
   1010	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
   1011
   1012	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
   1013
   1014	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
   1015		intel_vgpu_trigger_virtual_event(vgpu, event);
   1016	else
   1017		set_bit(event, vgpu->irq.flip_done_event[pipe]);
   1018
   1019	return 0;
   1020}
   1021
   1022#define SPRSURF_TO_PIPE(offset) \
   1023	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
   1024
   1025static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1026		void *p_data, unsigned int bytes)
   1027{
   1028	u32 pipe = SPRSURF_TO_PIPE(offset);
   1029	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
   1030
   1031	write_vreg(vgpu, offset, p_data, bytes);
   1032	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
   1033
   1034	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
   1035		intel_vgpu_trigger_virtual_event(vgpu, event);
   1036	else
   1037		set_bit(event, vgpu->irq.flip_done_event[pipe]);
   1038
   1039	return 0;
   1040}
   1041
   1042static int reg50080_mmio_write(struct intel_vgpu *vgpu,
   1043			       unsigned int offset, void *p_data,
   1044			       unsigned int bytes)
   1045{
   1046	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
   1047	enum pipe pipe = REG_50080_TO_PIPE(offset);
   1048	enum plane_id plane = REG_50080_TO_PLANE(offset);
   1049	int event = SKL_FLIP_EVENT(pipe, plane);
   1050
   1051	write_vreg(vgpu, offset, p_data, bytes);
   1052	if (plane == PLANE_PRIMARY) {
   1053		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
   1054		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
   1055	} else {
   1056		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
   1057	}
   1058
   1059	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
   1060		intel_vgpu_trigger_virtual_event(vgpu, event);
   1061	else
   1062		set_bit(event, vgpu->irq.flip_done_event[pipe]);
   1063
   1064	return 0;
   1065}
   1066
   1067static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
   1068		unsigned int reg)
   1069{
   1070	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
   1071	enum intel_gvt_event_type event;
   1072
   1073	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
   1074		event = AUX_CHANNEL_A;
   1075	else if (reg == _PCH_DPB_AUX_CH_CTL ||
   1076		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
   1077		event = AUX_CHANNEL_B;
   1078	else if (reg == _PCH_DPC_AUX_CH_CTL ||
   1079		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
   1080		event = AUX_CHANNEL_C;
   1081	else if (reg == _PCH_DPD_AUX_CH_CTL ||
   1082		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
   1083		event = AUX_CHANNEL_D;
   1084	else {
   1085		drm_WARN_ON(&dev_priv->drm, true);
   1086		return -EINVAL;
   1087	}
   1088
   1089	intel_vgpu_trigger_virtual_event(vgpu, event);
   1090	return 0;
   1091}
   1092
   1093static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
   1094		unsigned int reg, int len, bool data_valid)
   1095{
   1096	/* mark transaction done */
   1097	value |= DP_AUX_CH_CTL_DONE;
   1098	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
   1099	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
   1100
   1101	if (data_valid)
   1102		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
   1103	else
   1104		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
   1105
   1106	/* message size */
   1107	value &= ~(0xf << 20);
   1108	value |= (len << 20);
   1109	vgpu_vreg(vgpu, reg) = value;
   1110
   1111	if (value & DP_AUX_CH_CTL_INTERRUPT)
   1112		return trigger_aux_channel_interrupt(vgpu, reg);
   1113	return 0;
   1114}
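/*
 * For example, completing a 2-byte reply clears SEND_BUSY, sets DONE and
 * writes 2 into the message-size field (bits 23:20) of the AUX control
 * register; the AUX interrupt is only raised if the guest had enabled
 * DP_AUX_CH_CTL_INTERRUPT in the value it wrote.
 */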
   1115
   1116static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
   1117		u8 t)
   1118{
   1119	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
   1120		/* training pattern 1 for CR */
   1121		/* set LANE0_CR_DONE, LANE1_CR_DONE */
   1122		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
   1123		/* set LANE2_CR_DONE, LANE3_CR_DONE */
   1124		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
   1125	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
   1126			DPCD_TRAINING_PATTERN_2) {
   1127		/* training pattern 2 for EQ */
   1128		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane0_1 */
   1129		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
   1130		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
   1131		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane2_3 */
   1132		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
   1133		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
   1134		/* set INTERLANE_ALIGN_DONE */
   1135		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
   1136			DPCD_INTERLANE_ALIGN_DONE;
   1137	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
   1138			DPCD_LINK_TRAINING_DISABLED) {
   1139		/* finish link training */
   1140		/* set sink status as synchronized */
   1141		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
   1142	}
   1143}
   1144
   1145#define _REG_HSW_DP_AUX_CH_CTL(dp) \
   1146	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
   1147
   1148#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
   1149
   1150#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
   1151
   1152#define dpy_is_valid_port(port)	\
   1153		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
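/*
 * Example of the offset-to-port mapping above: _REG_SKL_DP_AUX_CH_CTL(1) is
 * 0x64010 + 0x100 = 0x64110, and OFFSET_TO_DP_AUX_PORT(0x64110) =
 * (0x64110 & 0xF00) >> 8 = 1, so an access to that register is routed to
 * AUX channel/port index 1.
 */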
   1154
   1155static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
   1156		unsigned int offset, void *p_data, unsigned int bytes)
   1157{
   1158	struct intel_vgpu_display *display = &vgpu->display;
   1159	int msg, addr, ctrl, op, len;
   1160	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
   1161	struct intel_vgpu_dpcd_data *dpcd = NULL;
   1162	struct intel_vgpu_port *port = NULL;
   1163	u32 data;
   1164
   1165	if (!dpy_is_valid_port(port_index)) {
   1166		gvt_vgpu_err("Unsupported DP port access!\n");
   1167		return 0;
   1168	}
   1169
   1170	write_vreg(vgpu, offset, p_data, bytes);
   1171	data = vgpu_vreg(vgpu, offset);
   1172
   1173	if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
   1174		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
   1175		/* SKL DPB/C/D aux ctl register changed */
   1176		return 0;
   1177	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
   1178		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
   1179		/* write to the data registers */
   1180		return 0;
   1181	}
   1182
   1183	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
   1184		/* just want to clear the sticky bits */
   1185		vgpu_vreg(vgpu, offset) = 0;
   1186		return 0;
   1187	}
   1188
   1189	port = &display->ports[port_index];
   1190	dpcd = port->dpcd;
   1191
   1192	/* read out message from DATA1 register */
   1193	msg = vgpu_vreg(vgpu, offset + 4);
   1194	addr = (msg >> 8) & 0xffff;
   1195	ctrl = (msg >> 24) & 0xff;
   1196	len = msg & 0xff;
   1197	op = ctrl >> 4;
   1198
   1199	if (op == GVT_AUX_NATIVE_WRITE) {
   1200		int t;
   1201		u8 buf[16];
   1202
   1203		if ((addr + len + 1) >= DPCD_SIZE) {
   1204			/*
    1205			 * Write request exceeds what we support.
    1206			 * DPCD spec: When a Source Device is writing a DPCD
   1207			 * address not supported by the Sink Device, the Sink
   1208			 * Device shall reply with AUX NACK and “M” equal to
   1209			 * zero.
   1210			 */
   1211
   1212			/* NAK the write */
   1213			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
   1214			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
   1215			return 0;
   1216		}
   1217
   1218		/*
    1219		 * Write request format: Header (command + address + size) occupies
   1220		 * 4 bytes, followed by (len + 1) bytes of data. See details at
   1221		 * intel_dp_aux_transfer().
   1222		 */
   1223		if ((len + 1 + 4) > AUX_BURST_SIZE) {
   1224			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
   1225			return -EINVAL;
   1226		}
   1227
   1228		/* unpack data from vreg to buf */
   1229		for (t = 0; t < 4; t++) {
   1230			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
   1231
   1232			buf[t * 4] = (r >> 24) & 0xff;
   1233			buf[t * 4 + 1] = (r >> 16) & 0xff;
   1234			buf[t * 4 + 2] = (r >> 8) & 0xff;
   1235			buf[t * 4 + 3] = r & 0xff;
   1236		}
   1237
   1238		/* write to virtual DPCD */
   1239		if (dpcd && dpcd->data_valid) {
   1240			for (t = 0; t <= len; t++) {
   1241				int p = addr + t;
   1242
   1243				dpcd->data[p] = buf[t];
   1244				/* check for link training */
   1245				if (p == DPCD_TRAINING_PATTERN_SET)
   1246					dp_aux_ch_ctl_link_training(dpcd,
   1247							buf[t]);
   1248			}
   1249		}
   1250
   1251		/* ACK the write */
   1252		vgpu_vreg(vgpu, offset + 4) = 0;
   1253		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
   1254				dpcd && dpcd->data_valid);
   1255		return 0;
   1256	}
   1257
   1258	if (op == GVT_AUX_NATIVE_READ) {
   1259		int idx, i, ret = 0;
   1260
   1261		if ((addr + len + 1) >= DPCD_SIZE) {
   1262			/*
    1263			 * read request exceeds what we support.
   1264			 * DPCD spec: A Sink Device receiving a Native AUX CH
   1265			 * read request for an unsupported DPCD address must
   1266			 * reply with an AUX ACK and read data set equal to
   1267			 * zero instead of replying with AUX NACK.
   1268			 */
   1269
    1270			/* ACK the READ */
   1271			vgpu_vreg(vgpu, offset + 4) = 0;
   1272			vgpu_vreg(vgpu, offset + 8) = 0;
   1273			vgpu_vreg(vgpu, offset + 12) = 0;
   1274			vgpu_vreg(vgpu, offset + 16) = 0;
   1275			vgpu_vreg(vgpu, offset + 20) = 0;
   1276
   1277			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
   1278					true);
   1279			return 0;
   1280		}
   1281
   1282		for (idx = 1; idx <= 5; idx++) {
   1283			/* clear the data registers */
   1284			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
   1285		}
   1286
   1287		/*
   1288		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
   1289		 */
   1290		if ((len + 2) > AUX_BURST_SIZE) {
   1291			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
   1292			return -EINVAL;
   1293		}
   1294
   1295		/* read from virtual DPCD to vreg */
   1296		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
   1297		if (dpcd && dpcd->data_valid) {
   1298			for (i = 1; i <= (len + 1); i++) {
   1299				int t;
   1300
   1301				t = dpcd->data[addr + i - 1];
   1302				t <<= (24 - 8 * (i % 4));
   1303				ret |= t;
   1304
   1305				if ((i % 4 == 3) || (i == (len + 1))) {
   1306					vgpu_vreg(vgpu, offset +
   1307							(i / 4 + 1) * 4) = ret;
   1308					ret = 0;
   1309				}
   1310			}
   1311		}
   1312		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
   1313				dpcd && dpcd->data_valid);
   1314		return 0;
   1315	}
   1316
   1317	/* i2c transaction starts */
   1318	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
   1319
   1320	if (data & DP_AUX_CH_CTL_INTERRUPT)
   1321		trigger_aux_channel_interrupt(vgpu, offset);
   1322	return 0;
   1323}
   1324
   1325static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
   1326		void *p_data, unsigned int bytes)
   1327{
   1328	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
   1329	write_vreg(vgpu, offset, p_data, bytes);
   1330	return 0;
   1331}
   1332
   1333static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1334		void *p_data, unsigned int bytes)
   1335{
   1336	bool vga_disable;
   1337
   1338	write_vreg(vgpu, offset, p_data, bytes);
   1339	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
   1340
   1341	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
   1342			vga_disable ? "Disable" : "Enable");
   1343	return 0;
   1344}
   1345
   1346static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
   1347		unsigned int sbi_offset)
   1348{
   1349	struct intel_vgpu_display *display = &vgpu->display;
   1350	int num = display->sbi.number;
   1351	int i;
   1352
   1353	for (i = 0; i < num; ++i)
   1354		if (display->sbi.registers[i].offset == sbi_offset)
   1355			break;
   1356
   1357	if (i == num)
   1358		return 0;
   1359
   1360	return display->sbi.registers[i].value;
   1361}
   1362
   1363static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
   1364		unsigned int offset, u32 value)
   1365{
   1366	struct intel_vgpu_display *display = &vgpu->display;
   1367	int num = display->sbi.number;
   1368	int i;
   1369
   1370	for (i = 0; i < num; ++i) {
   1371		if (display->sbi.registers[i].offset == offset)
   1372			break;
   1373	}
   1374
   1375	if (i == num) {
   1376		if (num == SBI_REG_MAX) {
    1377			gvt_vgpu_err("SBI cache has reached its maximum size\n");
   1378			return;
   1379		}
   1380		display->sbi.number++;
   1381	}
   1382
   1383	display->sbi.registers[i].offset = offset;
   1384	display->sbi.registers[i].value = value;
   1385}
   1386
   1387static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   1388		void *p_data, unsigned int bytes)
   1389{
   1390	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
   1391				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
   1392		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
   1393				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
   1394		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
   1395				sbi_offset);
   1396	}
   1397	read_vreg(vgpu, offset, p_data, bytes);
   1398	return 0;
   1399}
   1400
   1401static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1402		void *p_data, unsigned int bytes)
   1403{
   1404	u32 data;
   1405
   1406	write_vreg(vgpu, offset, p_data, bytes);
   1407	data = vgpu_vreg(vgpu, offset);
   1408
   1409	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
   1410	data |= SBI_READY;
   1411
   1412	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
   1413	data |= SBI_RESPONSE_SUCCESS;
   1414
   1415	vgpu_vreg(vgpu, offset) = data;
   1416
   1417	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
   1418				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
   1419		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
   1420				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
   1421
   1422		write_virtual_sbi_register(vgpu, sbi_offset,
   1423					   vgpu_vreg_t(vgpu, SBI_DATA));
   1424	}
   1425	return 0;
   1426}
   1427
   1428#define _vgtif_reg(x) \
   1429	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
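/*
 * _vgtif_reg(x) resolves to the MMIO offset of field x inside the PVINFO page
 * shared with the guest; e.g. _vgtif_reg(magic) is VGT_PVINFO_PAGE itself,
 * assuming 'magic' is the first member of struct vgt_if. The handlers below
 * use it to range-check guest accesses field by field.
 */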
   1430
   1431static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   1432		void *p_data, unsigned int bytes)
   1433{
   1434	bool invalid_read = false;
   1435
   1436	read_vreg(vgpu, offset, p_data, bytes);
   1437
   1438	switch (offset) {
   1439	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
   1440		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
   1441			invalid_read = true;
   1442		break;
   1443	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
   1444		_vgtif_reg(avail_rs.fence_num):
   1445		if (offset + bytes >
   1446			_vgtif_reg(avail_rs.fence_num) + 4)
   1447			invalid_read = true;
   1448		break;
   1449	case 0x78010:	/* vgt_caps */
   1450	case 0x7881c:
   1451		break;
   1452	default:
   1453		invalid_read = true;
   1454		break;
   1455	}
   1456	if (invalid_read)
   1457		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
   1458				offset, bytes, *(u32 *)p_data);
   1459	vgpu->pv_notified = true;
   1460	return 0;
   1461}
   1462
   1463static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
   1464{
   1465	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
   1466	struct intel_vgpu_mm *mm;
   1467	u64 *pdps;
   1468
   1469	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
   1470
   1471	switch (notification) {
   1472	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
   1473		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
   1474		fallthrough;
   1475	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
   1476		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
   1477		return PTR_ERR_OR_ZERO(mm);
   1478	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
   1479	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
   1480		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
   1481	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
   1482	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
   1483	case 1:	/* Remove this in guest driver. */
   1484		break;
   1485	default:
   1486		gvt_vgpu_err("Invalid PV notification %d\n", notification);
   1487	}
   1488	return 0;
   1489}
   1490
   1491static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
   1492{
   1493	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
   1494	char *env[3] = {NULL, NULL, NULL};
   1495	char vmid_str[20];
   1496	char display_ready_str[20];
   1497
   1498	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
   1499	env[0] = display_ready_str;
   1500
   1501	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
   1502	env[1] = vmid_str;
   1503
   1504	return kobject_uevent_env(kobj, KOBJ_ADD, env);
   1505}
   1506
   1507static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1508		void *p_data, unsigned int bytes)
   1509{
   1510	u32 data = *(u32 *)p_data;
   1511	bool invalid_write = false;
   1512
   1513	switch (offset) {
   1514	case _vgtif_reg(display_ready):
   1515		send_display_ready_uevent(vgpu, data ? 1 : 0);
   1516		break;
   1517	case _vgtif_reg(g2v_notify):
   1518		handle_g2v_notification(vgpu, data);
   1519		break;
   1520	/* add xhot and yhot to handled list to avoid error log */
   1521	case _vgtif_reg(cursor_x_hot):
   1522	case _vgtif_reg(cursor_y_hot):
   1523	case _vgtif_reg(pdp[0].lo):
   1524	case _vgtif_reg(pdp[0].hi):
   1525	case _vgtif_reg(pdp[1].lo):
   1526	case _vgtif_reg(pdp[1].hi):
   1527	case _vgtif_reg(pdp[2].lo):
   1528	case _vgtif_reg(pdp[2].hi):
   1529	case _vgtif_reg(pdp[3].lo):
   1530	case _vgtif_reg(pdp[3].hi):
   1531	case _vgtif_reg(execlist_context_descriptor_lo):
   1532	case _vgtif_reg(execlist_context_descriptor_hi):
   1533		break;
   1534	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
   1535		invalid_write = true;
   1536		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
   1537		break;
   1538	default:
   1539		invalid_write = true;
   1540		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
   1541				offset, bytes, data);
   1542		break;
   1543	}
   1544
   1545	if (!invalid_write)
   1546		write_vreg(vgpu, offset, p_data, bytes);
   1547
   1548	return 0;
   1549}
   1550
   1551static int pf_write(struct intel_vgpu *vgpu,
   1552		unsigned int offset, void *p_data, unsigned int bytes)
   1553{
   1554	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
   1555	u32 val = *(u32 *)p_data;
   1556
   1557	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
   1558	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
   1559	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
   1560		drm_WARN_ONCE(&i915->drm, true,
    1561			      "VM(%d): guest is trying to scale a plane\n",
   1562			      vgpu->id);
   1563		return 0;
   1564	}
   1565
   1566	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
   1567}
   1568
   1569static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
   1570		unsigned int offset, void *p_data, unsigned int bytes)
   1571{
   1572	write_vreg(vgpu, offset, p_data, bytes);
   1573
   1574	if (vgpu_vreg(vgpu, offset) &
   1575	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
   1576		vgpu_vreg(vgpu, offset) |=
   1577			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
   1578	else
   1579		vgpu_vreg(vgpu, offset) &=
   1580			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
   1581	return 0;
   1582}
   1583
   1584static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
   1585		unsigned int offset, void *p_data, unsigned int bytes)
   1586{
   1587	write_vreg(vgpu, offset, p_data, bytes);
   1588
   1589	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
   1590		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
   1591	else
   1592		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
   1593
   1594	return 0;
   1595}
   1596
   1597static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
   1598	unsigned int offset, void *p_data, unsigned int bytes)
   1599{
   1600	write_vreg(vgpu, offset, p_data, bytes);
   1601
   1602	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
   1603		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
   1604	return 0;
   1605}
   1606
   1607static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
   1608		void *p_data, unsigned int bytes)
   1609{
   1610	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
   1611	u32 mode;
   1612
   1613	write_vreg(vgpu, offset, p_data, bytes);
   1614	mode = vgpu_vreg(vgpu, offset);
   1615
   1616	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
   1617		drm_WARN_ONCE(&i915->drm, 1,
   1618				"VM(%d): iGVT-g doesn't support GuC\n",
   1619				vgpu->id);
   1620		return 0;
   1621	}
   1622
   1623	return 0;
   1624}
   1625
   1626static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
   1627		void *p_data, unsigned int bytes)
   1628{
   1629	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
   1630	u32 trtte = *(u32 *)p_data;
   1631
   1632	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
   1633		drm_WARN(&i915->drm, 1,
   1634				"VM(%d): Use physical address for TRTT!\n",
   1635				vgpu->id);
   1636		return -EINVAL;
   1637	}
   1638	write_vreg(vgpu, offset, p_data, bytes);
   1639
   1640	return 0;
   1641}
   1642
   1643static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
   1644		void *p_data, unsigned int bytes)
   1645{
   1646	write_vreg(vgpu, offset, p_data, bytes);
   1647	return 0;
   1648}
   1649
   1650static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
   1651		void *p_data, unsigned int bytes)
   1652{
   1653	u32 v = 0;
   1654
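        	/*
        	 * Synthesize the DPLL0..DPLL3 lock bits (0, 8, 16, 24) of DPLL_STATUS
        	 * from bit 31 of the PLL control vregs at 0x46010/0x46014/0x46040/
        	 * 0x46060, so a PLL the guest has enabled always reads back as locked.
        	 */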
   1655	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
   1656		v |= (1 << 0);
   1657
   1658	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
   1659		v |= (1 << 8);
   1660
   1661	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
   1662		v |= (1 << 16);
   1663
   1664	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
   1665		v |= (1 << 24);
   1666
   1667	vgpu_vreg(vgpu, offset) = v;
   1668
   1669	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
   1670}
   1671
   1672static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
   1673		void *p_data, unsigned int bytes)
   1674{
   1675	u32 value = *(u32 *)p_data;
   1676	u32 cmd = value & 0xff;
   1677	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
   1678
   1679	switch (cmd) {
   1680	case GEN9_PCODE_READ_MEM_LATENCY:
   1681		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
   1682		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
   1683		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
   1684		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
    1685			/*
    1686			 * "Read memory latency" command on gen9.
    1687			 * The memory latency values below were read
    1688			 * from a Skylake platform.
    1689			 */
   1690			if (!*data0)
   1691				*data0 = 0x1e1a1100;
   1692			else
   1693				*data0 = 0x61514b3d;
   1694		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
    1695			/*
    1696			 * "Read memory latency" command on gen9.
    1697			 * The memory latency values below were read
    1698			 * from a Broxton MRB.
    1699			 */
   1700			if (!*data0)
   1701				*data0 = 0x16080707;
   1702			else
   1703				*data0 = 0x16161616;
   1704		}
   1705		break;
   1706	case SKL_PCODE_CDCLK_CONTROL:
   1707		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
   1708		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
   1709		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
   1710		    IS_COMETLAKE(vgpu->gvt->gt->i915))
   1711			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
   1712		break;
   1713	case GEN6_PCODE_READ_RC6VIDS:
   1714		*data0 |= 0x1;
   1715		break;
   1716	}
   1717
   1718	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
   1719		     vgpu->id, value, *data0);
    1720	/*
    1721	 * A clear PCODE_READY bit means the mailbox is ready for another pcode
    1722	 * read/write, and a clear PCODE_ERROR_MASK means no error occurred.
    1723	 * GVT-g never touches the real physical registers here, so always
    1724	 * emulate the access as successful and ready.
    1725	 */
   1726	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
   1727	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
   1728}
   1729
   1730static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
   1731		void *p_data, unsigned int bytes)
   1732{
   1733	u32 value = *(u32 *)p_data;
   1734	const struct intel_engine_cs *engine =
   1735		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
   1736
   1737	if (value != 0 &&
   1738	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
   1739		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
   1740			      offset, value);
   1741		return -EINVAL;
   1742	}
   1743
    1744	/*
    1745	 * All HWSP register writes need to be emulated so that the host can
    1746	 * update the VM's CSB status correctly. The registers handled here
    1747	 * cover BDW, SKL and other platforms with the same HWSP registers.
    1748	 */
   1749	if (unlikely(!engine)) {
   1750		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
   1751			     offset);
   1752		return -EINVAL;
   1753	}
   1754	vgpu->hws_pga[engine->id] = value;
   1755	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
   1756		     vgpu->id, value, offset);
   1757
   1758	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
   1759}
   1760
   1761static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
   1762		unsigned int offset, void *p_data, unsigned int bytes)
   1763{
   1764	u32 v = *(u32 *)p_data;
   1765
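        	/*
        	 * Only the power well request bits are writable; mirror each request
        	 * bit into the adjacent state bit (v >> 1) so the guest immediately
        	 * sees the well report the requested state.
        	 */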
   1766	if (IS_BROXTON(vgpu->gvt->gt->i915))
   1767		v &= (1 << 31) | (1 << 29);
   1768	else
   1769		v &= (1 << 31) | (1 << 29) | (1 << 9) |
   1770			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
   1771	v |= (v >> 1);
   1772
   1773	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
   1774}
   1775
   1776static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
   1777		void *p_data, unsigned int bytes)
   1778{
   1779	u32 v = *(u32 *)p_data;
   1780
   1781	/* other bits are MBZ. */
   1782	v &= (1 << 31) | (1 << 30);
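        	/* Mirror the PLL enable bit (31) into the lock bit (30). */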
   1783	v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
   1784
   1785	vgpu_vreg(vgpu, offset) = v;
   1786
   1787	return 0;
   1788}
   1789
   1790static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
   1791		unsigned int offset, void *p_data, unsigned int bytes)
   1792{
   1793	u32 v = *(u32 *)p_data;
   1794
   1795	if (v & BXT_DE_PLL_PLL_ENABLE)
   1796		v |= BXT_DE_PLL_LOCK;
   1797
   1798	vgpu_vreg(vgpu, offset) = v;
   1799
   1800	return 0;
   1801}
   1802
   1803static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
   1804		unsigned int offset, void *p_data, unsigned int bytes)
   1805{
   1806	u32 v = *(u32 *)p_data;
   1807
   1808	if (v & PORT_PLL_ENABLE)
   1809		v |= PORT_PLL_LOCK;
   1810
   1811	vgpu_vreg(vgpu, offset) = v;
   1812
   1813	return 0;
   1814}
   1815
   1816static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
   1817		unsigned int offset, void *p_data, unsigned int bytes)
   1818{
   1819	u32 v = *(u32 *)p_data;
   1820	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
   1821
   1822	switch (offset) {
   1823	case _PHY_CTL_FAMILY_EDP:
   1824		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
   1825		break;
   1826	case _PHY_CTL_FAMILY_DDI:
   1827		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
   1828		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
   1829		break;
   1830	}
   1831
   1832	vgpu_vreg(vgpu, offset) = v;
   1833
   1834	return 0;
   1835}
   1836
   1837static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
   1838		unsigned int offset, void *p_data, unsigned int bytes)
   1839{
   1840	u32 v = vgpu_vreg(vgpu, offset);
   1841
   1842	v &= ~UNIQUE_TRANGE_EN_METHOD;
   1843
   1844	vgpu_vreg(vgpu, offset) = v;
   1845
   1846	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
   1847}
   1848
   1849static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
   1850		unsigned int offset, void *p_data, unsigned int bytes)
   1851{
   1852	u32 v = *(u32 *)p_data;
   1853
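        	/*
        	 * A write to the PCS_DW12 group register is also propagated to the
        	 * two per-lane copies of the register; their offsets relative to the
        	 * group register differ between the ports handled below.
        	 */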
   1854	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
   1855		vgpu_vreg(vgpu, offset - 0x600) = v;
   1856		vgpu_vreg(vgpu, offset - 0x800) = v;
   1857	} else {
   1858		vgpu_vreg(vgpu, offset - 0x400) = v;
   1859		vgpu_vreg(vgpu, offset - 0x600) = v;
   1860	}
   1861
   1862	vgpu_vreg(vgpu, offset) = v;
   1863
   1864	return 0;
   1865}
   1866
   1867static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
   1868		unsigned int offset, void *p_data, unsigned int bytes)
   1869{
   1870	u32 v = *(u32 *)p_data;
   1871
   1872	if (v & BIT(0)) {
   1873		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
   1874			~PHY_RESERVED;
   1875		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
   1876			PHY_POWER_GOOD;
   1877	}
   1878
   1879	if (v & BIT(1)) {
   1880		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
   1881			~PHY_RESERVED;
   1882		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
   1883			PHY_POWER_GOOD;
   1884	}
   1885
   1886
   1887	vgpu_vreg(vgpu, offset) = v;
   1888
   1889	return 0;
   1890}
   1891
   1892static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
   1893		unsigned int offset, void *p_data, unsigned int bytes)
   1894{
   1895	vgpu_vreg(vgpu, offset) = 0;
   1896	return 0;
   1897}
   1898
    1899/*
    1900 * FixMe:
    1901 * If the guest fills a non-priv batch buffer on ApolloLake/Broxton the way
    1902 * Mesa i965 does since 717e7539124d ("i965: Use a WC map and memcpy for the
    1903 * batch instead of pwrite."), the missing flush of the batch buffer filled by
    1904 * the VM vCPU makes the host GPU hang while executing these MI_BATCH_BUFFERs.
    1905 * Temporarily work around this by setting the SNOOP bit for PAT3, which is
    1906 * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
    1907 * Performance is still expected to be low; further improvement is needed.
    1908 */
   1909static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
   1910			      void *p_data, unsigned int bytes)
   1911{
   1912	u64 pat =
   1913		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
   1914		GEN8_PPAT(1, 0) |
   1915		GEN8_PPAT(2, 0) |
   1916		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
   1917		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
   1918		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
   1919		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
   1920		GEN8_PPAT(7, CHV_PPAT_SNOOP);
   1921
   1922	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
   1923
   1924	return 0;
   1925}
   1926
   1927static int guc_status_read(struct intel_vgpu *vgpu,
   1928			   unsigned int offset, void *p_data,
   1929			   unsigned int bytes)
   1930{
    1931	/* keep MIA_IN_RESET for this read, then clear it for later reads */
   1932	read_vreg(vgpu, offset, p_data, bytes);
   1933	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
   1934	return 0;
   1935}
   1936
   1937static int mmio_read_from_hw(struct intel_vgpu *vgpu,
   1938		unsigned int offset, void *p_data, unsigned int bytes)
   1939{
   1940	struct intel_gvt *gvt = vgpu->gvt;
   1941	const struct intel_engine_cs *engine =
   1942		intel_gvt_render_mmio_to_engine(gvt, offset);
   1943
    1944	/*
    1945	 * Read the HW register when:
    1946	 * a. the offset is not a ring MMIO, or
    1947	 * b. the ring the offset belongs to is currently running on HW, or
    1948	 * c. the offset is a ring timestamp MMIO.
    1949	 */
   1950
   1951	if (!engine ||
   1952	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
   1953	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
   1954	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
   1955		mmio_hw_access_pre(gvt->gt);
   1956		vgpu_vreg(vgpu, offset) =
   1957			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
   1958		mmio_hw_access_post(gvt->gt);
   1959	}
   1960
   1961	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
   1962}
   1963
   1964static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1965		void *p_data, unsigned int bytes)
   1966{
   1967	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
   1968	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
   1969	struct intel_vgpu_execlist *execlist;
   1970	u32 data = *(u32 *)p_data;
   1971	int ret = 0;
   1972
   1973	if (drm_WARN_ON(&i915->drm, !engine))
   1974		return -EINVAL;
   1975
    1976	/*
    1977	 * d3_entered indicates that PPGTT invalidation should be skipped on
    1978	 * vGPU reset: it is set on the D0->D3 PCI config write and cleared
    1979	 * after a vGPU reset when resuming.
    1980	 * On S0ix exit the device power state also transitions from D3 to D0,
    1981	 * as on S3 resume, but no vGPU reset is triggered by the QEMU device
    1982	 * model. After S0ix exit all engines continue to work, yet d3_entered
    1983	 * remains set, which would break the next vGPU reset (the expected
    1984	 * PPGTT invalidation would be missed).
    1985	 * Engines can only run in D0, so the first ELSP write gives GVT a
    1986	 * chance to clear d3_entered.
    1987	 */
   1988	if (vgpu->d3_entered)
   1989		vgpu->d3_entered = false;
   1990
   1991	execlist = &vgpu->submission.execlist[engine->id];
   1992
   1993	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
   1994	if (execlist->elsp_dwords.index == 3) {
   1995		ret = intel_vgpu_submit_execlist(vgpu, engine);
    1996		if (ret)
    1997			gvt_vgpu_err("failed to submit workload on ring %s\n",
   1998				     engine->name);
   1999	}
   2000
   2001	++execlist->elsp_dwords.index;
   2002	execlist->elsp_dwords.index &= 0x3;
   2003	return ret;
   2004}
   2005
   2006static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   2007		void *p_data, unsigned int bytes)
   2008{
   2009	u32 data = *(u32 *)p_data;
   2010	const struct intel_engine_cs *engine =
   2011		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
   2012	bool enable_execlist;
   2013	int ret;
   2014
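        	/*
        	 * Strip the masked enable of the unsupported mode bits (one more on
        	 * CFL/CML) before the vreg update; a guest that tries to set them is
        	 * put into failsafe mode below.
        	 */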
   2015	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
   2016	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
   2017	    IS_COMETLAKE(vgpu->gvt->gt->i915))
   2018		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
   2019	write_vreg(vgpu, offset, p_data, bytes);
   2020
   2021	if (IS_MASKED_BITS_ENABLED(data, 1)) {
   2022		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   2023		return 0;
   2024	}
   2025
   2026	if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
   2027	     IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
   2028	    IS_MASKED_BITS_ENABLED(data, 2)) {
   2029		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   2030		return 0;
   2031	}
   2032
    2033	/* When PPGTT mode is enabled, check whether the guest has notified
    2034	 * us through pvinfo. If not, treat it as a non-GVT-g-aware guest and
    2035	 * stop emulating its cfg space, MMIO, GTT, etc.
    2036	 */
   2037	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
   2038	    IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
   2039	    !vgpu->pv_notified) {
   2040		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   2041		return 0;
   2042	}
   2043	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
   2044	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
   2045		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
   2046
   2047		gvt_dbg_core("EXECLIST %s on ring %s\n",
   2048			     (enable_execlist ? "enabling" : "disabling"),
   2049			     engine->name);
   2050
   2051		if (!enable_execlist)
   2052			return 0;
   2053
   2054		ret = intel_vgpu_select_submission_ops(vgpu,
   2055						       engine->mask,
   2056						       INTEL_VGPU_EXECLIST_SUBMISSION);
   2057		if (ret)
   2058			return ret;
   2059
   2060		intel_vgpu_start_schedule(vgpu);
   2061	}
   2062	return 0;
   2063}
   2064
   2065static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
   2066		unsigned int offset, void *p_data, unsigned int bytes)
   2067{
   2068	unsigned int id = 0;
   2069
   2070	write_vreg(vgpu, offset, p_data, bytes);
   2071	vgpu_vreg(vgpu, offset) = 0;
   2072
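        	/*
        	 * Map the TLB invalidation register to its engine and record a
        	 * pending TLB flush for it; the flush is performed later when the
        	 * pending bit is consumed.
        	 */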
   2073	switch (offset) {
   2074	case 0x4260:
   2075		id = RCS0;
   2076		break;
   2077	case 0x4264:
   2078		id = VCS0;
   2079		break;
   2080	case 0x4268:
   2081		id = VCS1;
   2082		break;
   2083	case 0x426c:
   2084		id = BCS0;
   2085		break;
   2086	case 0x4270:
   2087		id = VECS0;
   2088		break;
   2089	default:
   2090		return -EINVAL;
   2091	}
   2092	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
   2093
   2094	return 0;
   2095}
   2096
   2097static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
   2098	unsigned int offset, void *p_data, unsigned int bytes)
   2099{
   2100	u32 data;
   2101
   2102	write_vreg(vgpu, offset, p_data, bytes);
   2103	data = vgpu_vreg(vgpu, offset);
   2104
   2105	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
   2106		data |= RESET_CTL_READY_TO_RESET;
   2107	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
   2108		data &= ~RESET_CTL_READY_TO_RESET;
   2109
   2110	vgpu_vreg(vgpu, offset) = data;
   2111	return 0;
   2112}
   2113
   2114static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
   2115				    unsigned int offset, void *p_data,
   2116				    unsigned int bytes)
   2117{
   2118	u32 data = *(u32 *)p_data;
   2119
   2120	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
   2121	write_vreg(vgpu, offset, p_data, bytes);
   2122
   2123	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
   2124	    IS_MASKED_BITS_ENABLED(data, 0x8))
   2125		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   2126
   2127	return 0;
   2128}
   2129
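        /*
         * Helper macros for registering MMIO handlers below. Parameters:
         *   reg - register (or ring-base macro for the _RING_ variants)
         *   s   - size of the tracked range in bytes
         *   f   - F_* attribute flags (F_RO, F_GMADR, F_CMD_ACCESS, ...)
         *   am  - address mask (used by the graphics-memory address registers)
         *   rm  - read-only bit mask
         *   d   - device set (D_ALL, D_BDW_PLUS, ...) the entry applies to
         *   r/w - read/write handlers, NULL for the defaults
         */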
   2130#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
   2131	ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
   2132		s, f, am, rm, d, r, w); \
   2133	if (ret) \
   2134		return ret; \
   2135} while (0)
   2136
   2137#define MMIO_DH(reg, d, r, w) \
   2138	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
   2139
   2140#define MMIO_DFH(reg, d, f, r, w) \
   2141	MMIO_F(reg, 4, f, 0, 0, d, r, w)
   2142
   2143#define MMIO_GM(reg, d, r, w) \
   2144	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
   2145
   2146#define MMIO_GM_RDR(reg, d, r, w) \
   2147	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
   2148
   2149#define MMIO_RO(reg, d, f, rm, r, w) \
   2150	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
   2151
   2152#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
   2153	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
   2154	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
   2155	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
   2156	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
   2157	if (HAS_ENGINE(gvt->gt, VCS1)) \
   2158		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
   2159} while (0)
   2160
   2161#define MMIO_RING_DFH(prefix, d, f, r, w) \
   2162	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
   2163
   2164#define MMIO_RING_GM(prefix, d, r, w) \
   2165	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
   2166
   2167#define MMIO_RING_GM_RDR(prefix, d, r, w) \
   2168	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
   2169
   2170#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
   2171	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
   2172
   2173static int init_generic_mmio_info(struct intel_gvt *gvt)
   2174{
   2175	struct drm_i915_private *dev_priv = gvt->gt->i915;
   2176	int ret;
   2177
   2178	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
   2179		intel_vgpu_reg_imr_handler);
   2180
   2181	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
   2182	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
   2183	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
   2184
   2185	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
   2186
   2187
   2188	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
   2189		gamw_echo_dev_rw_ia_write);
   2190
   2191	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
   2192	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
   2193	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
   2194
   2195#define RING_REG(base) _MMIO((base) + 0x28)
   2196	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2197#undef RING_REG
   2198
   2199#define RING_REG(base) _MMIO((base) + 0x134)
   2200	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2201#undef RING_REG
   2202
   2203#define RING_REG(base) _MMIO((base) + 0x6c)
   2204	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
   2205#undef RING_REG
   2206	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
   2207
   2208	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
   2209	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
   2210	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
   2211
   2212	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
   2213	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
   2214	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
   2215	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
   2216	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
   2217
   2218	/* RING MODE */
   2219#define RING_REG(base) _MMIO((base) + 0x29c)
   2220	MMIO_RING_DFH(RING_REG, D_ALL,
   2221		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
   2222		ring_mode_mmio_write);
   2223#undef RING_REG
   2224
   2225	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   2226		NULL, NULL);
   2227	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   2228			NULL, NULL);
   2229	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
   2230			mmio_read_from_hw, NULL);
   2231	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
   2232			mmio_read_from_hw, NULL);
   2233
   2234	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2235	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   2236		NULL, NULL);
   2237	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2238	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2239	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2240
   2241	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2242	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2243	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2244	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
   2245		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2246	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2247	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2248	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   2249		NULL, NULL);
   2250	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   2251		 NULL, NULL);
   2252	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2253	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2254	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2255	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2256	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2257	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2258	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2259	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2260	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2261	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2262
   2263	/* display */
   2264	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
   2265	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
   2266	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
   2267	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
   2268	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
   2269	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
   2270		reg50080_mmio_write);
   2271	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
   2272	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
   2273		reg50080_mmio_write);
   2274	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
   2275	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
   2276		reg50080_mmio_write);
   2277	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
   2278	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
   2279		reg50080_mmio_write);
   2280	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
   2281	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
   2282		reg50080_mmio_write);
   2283	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
   2284	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
   2285		reg50080_mmio_write);
   2286
   2287	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
   2288		gmbus_mmio_write);
   2289	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
   2290
   2291	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
   2292		dp_aux_ch_ctl_mmio_write);
   2293	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
   2294		dp_aux_ch_ctl_mmio_write);
   2295	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
   2296		dp_aux_ch_ctl_mmio_write);
   2297
   2298	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
   2299
   2300	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
   2301	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
   2302
   2303	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
   2304	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
   2305	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
   2306	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
   2307	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
   2308	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
   2309	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
   2310	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
   2311	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
   2312	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
   2313	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
   2314	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
   2315	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
   2316	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
   2317	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
   2318	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
   2319
   2320	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
   2321		PORTA_HOTPLUG_STATUS_MASK
   2322		| PORTB_HOTPLUG_STATUS_MASK
   2323		| PORTC_HOTPLUG_STATUS_MASK
   2324		| PORTD_HOTPLUG_STATUS_MASK,
   2325		NULL, NULL);
   2326
   2327	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
   2328	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
   2329	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
   2330	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
   2331	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
   2332
   2333	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
   2334		dp_aux_ch_ctl_mmio_write);
   2335
   2336	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2337	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2338	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2339	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2340	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2341
   2342	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2343	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2344	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2345	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2346	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2347
   2348	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
   2349	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
   2350	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
   2351	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
   2352	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
   2353
   2354	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
   2355	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
   2356	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
   2357	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
   2358
   2359	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
   2360	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2361	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2362	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
   2363	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
   2364	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
   2365	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
   2366	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
   2367	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
   2368	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
   2369	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
   2370	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
   2371	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
   2372
   2373	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
   2374	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
   2375	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
   2376
   2377	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
   2378	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
   2379
   2380	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
   2381	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2382
   2383	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
   2384	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2385	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2386	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2387	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2388	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2389
   2390	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
   2391	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2392	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2393	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2394
   2395	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2396	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2397	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2398
   2399	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2400	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2401	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2402	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2403	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2404	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2405	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2406	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2407	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2408	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2409	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2410	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2411	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2412	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2413	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2414	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2415	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2416
   2417	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2418	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
   2419	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2420	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2421	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2422	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2423	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2424	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2425	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2426	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2427	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2428
   2429	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
   2430	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
   2431	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
   2432
   2433	return 0;
   2434}
   2435
   2436static int init_bdw_mmio_info(struct intel_gvt *gvt)
   2437{
   2438	int ret;
   2439
   2440	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2441	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2442	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2443
   2444	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2445	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2446	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2447
   2448	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2449	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2450	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2451
   2452	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2453	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2454	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2455
   2456	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
   2457		intel_vgpu_reg_imr_handler);
   2458	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
   2459		intel_vgpu_reg_ier_handler);
   2460	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
   2461		intel_vgpu_reg_iir_handler);
   2462
   2463	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
   2464		intel_vgpu_reg_imr_handler);
   2465	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
   2466		intel_vgpu_reg_ier_handler);
   2467	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
   2468		intel_vgpu_reg_iir_handler);
   2469
   2470	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
   2471		intel_vgpu_reg_imr_handler);
   2472	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
   2473		intel_vgpu_reg_ier_handler);
   2474	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
   2475		intel_vgpu_reg_iir_handler);
   2476
   2477	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2478	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2479	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2480
   2481	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2482	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2483	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2484
   2485	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2486	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2487	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2488
   2489	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
   2490		intel_vgpu_reg_master_irq_handler);
   2491
   2492	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
   2493		mmio_read_from_hw, NULL);
   2494
   2495#define RING_REG(base) _MMIO((base) + 0xd0)
   2496	MMIO_RING_F(RING_REG, 4, F_RO, 0,
   2497		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
   2498		ring_reset_ctl_write);
   2499#undef RING_REG
   2500
   2501#define RING_REG(base) _MMIO((base) + 0x230)
   2502	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
   2503#undef RING_REG
   2504
   2505#define RING_REG(base) _MMIO((base) + 0x234)
   2506	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
   2507		NULL, NULL);
   2508#undef RING_REG
   2509
   2510#define RING_REG(base) _MMIO((base) + 0x244)
   2511	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2512#undef RING_REG
   2513
   2514#define RING_REG(base) _MMIO((base) + 0x370)
   2515	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
   2516#undef RING_REG
   2517
   2518#define RING_REG(base) _MMIO((base) + 0x3a0)
   2519	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
   2520#undef RING_REG
   2521
   2522	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
   2523
   2524#define RING_REG(base) _MMIO((base) + 0x270)
   2525	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
   2526#undef RING_REG
   2527
   2528	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
   2529
   2530	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2531
   2532	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2533		NULL, NULL);
   2534	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2535		NULL, NULL);
   2536	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2537
   2538	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2539	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2540	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2541	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2542	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2543
   2544	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
   2545		D_BDW_PLUS, NULL, force_nonpriv_write);
   2546
   2547	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2548
   2549	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2550
   2551	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2552	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2553	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2554	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2555
   2556	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2557
   2558	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2559	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2560	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2561	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2562	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2563	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2564	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2565	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2566	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2567	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2568	return 0;
   2569}
   2570
   2571static int init_skl_mmio_info(struct intel_gvt *gvt)
   2572{
   2573	struct drm_i915_private *dev_priv = gvt->gt->i915;
   2574	int ret;
   2575
   2576	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
   2577	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
   2578	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
   2579	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
   2580	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
   2581	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
   2582
   2583	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
   2584						dp_aux_ch_ctl_mmio_write);
   2585	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
   2586						dp_aux_ch_ctl_mmio_write);
   2587	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
   2588						dp_aux_ch_ctl_mmio_write);
   2589
   2590	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
   2591
   2592	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
   2593
   2594	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2595	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2596	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
   2597	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
   2598	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
   2599	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
   2600
   2601	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
   2602	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
   2603	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
   2604	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
   2605	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
   2606	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
   2607
   2608	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
   2609	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
   2610	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
   2611	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
   2612	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
   2613	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
   2614
   2615	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
   2616	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
   2617	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
   2618	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
   2619	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
   2620	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
   2621
   2622	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
   2623	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
   2624	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
   2625	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
   2626
   2627	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
   2628	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
   2629	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
   2630	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
   2631
   2632	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
   2633	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
   2634	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
   2635	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
   2636
   2637	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
   2638	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
   2639	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
   2640
   2641	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
   2642	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
   2643	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
   2644
   2645	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
   2646	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
   2647	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
   2648
   2649	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
   2650	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
   2651	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
   2652
   2653	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
   2654	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
   2655	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
   2656
   2657	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
   2658	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
   2659	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
   2660	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
   2661
   2662	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
   2663	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
   2664	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
   2665	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
   2666
   2667	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
   2668	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
   2669	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
   2670	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
   2671
   2672	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
   2673	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
   2674	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
   2675	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
   2676
   2677	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
   2678	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
   2679	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
   2680	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
   2681
   2682	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
   2683	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
   2684	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
   2685	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
   2686
   2687	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
   2688	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
   2689	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
   2690	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
   2691
   2692	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
   2693	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
   2694	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
   2695	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
   2696
   2697	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
   2698	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
   2699	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
   2700	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
   2701
   2702	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2703
   2704	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
   2705		NULL, NULL);
   2706	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
   2707		NULL, NULL);
   2708
   2709	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
   2710		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2711	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2712		NULL, NULL);
   2713
   2714	/* TRTT */
   2715	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2716	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2717	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2718	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2719	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2720	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
   2721		 NULL, gen9_trtte_write);
   2722	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
   2723		 NULL, gen9_trtt_chicken_write);
   2724
   2725	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2726	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
   2727
   2728#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
   2729	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2730		      NULL, csfe_chicken1_mmio_write);
   2731#undef CSFE_CHICKEN1_REG
   2732	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2733		 NULL, NULL);
   2734	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2735		 NULL, NULL);
   2736
   2737	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
   2738	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2739
   2740	return 0;
   2741}
   2742
   2743static int init_bxt_mmio_info(struct intel_gvt *gvt)
   2744{
   2745	int ret;
   2746
   2747	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
   2748	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
   2749		NULL, bxt_phy_ctl_family_write);
   2750	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
   2751		NULL, bxt_phy_ctl_family_write);
   2752	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
   2753		NULL, bxt_port_pll_enable_write);
   2754	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
   2755		NULL, bxt_port_pll_enable_write);
   2756	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
   2757		bxt_port_pll_enable_write);
   2758
   2759	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
   2760		NULL, bxt_pcs_dw12_grp_write);
   2761	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
   2762		bxt_port_tx_dw3_read, NULL);
   2763	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
   2764		NULL, bxt_pcs_dw12_grp_write);
   2765	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
   2766		bxt_port_tx_dw3_read, NULL);
   2767	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
   2768		NULL, bxt_pcs_dw12_grp_write);
   2769	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
   2770		bxt_port_tx_dw3_read, NULL);
   2771	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
   2772	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
   2773	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
   2774	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
   2775	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
   2776	       0, 0, D_BXT, NULL, NULL);
   2777	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
   2778	       0, 0, D_BXT, NULL, NULL);
   2779	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
   2780	       0, 0, D_BXT, NULL, NULL);
   2781	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
   2782	       0, 0, D_BXT, NULL, NULL);
   2783
   2784	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
   2785
   2786	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
   2787
   2788	return 0;
   2789}
   2790
   2791static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
   2792					      unsigned int offset)
   2793{
   2794	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
   2795	int num = gvt->mmio.num_mmio_block;
   2796	int i;
   2797
   2798	for (i = 0; i < num; i++, block++) {
   2799		if (offset >= i915_mmio_reg_offset(block->offset) &&
   2800		    offset < i915_mmio_reg_offset(block->offset) + block->size)
   2801			return block;
   2802	}
   2803	return NULL;
   2804}
   2805
   2806/**
   2807 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
   2808 * @gvt: GVT device
   2809 *
    2810 * This function is called at the driver unloading stage to clean up the MMIO
    2811 * information table of the GVT device.
   2812 *
   2813 */
   2814void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
   2815{
   2816	struct hlist_node *tmp;
   2817	struct intel_gvt_mmio_info *e;
   2818	int i;
   2819
   2820	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
   2821		kfree(e);
   2822
   2823	kfree(gvt->mmio.mmio_block);
   2824	gvt->mmio.mmio_block = NULL;
   2825	gvt->mmio.num_mmio_block = 0;
   2826
   2827	vfree(gvt->mmio.mmio_attribute);
   2828	gvt->mmio.mmio_attribute = NULL;
   2829}
   2830
   2831static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
   2832		       u32 size)
   2833{
   2834	struct intel_gvt *gvt = iter->data;
   2835	struct intel_gvt_mmio_info *info, *p;
   2836	u32 start, end, i;
   2837
   2838	if (WARN_ON(!IS_ALIGNED(offset, 4)))
   2839		return -EINVAL;
   2840
   2841	start = offset;
   2842	end = offset + size;
   2843
   2844	for (i = start; i < end; i += 4) {
   2845		p = intel_gvt_find_mmio_info(gvt, i);
   2846		if (p) {
   2847			WARN(1, "dup mmio definition offset %x\n",
    2848				p->offset);
   2849
    2850			/* Return -EEXIST here to make GVT-g fail to load,
    2851			 * so that duplicated MMIO definitions are caught
    2852			 * as early as possible.
    2853			 */
   2854			return -EEXIST;
   2855		}
   2856
   2857		info = kzalloc(sizeof(*info), GFP_KERNEL);
   2858		if (!info)
   2859			return -ENOMEM;
   2860
   2861		info->offset = i;
   2862		info->read = intel_vgpu_default_mmio_read;
   2863		info->write = intel_vgpu_default_mmio_write;
   2864		INIT_HLIST_NODE(&info->node);
   2865		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
   2866		gvt->mmio.num_tracked_mmio++;
   2867	}
   2868	return 0;
   2869}
   2870
   2871static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
   2872			     u32 offset, u32 size)
   2873{
   2874	struct intel_gvt *gvt = iter->data;
   2875	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
   2876	void *ret;
   2877
   2878	ret = krealloc(block,
   2879			 (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
   2880			 GFP_KERNEL);
   2881	if (!ret)
   2882		return -ENOMEM;
   2883
   2884	gvt->mmio.mmio_block = block = ret;
   2885
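        	/* Initialize the entry just appended at the end of the grown array. */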
   2886	block += gvt->mmio.num_mmio_block;
   2887
   2888	memset(block, 0, sizeof(*block));
   2889
   2890	block->offset = _MMIO(offset);
   2891	block->size = size;
   2892
   2893	gvt->mmio.num_mmio_block++;
   2894
   2895	return 0;
   2896}
   2897
   2898static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
   2899			  u32 size)
   2900{
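        	/*
        	 * Ranges smaller than 1KiB, plus the GFX MOCS range, are tracked per
        	 * register; larger ranges are handled as whole MMIO blocks.
        	 */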
   2901	if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
   2902		return handle_mmio(iter, offset, size);
   2903	else
   2904		return handle_mmio_block(iter, offset, size);
   2905}
   2906
   2907static int init_mmio_info(struct intel_gvt *gvt)
   2908{
   2909	struct intel_gvt_mmio_table_iter iter = {
   2910		.i915 = gvt->gt->i915,
   2911		.data = gvt,
   2912		.handle_mmio_cb = handle_mmio_cb,
   2913	};
   2914
   2915	return intel_gvt_iterate_mmio_table(&iter);
   2916}
   2917
   2918static int init_mmio_block_handlers(struct intel_gvt *gvt)
   2919{
   2920	struct gvt_mmio_block *block;
   2921
   2922	block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
   2923	if (!block) {
    2924		WARN(1, "failed to assign handlers to mmio block %x\n",
   2925		     i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
   2926		return -ENODEV;
   2927	}
   2928
   2929	block->read = pvinfo_mmio_read;
   2930	block->write = pvinfo_mmio_write;
   2931
   2932	return 0;
   2933}
   2934
   2935/**
   2936 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
   2937 * @gvt: GVT device
   2938 *
    2939 * This function is called at the initialization stage to set up the MMIO
    2940 * information table for the GVT device.
   2941 *
   2942 * Returns:
   2943 * zero on success, negative if failed.
   2944 */
   2945int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
   2946{
   2947	struct intel_gvt_device_info *info = &gvt->device_info;
   2948	struct drm_i915_private *i915 = gvt->gt->i915;
   2949	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
   2950	int ret;
   2951
   2952	gvt->mmio.mmio_attribute = vzalloc(size);
   2953	if (!gvt->mmio.mmio_attribute)
   2954		return -ENOMEM;
   2955
   2956	ret = init_mmio_info(gvt);
   2957	if (ret)
   2958		goto err;
   2959
   2960	ret = init_mmio_block_handlers(gvt);
   2961	if (ret)
   2962		goto err;
   2963
   2964	ret = init_generic_mmio_info(gvt);
   2965	if (ret)
   2966		goto err;
   2967
   2968	if (IS_BROADWELL(i915)) {
   2969		ret = init_bdw_mmio_info(gvt);
   2970		if (ret)
   2971			goto err;
   2972	} else if (IS_SKYLAKE(i915) ||
   2973		   IS_KABYLAKE(i915) ||
   2974		   IS_COFFEELAKE(i915) ||
   2975		   IS_COMETLAKE(i915)) {
   2976		ret = init_bdw_mmio_info(gvt);
   2977		if (ret)
   2978			goto err;
   2979		ret = init_skl_mmio_info(gvt);
   2980		if (ret)
   2981			goto err;
   2982	} else if (IS_BROXTON(i915)) {
   2983		ret = init_bdw_mmio_info(gvt);
   2984		if (ret)
   2985			goto err;
   2986		ret = init_skl_mmio_info(gvt);
   2987		if (ret)
   2988			goto err;
   2989		ret = init_bxt_mmio_info(gvt);
   2990		if (ret)
   2991			goto err;
   2992	}
   2993
   2994	return 0;
   2995err:
   2996	intel_gvt_clean_mmio_info(gvt);
   2997	return ret;
   2998}
   2999
   3000/**
   3001 * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
   3002 * @gvt: a GVT device
   3003 * @handler: the handler
   3004 * @data: private data given to handler
   3005 *
   3006 * Returns:
   3007 * Zero on success, negative error code if failed.
   3008 */
   3009int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
   3010	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
   3011	void *data)
   3012{
   3013	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
   3014	struct intel_gvt_mmio_info *e;
   3015	int i, j, ret;
   3016
   3017	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
   3018		ret = handler(gvt, e->offset, data);
   3019		if (ret)
   3020			return ret;
   3021	}
   3022
   3023	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
   3024		/* pvinfo data doesn't come from hw mmio */
   3025		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
   3026			continue;
   3027
   3028		for (j = 0; j < block->size; j += 4) {
   3029			ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
   3030			if (ret)
   3031				return ret;
   3032		}
   3033	}
   3034	return 0;
   3035}
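
/*
 * Illustrative example of a handler that could be passed to
 * intel_gvt_for_each_tracked_mmio().  It is hypothetical and compiled out;
 * the function and counter names are not part of the driver.  The iterator
 * calls the handler once per tracked register and once per DWORD of each
 * MMIO block (except the PVINFO page); returning non-zero stops the walk.
 */
#if 0
static int example_count_tracked_mmio(struct intel_gvt *gvt, u32 offset,
				      void *data)
{
	unsigned int *count = data;	/* caller-provided counter */

	(*count)++;
	return 0;			/* keep iterating */
}

/* Usage sketch:
 *	unsigned int n = 0;
 *	intel_gvt_for_each_tracked_mmio(gvt, example_count_tracked_mmio, &n);
 */
#endif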
   3036
   3037/**
   3038 * intel_vgpu_default_mmio_read - default MMIO read handler
   3039 * @vgpu: a vGPU
   3040 * @offset: access offset
   3041 * @p_data: data return buffer
   3042 * @bytes: access data length
   3043 *
   3044 * Returns:
   3045 * Zero on success, negative error code if failed.
   3046 */
   3047int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   3048		void *p_data, unsigned int bytes)
   3049{
   3050	read_vreg(vgpu, offset, p_data, bytes);
   3051	return 0;
   3052}
   3053
   3054/**
    3055 * intel_vgpu_default_mmio_write - default MMIO write handler
   3056 * @vgpu: a vGPU
   3057 * @offset: access offset
   3058 * @p_data: write data buffer
   3059 * @bytes: access data length
   3060 *
   3061 * Returns:
   3062 * Zero on success, negative error code if failed.
   3063 */
   3064int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   3065		void *p_data, unsigned int bytes)
   3066{
   3067	write_vreg(vgpu, offset, p_data, bytes);
   3068	return 0;
   3069}
   3070
   3071/**
   3072 * intel_vgpu_mask_mmio_write - write mask register
   3073 * @vgpu: a vGPU
   3074 * @offset: access offset
   3075 * @p_data: write data buffer
   3076 * @bytes: access data length
   3077 *
   3078 * Returns:
   3079 * Zero on success, negative error code if failed.
   3080 */
   3081int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   3082		void *p_data, unsigned int bytes)
   3083{
   3084	u32 mask, old_vreg;
   3085
   3086	old_vreg = vgpu_vreg(vgpu, offset);
   3087	write_vreg(vgpu, offset, p_data, bytes);
   3088	mask = vgpu_vreg(vgpu, offset) >> 16;
   3089	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
   3090				(vgpu_vreg(vgpu, offset) & mask);
   3091
   3092	return 0;
   3093}
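
/*
 * Worked example of the masked-write semantics above, where the upper 16 bits
 * of the written value select which of the lower 16 bits actually change:
 *
 *	old vreg       = 0x00000005	(bits 0 and 2 set)
 *	guest writes     0x00030002	(mask 0x0003, new low bits 0b10)
 *	mask           = 0x0003
 *	result         = (0x00000005 & ~0x0003) | (0x00030002 & 0x0003)
 *	               = 0x00000004 | 0x00000002 = 0x00000006
 *
 * Bits 0-1 take the written value, bit 2 (not covered by the mask) keeps its
 * old state, and the stored upper half comes from the old vreg (here zero).
 */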
   3094
   3095/**
    3096 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO register is in
    3097 * the force-nonpriv whitelist
   3098 *
   3099 * @gvt: a GVT device
   3100 * @offset: register offset
   3101 *
   3102 * Returns:
    3103 * True if the register is in the force-nonpriv whitelist;
    3104 * False otherwise.
   3105 */
   3106bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
   3107					  unsigned int offset)
   3108{
   3109	return in_whitelist(offset);
   3110}
   3111
   3112/**
   3113 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
   3114 * @vgpu: a vGPU
   3115 * @offset: register offset
   3116 * @pdata: data buffer
   3117 * @bytes: data length
   3118 * @is_read: read or write
   3119 *
   3120 * Returns:
   3121 * Zero on success, negative error code if failed.
   3122 */
   3123int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
   3124			   void *pdata, unsigned int bytes, bool is_read)
   3125{
   3126	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
   3127	struct intel_gvt *gvt = vgpu->gvt;
   3128	struct intel_gvt_mmio_info *mmio_info;
   3129	struct gvt_mmio_block *mmio_block;
   3130	gvt_mmio_func func;
   3131	int ret;
   3132
   3133	if (drm_WARN_ON(&i915->drm, bytes > 8))
   3134		return -EINVAL;
   3135
   3136	/*
   3137	 * Handle special MMIO blocks.
   3138	 */
   3139	mmio_block = find_mmio_block(gvt, offset);
   3140	if (mmio_block) {
   3141		func = is_read ? mmio_block->read : mmio_block->write;
   3142		if (func)
   3143			return func(vgpu, offset, pdata, bytes);
   3144		goto default_rw;
   3145	}
   3146
   3147	/*
   3148	 * Normal tracked MMIOs.
   3149	 */
   3150	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
   3151	if (!mmio_info) {
   3152		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
   3153		goto default_rw;
   3154	}
   3155
   3156	if (is_read)
   3157		return mmio_info->read(vgpu, offset, pdata, bytes);
   3158	else {
   3159		u64 ro_mask = mmio_info->ro_mask;
   3160		u32 old_vreg = 0;
   3161		u64 data = 0;
   3162
   3163		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
   3164			old_vreg = vgpu_vreg(vgpu, offset);
   3165		}
   3166
   3167		if (likely(!ro_mask))
   3168			ret = mmio_info->write(vgpu, offset, pdata, bytes);
   3169		else if (!~ro_mask) {
   3170			gvt_vgpu_err("try to write RO reg %x\n", offset);
   3171			return 0;
   3172		} else {
   3173			/* keep the RO bits in the virtual register */
   3174			memcpy(&data, pdata, bytes);
   3175			data &= ~ro_mask;
   3176			data |= vgpu_vreg(vgpu, offset) & ro_mask;
   3177			ret = mmio_info->write(vgpu, offset, &data, bytes);
   3178		}
   3179
    3180		/* the upper 16 bits of mode control regs are mask bits for the change */
   3181		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
   3182			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
   3183
   3184			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
   3185					| (vgpu_vreg(vgpu, offset) & mask);
   3186		}
   3187	}
   3188
   3189	return ret;
   3190
   3191default_rw:
   3192	return is_read ?
   3193		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
   3194		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
   3195}
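
/*
 * Worked example of the ro_mask handling in the write path above, using an
 * illustrative ro_mask of 0x000000ff (low byte read-only), a current vreg of
 * 0x0000007f and a guest write of 0x123456aa:
 *
 *	data = (0x123456aa & ~0x000000ff) | (0x0000007f & 0x000000ff)
 *	     = 0x12345600 | 0x0000007f = 0x1234567f
 *
 * so the register-specific write handler sees 0x1234567f and the read-only
 * byte keeps its current value.  A ro_mask of all ones drops the write with
 * an error message; a ro_mask of zero passes the guest data through untouched.
 */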
   3196
   3197void intel_gvt_restore_fence(struct intel_gvt *gvt)
   3198{
   3199	struct intel_vgpu *vgpu;
   3200	int i, id;
   3201
   3202	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
   3203		mmio_hw_access_pre(gvt->gt);
   3204		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
   3205			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
   3206		mmio_hw_access_post(gvt->gt);
   3207	}
   3208}
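
/*
 * Restore path: for every vGPU, each fence register is rewritten from its
 * 64-bit virtual-register copy (vgpu_vreg64() at fence_num_to_offset(i)),
 * with mmio_hw_access_pre()/mmio_hw_access_post() bracketing the hardware
 * writes.
 */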
   3209
   3210static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
   3211{
   3212	struct intel_vgpu *vgpu = data;
   3213	struct drm_i915_private *dev_priv = gvt->gt->i915;
   3214
   3215	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
   3216		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
   3217
   3218	return 0;
   3219}
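
/*
 * mmio_attribute holds one entry per DWORD, so offset >> 2 indexes the entry
 * for a 4-byte-aligned register offset.  Only registers whose entry carries
 * the F_PM_SAVE flag are written back to the hardware here, using the values
 * previously latched in the vGPU's virtual registers.
 */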
   3220
   3221void intel_gvt_restore_mmio(struct intel_gvt *gvt)
   3222{
   3223	struct intel_vgpu *vgpu;
   3224	int id;
   3225
   3226	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
   3227		mmio_hw_access_pre(gvt->gt);
   3228		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
   3229		mmio_hw_access_post(gvt->gt);
   3230	}
   3231}