cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_display_power_well.c (57390B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

struct i915_power_well_ops {
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *i915,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *i915,
			   struct i915_power_well *power_well);
};
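
/*
 * Illustrative sketch (not part of the driver): the simplest way to wire
 * up this vtable is with no-op/constant handlers, as the i9xx "always on"
 * well further down in this file does. "example_always_on_ops" is a
 * hypothetical name:
 *
 *	static const struct i915_power_well_ops example_always_on_ops = {
 *		.sync_hw = i9xx_power_well_sync_hw_noop,
 *		.enable = i9xx_always_on_power_well_noop,
 *		.disable = i9xx_always_on_power_well_noop,
 *		.is_enabled = i9xx_always_on_power_well_enabled,
 *	};
 *
 * Real instances additionally set .regs when the hsw-style CTL registers
 * are used.
 */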

static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well *power_well)
{
	return &power_well->desc->instances->list[power_well->instance_idx];
}

struct i915_power_well *
lookup_power_well(struct drm_i915_private *i915,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well)
		if (i915_power_well_instance(power_well)->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&i915->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &i915->power_domains.power_wells[0];
}

void intel_power_well_enable(struct drm_i915_private *i915,
			     struct i915_power_well *power_well)
{
	drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
	power_well->desc->ops->enable(i915, power_well);
	power_well->hw_enabled = true;
}

void intel_power_well_disable(struct drm_i915_private *i915,
			      struct i915_power_well *power_well)
{
	drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(i915, power_well);
}

void intel_power_well_sync_hw(struct drm_i915_private *i915,
			      struct i915_power_well *power_well)
{
	power_well->desc->ops->sync_hw(i915, power_well);
	power_well->hw_enabled =
		power_well->desc->ops->is_enabled(i915, power_well);
}

void intel_power_well_get(struct drm_i915_private *i915,
			  struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(i915, power_well);
}

void intel_power_well_put(struct drm_i915_private *i915,
			  struct i915_power_well *power_well)
{
	drm_WARN(&i915->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 i915_power_well_instance(power_well)->name);

	if (!--power_well->count)
		intel_power_well_disable(i915, power_well);
}
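
/*
 * Illustrative usage sketch (hypothetical caller): accesses that depend on
 * a well bracket them with get/put; the refcount ensures only the 0->1 and
 * 1->0 transitions actually touch the hardware:
 *
 *	intel_power_well_get(i915, power_well);
 *	... access resources backed by the well ...
 *	intel_power_well_put(i915, power_well);
 *
 * In the driver these calls are normally reached via the display power
 * domain code rather than made directly.
 */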

bool intel_power_well_is_enabled(struct drm_i915_private *i915,
				 struct i915_power_well *power_well)
{
	return power_well->desc->ops->is_enabled(i915, power_well);
}

bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
{
	return power_well->hw_enabled;
}

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, power_well_id);

	return intel_power_well_is_enabled(dev_priv, power_well);
}

bool intel_power_well_is_always_on(struct i915_power_well *power_well)
{
	return power_well->desc->always_on;
}

const char *intel_power_well_name(struct i915_power_well *power_well)
{
	return i915_power_well_instance(power_well)->name;
}

struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
{
	return &power_well->domains;
}

int intel_power_well_refcount(struct i915_power_well *power_well)
{
	return power_well->count;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
					     ICL_AUX_PW_TO_CH(pw_idx);
}
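
/*
 * Worked example of the mapping above (a sketch; it relies on the AUX
 * enum values being contiguous, which the macros already assume): for a
 * non-TBT well with pw_idx == ICL_PW_CTL_IDX_AUX_B,
 *
 *	ICL_AUX_PW_TO_CH(pw_idx)
 *		== ICL_PW_CTL_IDX_AUX_B - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A
 *		== AUX_CH_B
 *
 * while the first TBT well, ICL_PW_CTL_IDX_AUX_TBT1, maps to AUX_CH_C.
 */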

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
		usleep_range(600, 1200);
		return;
	}

	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    intel_power_well_name(power_well));

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
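
/*
 * Sketch of how the returned value decodes: bit 0 = BIOS, bit 1 = driver,
 * bit 2 = KVMR (only if the platform has that register), bit 3 = debug,
 * matching the diagnostic print in hsw_wait_for_power_well_disable().
 * For example:
 *
 *	reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx);
 *	if (reqs & 2)
 *		... the driver itself is still requesting the well ...
 */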

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but we still
	 * do this out of paranoia. The known cases where a PW will be forced
	 * on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    intel_power_well_name(power_well),
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 val;

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->irq_pipe_mask,
				   power_well->desc->has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
					      250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	bool is_tbt = power_well->desc->is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or when we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return hsw_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *  set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks often enough, and keep rewriting until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need just one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}
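
/*
 * In short (a summary of the loop above): the write is re-issued until
 * DC_STATE_EN reads back as @state for several consecutive reads, or we
 * give up after 100 rewrites and report an error.
 */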

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 |
			DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}
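
/*
 * Example of the resulting mask (per the code above): a DISPLAY_VER >= 12
 * platform gets DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9, while e.g. a SKL-class platform
 * (no DC9, no DC3CO) is limited to DC_STATE_EN_UPTO_DC5 |
 * DC_STATE_EN_UPTO_DC6.
 */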

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_DISPLAY(dev_priv))
		return;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->dmc.dc_state, val);
	dev_priv->dmc.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns them back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->dmc.allowed_dc_mask))
		state &= dev_priv->dmc.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->dmc.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->dmc.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->dmc.dc_state = val & mask;
}
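
/*
 * Usage sketch: callers pass one of the states listed in the kernel-doc
 * above, e.g.
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 *
 * to allow DC5 (as gen9_enable_dc5() below does), and
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 *
 * to block all DC states again (as gen9_disable_dc_states() does).
 */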

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us for the DC3CO exit time, per Bspec 49196
	 */
	usleep_range(200, 210);
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (intel_power_well_refcount(power_well) > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 i915_power_well_instance(power_well)->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (dev_priv->dmc.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (intel_power_well_refcount(power_well) > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
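
/*
 * The function above is the usual punit sequence (sketch): take the
 * sideband lock, read-modify-write PUNIT_REG_PWRGT_CTRL, then poll
 * PUNIT_REG_PWRGT_STATUS via the COND macro until it reflects the
 * requested state or the 100 ms wait_for() times out.
 */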

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be initialized explicitly anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
   1519	if (!dev_priv->chv_phy_assert[phy])
   1520		return;
   1521
   1522	if (ch == DPIO_CH0)
   1523		reg = _CHV_CMN_DW0_CH0;
   1524	else
   1525		reg = _CHV_CMN_DW6_CH1;
   1526
   1527	vlv_dpio_get(dev_priv);
   1528	val = vlv_dpio_read(dev_priv, pipe, reg);
   1529	vlv_dpio_put(dev_priv);
   1530
   1531	/*
   1532	 * This assumes !override is only used when the port is disabled.
   1533	 * All lanes should power down even without the override when
   1534	 * the port is disabled.
   1535	 */
   1536	if (!override || mask == 0xf) {
   1537		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
   1538		/*
   1539		 * If CH1 common lane is not active anymore
   1540		 * (e.g. for pipe B DPLL) the entire channel will
   1541		 * shut down, which causes the common lane registers
   1542		 * to read as 0. That means we can't actually check
   1543		 * the lane power down status bits, but as the entire
   1544		 * register reads as 0 it's a good indication that the
   1545		 * channel is indeed entirely powered down.
   1546		 */
   1547		if (ch == DPIO_CH1 && val == 0)
   1548			expected = 0;
   1549	} else if (mask != 0x0) {
   1550		expected = DPIO_ANYDL_POWERDOWN;
   1551	} else {
   1552		expected = 0;
   1553	}
   1554
   1555	if (ch == DPIO_CH0)
   1556		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
   1557	else
   1558		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
   1559	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
   1560
   1561	drm_WARN(&dev_priv->drm, actual != expected,
   1562		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
   1563		 !!(actual & DPIO_ALLDL_POWERDOWN),
   1564		 !!(actual & DPIO_ANYDL_POWERDOWN),
   1565		 !!(expected & DPIO_ALLDL_POWERDOWN),
   1566		 !!(expected & DPIO_ANYDL_POWERDOWN),
   1567		 reg, val);
   1568}
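
       /*
        * Illustrative sketch (not part of the driver): the expected lane
        * power-down bits checked above, written as a pure function of the
        * override and lane mask inputs. The CH1 "entire channel powered
        * down reads as zero" special case is omitted for brevity.
        */
       #if 0
       static u32 chv_expected_lane_powerdown(bool override, unsigned int mask)
       {
       	/*
       	 * With no override in effect, or with all lanes in the mask,
       	 * every lane is expected to power down.
       	 */
       	if (!override || mask == 0xf)
       		return DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
       	else if (mask != 0x0)
       		return DPIO_ANYDL_POWERDOWN;
       	else
       		return 0;
       }
       #endif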
   1569
   1570bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
   1571			  enum dpio_channel ch, bool override)
   1572{
   1573	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1574	bool was_override;
   1575
   1576	mutex_lock(&power_domains->lock);
   1577
   1578	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
   1579
   1580	if (override == was_override)
   1581		goto out;
   1582
   1583	if (override)
   1584		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
   1585	else
   1586		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
   1587
   1588	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
   1589		       dev_priv->chv_phy_control);
   1590
   1591	drm_dbg_kms(&dev_priv->drm,
   1592		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
   1593		    phy, ch, dev_priv->chv_phy_control);
   1594
   1595	assert_chv_phy_status(dev_priv);
   1596
   1597out:
   1598	mutex_unlock(&power_domains->lock);
   1599
   1600	return was_override;
   1601}
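
       /*
        * Hypothetical usage sketch (not from this file): the returned
        * previous override state lets a caller force the override
        * temporarily and restore it afterwards, e.g.:
        *
        *	bool was = chv_phy_powergate_ch(i915, phy, ch, true);
        *	... reconfigure the lanes ...
        *	chv_phy_powergate_ch(i915, phy, ch, was);
        */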
   1602
   1603void chv_phy_powergate_lanes(struct intel_encoder *encoder,
   1604			     bool override, unsigned int mask)
   1605{
   1606	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
   1607	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1608	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
   1609	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
   1610
   1611	mutex_lock(&power_domains->lock);
   1612
   1613	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
   1614	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
   1615
   1616	if (override)
   1617		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
   1618	else
   1619		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
   1620
   1621	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
   1622		       dev_priv->chv_phy_control);
   1623
   1624	drm_dbg_kms(&dev_priv->drm,
   1625		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
   1626		    phy, ch, mask, dev_priv->chv_phy_control);
   1627
   1628	assert_chv_phy_status(dev_priv);
   1629
   1630	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
   1631
   1632	mutex_unlock(&power_domains->lock);
   1633}
   1634
   1635static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
   1636					struct i915_power_well *power_well)
   1637{
   1638	enum pipe pipe = PIPE_A;
   1639	bool enabled;
   1640	u32 state, ctrl;
   1641
   1642	vlv_punit_get(dev_priv);
   1643
   1644	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
   1645	/*
   1646	 * We only ever set the power-on and power-gate states, anything
   1647	 * else is unexpected.
   1648	 */
   1649	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
   1650		    state != DP_SSS_PWR_GATE(pipe));
   1651	enabled = state == DP_SSS_PWR_ON(pipe);
   1652
   1653	/*
   1654	 * A transient state at this point would mean some unexpected party
   1655	 * is poking at the power controls too.
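       	 * The DP_SSC control field sits 16 bits below the corresponding
       	 * DP_SSS status field, hence the shift in the check below.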
   1656	 */
   1657	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
   1658	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
   1659
   1660	vlv_punit_put(dev_priv);
   1661
   1662	return enabled;
   1663}
   1664
   1665static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
   1666				    struct i915_power_well *power_well,
   1667				    bool enable)
   1668{
   1669	enum pipe pipe = PIPE_A;
   1670	u32 state;
   1671	u32 ctrl;
   1672
   1673	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
   1674
   1675	vlv_punit_get(dev_priv);
   1676
   1677#define COND \
   1678	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
   1679
   1680	if (COND)
   1681		goto out;
   1682
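       	/* Request the new state via the DP_SSC control bits. */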
   1683	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
   1684	ctrl &= ~DP_SSC_MASK(pipe);
   1685	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
   1686	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
   1687
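       	/* Poll until the DP_SSS status bits reflect the request. */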
   1688	if (wait_for(COND, 100))
   1689		drm_err(&dev_priv->drm,
   1690			"timeout setting power well state %08x (%08x)\n",
   1691			state,
   1692			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
   1693
   1694#undef COND
   1695
   1696out:
   1697	vlv_punit_put(dev_priv);
   1698}
   1699
   1700static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
   1701					struct i915_power_well *power_well)
   1702{
   1703	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
   1704		       dev_priv->chv_phy_control);
   1705}
   1706
   1707static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
   1708				       struct i915_power_well *power_well)
   1709{
   1710	chv_set_pipe_power_well(dev_priv, power_well, true);
   1711
   1712	vlv_display_power_well_init(dev_priv);
   1713}
   1714
   1715static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
   1716					struct i915_power_well *power_well)
   1717{
   1718	vlv_display_power_well_deinit(dev_priv);
   1719
   1720	chv_set_pipe_power_well(dev_priv, power_well, false);
   1721}
   1722
   1723static void
   1724tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
   1725{
   1726	u8 tries = 0;
   1727	int ret;
   1728
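       	/* Retry the PCODE request up to three times, 1 ms apart. */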
   1729	while (1) {
   1730		u32 low_val;
   1731		u32 high_val = 0;
   1732
   1733		if (block)
   1734			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
   1735		else
   1736			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
   1737
   1738		/*
   1739		 * The spec states that we should time out the request after
   1740		 * 200us, but the function below will time out after 500us.
   1741		 */
   1742		ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
   1743		if (ret == 0) {
   1744			if (block &&
   1745			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
   1746				ret = -EIO;
   1747			else
   1748				break;
   1749		}
   1750
   1751		if (++tries == 3)
   1752			break;
   1753
   1754		msleep(1);
   1755	}
   1756
   1757	if (ret)
   1758		drm_err(&i915->drm, "TC cold %sblock failed\n",
   1759			block ? "" : "un");
   1760	else
   1761		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
   1762			    block ? "" : "un");
   1763}
   1764
   1765static void
   1766tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
   1767				  struct i915_power_well *power_well)
   1768{
   1769	tgl_tc_cold_request(i915, true);
   1770}
   1771
   1772static void
   1773tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
   1774				   struct i915_power_well *power_well)
   1775{
   1776	tgl_tc_cold_request(i915, false);
   1777}
   1778
   1779static void
   1780tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
   1781				   struct i915_power_well *power_well)
   1782{
   1783	if (intel_power_well_refcount(power_well) > 0)
   1784		tgl_tc_cold_off_power_well_enable(i915, power_well);
   1785	else
   1786		tgl_tc_cold_off_power_well_disable(i915, power_well);
   1787}
   1788
   1789static bool
   1790tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
   1791				      struct i915_power_well *power_well)
   1792{
   1793	/*
   1794	 * Not the correct implementation, but there is no way to just read it
   1795	 * back from PCODE, so return the refcount to avoid state mismatch errors.
   1796	 */
   1797	return intel_power_well_refcount(power_well);
   1798}
   1799
   1800
   1801const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
   1802	.sync_hw = i9xx_power_well_sync_hw_noop,
   1803	.enable = i9xx_always_on_power_well_noop,
   1804	.disable = i9xx_always_on_power_well_noop,
   1805	.is_enabled = i9xx_always_on_power_well_enabled,
   1806};
   1807
   1808const struct i915_power_well_ops chv_pipe_power_well_ops = {
   1809	.sync_hw = chv_pipe_power_well_sync_hw,
   1810	.enable = chv_pipe_power_well_enable,
   1811	.disable = chv_pipe_power_well_disable,
   1812	.is_enabled = chv_pipe_power_well_enabled,
   1813};
   1814
   1815const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
   1816	.sync_hw = i9xx_power_well_sync_hw_noop,
   1817	.enable = chv_dpio_cmn_power_well_enable,
   1818	.disable = chv_dpio_cmn_power_well_disable,
   1819	.is_enabled = vlv_power_well_enabled,
   1820};
   1821
   1822const struct i915_power_well_ops i830_pipes_power_well_ops = {
   1823	.sync_hw = i830_pipes_power_well_sync_hw,
   1824	.enable = i830_pipes_power_well_enable,
   1825	.disable = i830_pipes_power_well_disable,
   1826	.is_enabled = i830_pipes_power_well_enabled,
   1827};
   1828
   1829static const struct i915_power_well_regs hsw_power_well_regs = {
   1830	.bios	= HSW_PWR_WELL_CTL1,
   1831	.driver	= HSW_PWR_WELL_CTL2,
   1832	.kvmr	= HSW_PWR_WELL_CTL3,
   1833	.debug	= HSW_PWR_WELL_CTL4,
   1834};
   1835
   1836const struct i915_power_well_ops hsw_power_well_ops = {
   1837	.regs = &hsw_power_well_regs,
   1838	.sync_hw = hsw_power_well_sync_hw,
   1839	.enable = hsw_power_well_enable,
   1840	.disable = hsw_power_well_disable,
   1841	.is_enabled = hsw_power_well_enabled,
   1842};
   1843
   1844const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
   1845	.sync_hw = i9xx_power_well_sync_hw_noop,
   1846	.enable = gen9_dc_off_power_well_enable,
   1847	.disable = gen9_dc_off_power_well_disable,
   1848	.is_enabled = gen9_dc_off_power_well_enabled,
   1849};
   1850
   1851const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
   1852	.sync_hw = i9xx_power_well_sync_hw_noop,
   1853	.enable = bxt_dpio_cmn_power_well_enable,
   1854	.disable = bxt_dpio_cmn_power_well_disable,
   1855	.is_enabled = bxt_dpio_cmn_power_well_enabled,
   1856};
   1857
   1858const struct i915_power_well_ops vlv_display_power_well_ops = {
   1859	.sync_hw = i9xx_power_well_sync_hw_noop,
   1860	.enable = vlv_display_power_well_enable,
   1861	.disable = vlv_display_power_well_disable,
   1862	.is_enabled = vlv_power_well_enabled,
   1863};
   1864
   1865const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
   1866	.sync_hw = i9xx_power_well_sync_hw_noop,
   1867	.enable = vlv_dpio_cmn_power_well_enable,
   1868	.disable = vlv_dpio_cmn_power_well_disable,
   1869	.is_enabled = vlv_power_well_enabled,
   1870};
   1871
   1872const struct i915_power_well_ops vlv_dpio_power_well_ops = {
   1873	.sync_hw = i9xx_power_well_sync_hw_noop,
   1874	.enable = vlv_power_well_enable,
   1875	.disable = vlv_power_well_disable,
   1876	.is_enabled = vlv_power_well_enabled,
   1877};
   1878
   1879static const struct i915_power_well_regs icl_aux_power_well_regs = {
   1880	.bios	= ICL_PWR_WELL_CTL_AUX1,
   1881	.driver	= ICL_PWR_WELL_CTL_AUX2,
   1882	.debug	= ICL_PWR_WELL_CTL_AUX4,
   1883};
   1884
   1885const struct i915_power_well_ops icl_aux_power_well_ops = {
   1886	.regs = &icl_aux_power_well_regs,
   1887	.sync_hw = hsw_power_well_sync_hw,
   1888	.enable = icl_aux_power_well_enable,
   1889	.disable = icl_aux_power_well_disable,
   1890	.is_enabled = hsw_power_well_enabled,
   1891};
   1892
   1893static const struct i915_power_well_regs icl_ddi_power_well_regs = {
   1894	.bios	= ICL_PWR_WELL_CTL_DDI1,
   1895	.driver	= ICL_PWR_WELL_CTL_DDI2,
   1896	.debug	= ICL_PWR_WELL_CTL_DDI4,
   1897};
   1898
   1899const struct i915_power_well_ops icl_ddi_power_well_ops = {
   1900	.regs = &icl_ddi_power_well_regs,
   1901	.sync_hw = hsw_power_well_sync_hw,
   1902	.enable = hsw_power_well_enable,
   1903	.disable = hsw_power_well_disable,
   1904	.is_enabled = hsw_power_well_enabled,
   1905};
   1906
   1907const struct i915_power_well_ops tgl_tc_cold_off_ops = {
   1908	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
   1909	.enable = tgl_tc_cold_off_power_well_enable,
   1910	.disable = tgl_tc_cold_off_power_well_disable,
   1911	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
   1912};
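
       /*
        * Minimal dispatch sketch (hypothetical, not from this file): the
        * ops tables above are plugged into the per-platform power well
        * descriptors and invoked through the descriptor's ops pointer,
        * roughly:
        *
        *	power_well->desc->ops->enable(i915, power_well);
        *
        * (The desc/ops field names here are assumptions based on how the
        * descriptors are dereferenced elsewhere in the driver.)
        */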