cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_display_power.c (75002B)


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_snps_phy.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
	for_each_power_well(__dev_priv, __power_well)			\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well)		\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
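
/*
 * Illustrative sketch, not part of the original file: typical use of
 * intel_display_power_is_enabled() in hardware state readout. The helper
 * name and the domain below are hypothetical; callers are expected to hold
 * the relevant modeset locks, as the kernel-doc above notes.
 */
static bool __maybe_unused example_readout_pipe_a(struct drm_i915_private *i915)
{
	/* Only read registers if the domain is powered; don't power it up. */
	if (!intel_display_power_is_enabled(i915, POWER_DOMAIN_PIPE_A))
		return false;

	/* ... read out PIPE_A hardware state here ... */
	return true;
}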

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
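
/*
 * Worked example, not part of the original file: with allowed_dc_mask
 * containing only DC_STATE_EN_UPTO_DC5, a request for DC_STATE_EN_UPTO_DC6
 * matches states[0], is found to be disallowed, and falls through to
 * states[1] (DC_STATE_EN_UPTO_DC5), which is allowed and returned. A value
 * matching no entry in states[] is returned unchanged.
 */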

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
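
/*
 * Illustrative sketch, not part of the original file: a caller such as the
 * PSR code can steer the "DC off" power well towards DC3CO and later restore
 * the default deepest state. The helper below is hypothetical; only the
 * function it calls is defined above.
 */
static void __maybe_unused example_select_dc3co(struct drm_i915_private *i915)
{
	/* Prefer DC3CO while e.g. PSR2 is active ... */
	intel_display_power_set_target_dc_state(i915, DC_STATE_EN_DC3CO);

	/* ... and fall back to the default upto-DC6 target afterwards. */
	intel_display_power_set_target_dc_state(i915, DC_STATE_EN_UPTO_DC6);
}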

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
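
/*
 * Illustrative sketch, not part of the original file: the canonical get/put
 * pairing. Every wakeref returned by intel_display_power_get() must be
 * passed back to intel_display_power_put(); the helper and the AUX domain
 * below are hypothetical.
 */
static void __maybe_unused example_aux_transfer(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* Powers up the AUX_A well and all wells above it. */
	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);

	/* ... perform the AUX transfer ... */

	/* Symmetric release; may power the well back down. */
	intel_display_power_put(i915, POWER_DOMAIN_AUX_A, wakeref);
}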

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * The reference is grabbed only if the power domain is already enabled;
 * otherwise 0 is returned and no reference is taken.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
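
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * around intel_display_power_get_if_enabled() in readout paths, where a
 * zero return means the domain is off and must not be touched. The helper
 * and the domain below are hypothetical.
 */
static void __maybe_unused example_conditional_readout(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_PIPE_B);
	if (!wakeref)
		return; /* domain is off; skip the readout */

	/* ... read out hardware state for pipe B ... */

	intel_display_power_put(i915, POWER_DOMAIN_PIPE_B, wakeref);
}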

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a
	 * proper wakeref to make the state checker happy about the HW
	 * access during power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
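
/*
 * Illustrative sketch, not part of the original file: dropping a reference
 * asynchronously defers the actual power-down (the work above is queued with
 * a 100 ms delay), which helps callers that reacquire the same domain in
 * quick succession. intel_display_power_put_async() is the header wrapper
 * around the function above; the helper and the domain are hypothetical.
 */
static void __maybe_unused example_async_put(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);

	/* ... a short burst of work ... */

	/* Defer the power-down instead of doing it synchronously. */
	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);

	/* Optionally force any pending deferred disabling to finish now. */
	intel_display_power_flush_work(i915);
}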

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled;
 * it should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}
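
/*
 * Illustrative sketch, not part of the original file: tracking several
 * references in a struct intel_display_power_domain_set so they can be
 * dropped in bulk. The helper below is hypothetical, and
 * intel_display_power_put_all_in_set() is assumed to be the header wrapper
 * that passes the set's own mask to intel_display_power_put_mask_in_set().
 */
static void __maybe_unused example_domain_set(struct drm_i915_private *i915)
{
	struct intel_display_power_domain_set set = {};

	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_TRANSCODER_A);

	/* ... use the hardware ... */

	intel_display_power_put_all_in_set(i915, &set);
}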

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
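
/*
 * Worked example, not part of the original file: on DG1 with enable_dc=-1
 * and the disable_power_well parameter left enabled, max_dc is 3 and
 * DISPLAY_VER >= 11 allows DC9 unconditionally, so the function returns
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5.
 */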

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->power_domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * We might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect, for instance, which causes an
	 * assertion to trigger due to a race condition:
	 * gen9_assert_dbuf_enabled might preempt this when the registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

   1071	/*
   1072	 * Just power up at least 1 slice, we will
   1073	 * figure out later which slices we have and what we need.
   1074	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong.  Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
   1324 * hang the machine.
   1325 *
   1326 * When we really reach PC8 or deeper states (not just when we allow it) we lose
   1327 * the state of some registers, so when we come back from PC8+ we need to
   1328 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
   1329 * need to take care of the registers kept by RC6. Notice that this happens even
   1330 * if we don't put the device in PCI D3 state (which is what currently happens
   1331 * because of the runtime PM support).
   1332 *
   1333 * For more, read "Display Sequences for Package C8" on the hardware
   1334 * documentation.
   1335 */
   1336static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
   1337{
   1338	u32 val;
   1339
   1340	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
   1341
   1342	if (HAS_PCH_LPT_LP(dev_priv)) {
   1343		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
   1344		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
   1345		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
   1346	}
   1347
   1348	lpt_disable_clkout_dp(dev_priv);
   1349	hsw_disable_lcpll(dev_priv, true, true);
   1350}
   1351
   1352static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
   1353{
   1354	u32 val;
   1355
   1356	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
   1357
   1358	hsw_restore_lcpll(dev_priv);
   1359	intel_init_pch_refclk(dev_priv);
   1360
   1361	if (HAS_PCH_LPT_LP(dev_priv)) {
   1362		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
   1363		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
   1364		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
   1365	}
   1366}
   1367
   1368static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
   1369				      bool enable)
   1370{
   1371	i915_reg_t reg;
   1372	u32 reset_bits, val;
   1373
   1374	if (IS_IVYBRIDGE(dev_priv)) {
   1375		reg = GEN7_MSG_CTL;
   1376		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
   1377	} else {
   1378		reg = HSW_NDE_RSTWRN_OPT;
   1379		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
   1380	}
   1381
   1382	val = intel_de_read(dev_priv, reg);
   1383
   1384	if (enable)
   1385		val |= reset_bits;
   1386	else
   1387		val &= ~reset_bits;
   1388
   1389	intel_de_write(dev_priv, reg, val);
   1390}
   1391
   1392static void skl_display_core_init(struct drm_i915_private *dev_priv,
   1393				  bool resume)
   1394{
   1395	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1396	struct i915_power_well *well;
   1397
   1398	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
   1399
   1400	/* enable PCH reset handshake */
   1401	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
   1402
   1403	if (!HAS_DISPLAY(dev_priv))
   1404		return;
   1405
   1406	/* enable PG1 and Misc I/O */
   1407	mutex_lock(&power_domains->lock);
   1408
   1409	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
   1410	intel_power_well_enable(dev_priv, well);
   1411
   1412	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
   1413	intel_power_well_enable(dev_priv, well);
   1414
   1415	mutex_unlock(&power_domains->lock);
   1416
   1417	intel_cdclk_init_hw(dev_priv);
   1418
   1419	gen9_dbuf_enable(dev_priv);
   1420
   1421	if (resume)
   1422		intel_dmc_load_program(dev_priv);
   1423}
   1424
   1425static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
   1426{
   1427	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1428	struct i915_power_well *well;
   1429
   1430	if (!HAS_DISPLAY(dev_priv))
   1431		return;
   1432
   1433	gen9_disable_dc_states(dev_priv);
   1434
   1435	gen9_dbuf_disable(dev_priv);
   1436
   1437	intel_cdclk_uninit_hw(dev_priv);
   1438
   1439	/* The spec doesn't call for removing the reset handshake flag */
   1440	/* disable PG1 and Misc I/O */
   1441
   1442	mutex_lock(&power_domains->lock);
   1443
   1444	/*
   1445	 * BSpec says to keep the MISC IO power well enabled here, only
   1446	 * remove our request for power well 1.
   1447	 * Note that even though the driver's request is removed power well 1
   1448	 * may stay enabled after this due to DMC's own request on it.
   1449	 */
   1450	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
   1451	intel_power_well_disable(dev_priv, well);
   1452
   1453	mutex_unlock(&power_domains->lock);
   1454
   1455	usleep_range(10, 30);		/* 10 us delay per Bspec */
   1456}
   1457
   1458static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
   1459{
   1460	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1461	struct i915_power_well *well;
   1462
   1463	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
   1464
   1465	/*
   1466	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
   1467	 * or else the reset will hang because there is no PCH to respond.
   1468	 * Move the handshake programming to initialization sequence.
   1469	 * Previously was left up to BIOS.
   1470	 */
   1471	intel_pch_reset_handshake(dev_priv, false);
   1472
   1473	if (!HAS_DISPLAY(dev_priv))
   1474		return;
   1475
   1476	/* Enable PG1 */
   1477	mutex_lock(&power_domains->lock);
   1478
   1479	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
   1480	intel_power_well_enable(dev_priv, well);
   1481
   1482	mutex_unlock(&power_domains->lock);
   1483
   1484	intel_cdclk_init_hw(dev_priv);
   1485
   1486	gen9_dbuf_enable(dev_priv);
   1487
   1488	if (resume)
   1489		intel_dmc_load_program(dev_priv);
   1490}
   1491
   1492static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
   1493{
   1494	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1495	struct i915_power_well *well;
   1496
   1497	if (!HAS_DISPLAY(dev_priv))
   1498		return;
   1499
   1500	gen9_disable_dc_states(dev_priv);
   1501
   1502	gen9_dbuf_disable(dev_priv);
   1503
   1504	intel_cdclk_uninit_hw(dev_priv);
   1505
   1506	/* The spec doesn't call for removing the reset handshake flag */
   1507
   1508	/*
   1509	 * Disable PW1 (PG1).
   1510	 * Note that even though the driver's request is removed power well 1
   1511	 * may stay enabled after this due to DMC's own request on it.
   1512	 */
   1513	mutex_lock(&power_domains->lock);
   1514
   1515	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
   1516	intel_power_well_disable(dev_priv, well);
   1517
   1518	mutex_unlock(&power_domains->lock);
   1519
   1520	usleep_range(10, 30);		/* 10 us delay per Bspec */
   1521}
   1522
   1523struct buddy_page_mask {
   1524	u32 page_mask;
   1525	u8 type;
   1526	u8 num_channels;
   1527};
   1528
   1529static const struct buddy_page_mask tgl_buddy_page_masks[] = {
   1530	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
   1531	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,	.page_mask = 0xF },
   1532	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
   1533	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
   1534	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
   1535	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
   1536	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
   1537	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
   1538	{}
   1539};
   1540
   1541static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
   1542	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
   1543	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
   1544	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
   1545	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
   1546	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
   1547	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
   1548	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
   1549	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
   1550	{}
   1551};
   1552
   1553static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
   1554{
   1555	enum intel_dram_type type = dev_priv->dram_info.type;
   1556	u8 num_channels = dev_priv->dram_info.num_channels;
   1557	const struct buddy_page_mask *table;
   1558	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
   1559	int config, i;
   1560
    1561	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
   1562	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
   1563		return;
   1564
   1565	if (IS_ALDERLAKE_S(dev_priv) ||
   1566	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
   1567	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
   1568	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
   1569		/* Wa_1409767108:tgl,dg1,adl-s */
   1570		table = wa_1409767108_buddy_page_masks;
   1571	else
   1572		table = tgl_buddy_page_masks;
   1573
   1574	for (config = 0; table[config].page_mask != 0; config++)
   1575		if (table[config].num_channels == num_channels &&
   1576		    table[config].type == type)
   1577			break;
   1578
   1579	if (table[config].page_mask == 0) {
   1580		drm_dbg(&dev_priv->drm,
   1581			"Unknown memory configuration; disabling address buddy logic.\n");
   1582		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
   1583			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
   1584				       BW_BUDDY_DISABLE);
   1585	} else {
   1586		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
   1587			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
   1588				       table[config].page_mask);
   1589
   1590			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
   1591			if (DISPLAY_VER(dev_priv) == 12)
   1592				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
   1593					     BW_BUDDY_TLB_REQ_TIMER_MASK,
   1594					     BW_BUDDY_TLB_REQ_TIMER(0x8));
   1595		}
   1596	}
   1597}
   1598
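/*
 * A self-contained sketch of the sentinel-terminated table scan used by
 * tgl_bw_buddy_init() above: the loop stops at the empty {} entry
 * (page_mask == 0), which doubles as the "not found" marker, so no separate
 * found/not-found flag is needed. buddy_lookup() is an illustrative name,
 * not a driver function.
 */
static const struct buddy_page_mask *
buddy_lookup(const struct buddy_page_mask *table,
	     enum intel_dram_type type, u8 num_channels)
{
	int i;

	for (i = 0; table[i].page_mask != 0; i++)
		if (table[i].num_channels == num_channels &&
		    table[i].type == type)
			return &table[i];

	return NULL;	/* reached the {} sentinel: unknown configuration */
}
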
   1599static void icl_display_core_init(struct drm_i915_private *dev_priv,
   1600				  bool resume)
   1601{
   1602	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1603	struct i915_power_well *well;
   1604	u32 val;
   1605
   1606	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
   1607
   1608	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
   1609	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
   1610	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
   1611		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
   1612			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
   1613
   1614	/* 1. Enable PCH reset handshake. */
   1615	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
   1616
   1617	if (!HAS_DISPLAY(dev_priv))
   1618		return;
   1619
   1620	/* 2. Initialize all combo phys */
   1621	intel_combo_phy_init(dev_priv);
   1622
   1623	/*
   1624	 * 3. Enable Power Well 1 (PG1).
   1625	 *    The AUX IO power wells will be enabled on demand.
   1626	 */
   1627	mutex_lock(&power_domains->lock);
   1628	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
   1629	intel_power_well_enable(dev_priv, well);
   1630	mutex_unlock(&power_domains->lock);
   1631
   1632	/* 4. Enable CDCLK. */
   1633	intel_cdclk_init_hw(dev_priv);
   1634
   1635	if (DISPLAY_VER(dev_priv) >= 12)
   1636		gen12_dbuf_slices_config(dev_priv);
   1637
   1638	/* 5. Enable DBUF. */
   1639	gen9_dbuf_enable(dev_priv);
   1640
   1641	/* 6. Setup MBUS. */
   1642	icl_mbus_init(dev_priv);
   1643
   1644	/* 7. Program arbiter BW_BUDDY registers */
   1645	if (DISPLAY_VER(dev_priv) >= 12)
   1646		tgl_bw_buddy_init(dev_priv);
   1647
   1648	/* 8. Ensure PHYs have completed calibration and adaptation */
   1649	if (IS_DG2(dev_priv))
   1650		intel_snps_phy_wait_for_calibration(dev_priv);
   1651
   1652	if (resume)
   1653		intel_dmc_load_program(dev_priv);
   1654
   1655	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
   1656	if (DISPLAY_VER(dev_priv) >= 12) {
   1657		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
   1658		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
   1659		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
   1660	}
   1661
   1662	/* Wa_14011503030:xelpd */
   1663	if (DISPLAY_VER(dev_priv) >= 13)
   1664		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
   1665}
   1666
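/*
 * icl_display_core_init() relies on read-modify-write helpers such as
 * intel_de_rmw(i915, reg, clear, set). Judging by the call sites in this
 * file, the value computation reduces to the sketch below (assumed
 * semantics, not a copy of the real helper):
 */
static u32 rmw_value_sketch(u32 old, u32 clear, u32 set)
{
	/* drop the bits in @clear, then turn on the bits in @set */
	return (old & ~clear) | set;
}
/*
 * e.g. the Wa_14011294188 call above passes clear = 0, so it only sets
 * PCH_DPMGUNIT_CLOCK_GATE_DISABLE and leaves the other bits untouched.
 */
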
   1667static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
   1668{
   1669	struct i915_power_domains *power_domains = &dev_priv->power_domains;
   1670	struct i915_power_well *well;
   1671
   1672	if (!HAS_DISPLAY(dev_priv))
   1673		return;
   1674
   1675	gen9_disable_dc_states(dev_priv);
   1676
    1677	/* 1. Disable all display engine functions -> already done */
   1678
   1679	/* 2. Disable DBUF */
   1680	gen9_dbuf_disable(dev_priv);
   1681
   1682	/* 3. Disable CD clock */
   1683	intel_cdclk_uninit_hw(dev_priv);
   1684
   1685	/*
   1686	 * 4. Disable Power Well 1 (PG1).
   1687	 *    The AUX IO power wells are toggled on demand, so they are already
   1688	 *    disabled at this point.
   1689	 */
   1690	mutex_lock(&power_domains->lock);
   1691	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
   1692	intel_power_well_disable(dev_priv, well);
   1693	mutex_unlock(&power_domains->lock);
   1694
    1695	/* 5. Disable all combo phys. */
   1696	intel_combo_phy_uninit(dev_priv);
   1697}
   1698
   1699static void chv_phy_control_init(struct drm_i915_private *dev_priv)
   1700{
   1701	struct i915_power_well *cmn_bc =
   1702		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
   1703	struct i915_power_well *cmn_d =
   1704		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
   1705
   1706	/*
   1707	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
    1708	 * workaround, never read DISPLAY_PHY_CONTROL, and
   1709	 * instead maintain a shadow copy ourselves. Use the actual
   1710	 * power well state and lane status to reconstruct the
   1711	 * expected initial value.
   1712	 */
   1713	dev_priv->chv_phy_control =
   1714		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
   1715		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
   1716		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
   1717		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
   1718		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
   1719
   1720	/*
   1721	 * If all lanes are disabled we leave the override disabled
   1722	 * with all power down bits cleared to match the state we
   1723	 * would use after disabling the port. Otherwise enable the
    1724	 * override and set the lane powerdown bits according to the
   1725	 * current lane status.
   1726	 */
   1727	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
   1728		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
   1729		unsigned int mask;
   1730
   1731		mask = status & DPLL_PORTB_READY_MASK;
   1732		if (mask == 0xf)
   1733			mask = 0x0;
   1734		else
   1735			dev_priv->chv_phy_control |=
   1736				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
   1737
   1738		dev_priv->chv_phy_control |=
   1739			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
   1740
   1741		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
   1742		if (mask == 0xf)
   1743			mask = 0x0;
   1744		else
   1745			dev_priv->chv_phy_control |=
   1746				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
   1747
   1748		dev_priv->chv_phy_control |=
   1749			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
   1750
   1751		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
   1752
   1753		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
   1754	} else {
   1755		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
   1756	}
   1757
   1758	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
   1759		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
   1760		unsigned int mask;
   1761
   1762		mask = status & DPLL_PORTD_READY_MASK;
   1763
   1764		if (mask == 0xf)
   1765			mask = 0x0;
   1766		else
   1767			dev_priv->chv_phy_control |=
   1768				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
   1769
   1770		dev_priv->chv_phy_control |=
   1771			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
   1772
   1773		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
   1774
   1775		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
   1776	} else {
   1777		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
   1778	}
   1779
   1780	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
   1781		    dev_priv->chv_phy_control);
   1782
    1783	/* Defer application of initial phy_control to enabling the power well */
   1784}
   1785
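/*
 * The shadow-copy workaround in chv_phy_control_init() is a general pattern
 * for registers that must never be read back: keep the authoritative value in
 * software and only ever write it out. A reduced sketch (hypothetical type
 * and names; the driver keeps its shadow in dev_priv->chv_phy_control):
 */
struct shadowed_reg_sketch {
	u32 shadow;	/* last value written; never read back from HW */
};

static u32 shadowed_reg_update(struct shadowed_reg_sketch *r,
			       u32 clear, u32 set)
{
	r->shadow = (r->shadow & ~clear) | set;
	return r->shadow;	/* caller writes this value to the register */
}
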
   1786static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
   1787{
   1788	struct i915_power_well *cmn =
   1789		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
   1790	struct i915_power_well *disp2d =
   1791		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
   1792
    1793	/* If the display might already be active, skip this */
   1794	if (intel_power_well_is_enabled(dev_priv, cmn) &&
   1795	    intel_power_well_is_enabled(dev_priv, disp2d) &&
   1796	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
   1797		return;
   1798
   1799	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
   1800
   1801	/* cmnlane needs DPLL registers */
   1802	intel_power_well_enable(dev_priv, disp2d);
   1803
   1804	/*
   1805	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
   1806	 * Need to assert and de-assert PHY SB reset by gating the
   1807	 * common lane power, then un-gating it.
   1808	 * Simply ungating isn't enough to reset the PHY enough to get
   1809	 * ports and lanes running.
   1810	 */
   1811	intel_power_well_disable(dev_priv, cmn);
   1812}
   1813
   1814static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
   1815{
   1816	bool ret;
   1817
   1818	vlv_punit_get(dev_priv);
   1819	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
   1820	vlv_punit_put(dev_priv);
   1821
   1822	return ret;
   1823}
   1824
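/*
 * Note that vlv_punit_is_power_gated() tests a multi-bit field, not a single
 * flag: it masks first, then compares the whole field. A sketch of why that
 * matters (illustrative helper, not the real SSPM0 encoding):
 */
static bool field_equals_sketch(u32 val, u32 mask, u32 want)
{
	/*
	 * A plain (val & want) == want test would also match other field
	 * values that merely contain the @want bits; isolating the field
	 * with @mask first makes the comparison exact.
	 */
	return (val & mask) == want;
}
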
   1825static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
   1826{
   1827	drm_WARN(&dev_priv->drm,
   1828		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
   1829		 "VED not power gated\n");
   1830}
   1831
   1832static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
   1833{
   1834	static const struct pci_device_id isp_ids[] = {
   1835		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
   1836		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
   1837		{}
   1838	};
   1839
   1840	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
   1841		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
   1842		 "ISP not power gated\n");
   1843}
   1844
   1845static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
   1846
   1847/**
   1848 * intel_power_domains_init_hw - initialize hardware power domain state
   1849 * @i915: i915 device instance
    1850	 * @resume: true if called from a resume code path
   1851 *
   1852 * This function initializes the hardware power domain state and enables all
   1853 * power wells belonging to the INIT power domain. Power wells in other
    1854	 * power domains are referenced or disabled by
   1855 * intel_modeset_readout_hw_state(). After that the reference count of each
   1856 * power well must match its HW enabled state, see
   1857 * intel_power_domains_verify_state().
   1858 *
   1859 * It will return with power domains disabled (to be enabled later by
   1860 * intel_power_domains_enable()) and must be paired with
   1861 * intel_power_domains_driver_remove().
   1862 */
   1863void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
   1864{
   1865	struct i915_power_domains *power_domains = &i915->power_domains;
   1866
   1867	power_domains->initializing = true;
   1868
   1869	if (DISPLAY_VER(i915) >= 11) {
   1870		icl_display_core_init(i915, resume);
   1871	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
   1872		bxt_display_core_init(i915, resume);
   1873	} else if (DISPLAY_VER(i915) == 9) {
   1874		skl_display_core_init(i915, resume);
   1875	} else if (IS_CHERRYVIEW(i915)) {
   1876		mutex_lock(&power_domains->lock);
   1877		chv_phy_control_init(i915);
   1878		mutex_unlock(&power_domains->lock);
   1879		assert_isp_power_gated(i915);
   1880	} else if (IS_VALLEYVIEW(i915)) {
   1881		mutex_lock(&power_domains->lock);
   1882		vlv_cmnlane_wa(i915);
   1883		mutex_unlock(&power_domains->lock);
   1884		assert_ved_power_gated(i915);
   1885		assert_isp_power_gated(i915);
   1886	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
   1887		hsw_assert_cdclk(i915);
   1888		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
   1889	} else if (IS_IVYBRIDGE(i915)) {
   1890		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
   1891	}
   1892
   1893	/*
   1894	 * Keep all power wells enabled for any dependent HW access during
   1895	 * initialization and to make sure we keep BIOS enabled display HW
   1896	 * resources powered until display HW readout is complete. We drop
   1897	 * this reference in intel_power_domains_enable().
   1898	 */
   1899	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
   1900	power_domains->init_wakeref =
   1901		intel_display_power_get(i915, POWER_DOMAIN_INIT);
   1902
    1903	/* Hold a reference to keep power well support disabled if the user asked so. */
   1904	if (!i915->params.disable_power_well) {
   1905		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
   1906		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
   1907									      POWER_DOMAIN_INIT);
   1908	}
   1909	intel_power_domains_sync_hw(i915);
   1910
   1911	power_domains->initializing = false;
   1912}
   1913
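/*
 * intel_display_power_get() hands back an intel_wakeref_t cookie that must be
 * returned via intel_display_power_put(), which is exactly what init_wakeref
 * and disable_wakeref above are for. A minimal usage sketch of the pattern
 * (illustrative function, not part of the driver):
 */
static void power_domain_scope_sketch(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
	/* ... touch HW that requires the INIT domain to be powered ... */
	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}
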
   1914/**
   1915 * intel_power_domains_driver_remove - deinitialize hw power domain state
   1916 * @i915: i915 device instance
   1917 *
   1918 * De-initializes the display power domain HW state. It also ensures that the
   1919 * device stays powered up so that the driver can be reloaded.
   1920 *
   1921 * It must be called with power domains already disabled (after a call to
   1922 * intel_power_domains_disable()) and must be paired with
   1923 * intel_power_domains_init_hw().
   1924 */
   1925void intel_power_domains_driver_remove(struct drm_i915_private *i915)
   1926{
   1927	intel_wakeref_t wakeref __maybe_unused =
   1928		fetch_and_zero(&i915->power_domains.init_wakeref);
   1929
   1930	/* Remove the refcount we took to keep power well support disabled. */
   1931	if (!i915->params.disable_power_well)
   1932		intel_display_power_put(i915, POWER_DOMAIN_INIT,
   1933					fetch_and_zero(&i915->power_domains.disable_wakeref));
   1934
   1935	intel_display_power_flush_work_sync(i915);
   1936
   1937	intel_power_domains_verify_state(i915);
   1938
   1939	/* Keep the power well enabled, but cancel its rpm wakeref. */
   1940	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
   1941}
   1942
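/*
 * fetch_and_zero(), used above to consume each wakeref exactly once, is an
 * i915 utility macro; it behaves roughly like the sketch below (read the old
 * value, zero the location, return the old value; not atomic, so it relies
 * on single-threaded teardown):
 */
#define fetch_and_zero_sketch(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);			\
	*(ptr) = (typeof(*(ptr)))0;			\
	__val;						\
})
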
   1943/**
   1944 * intel_power_domains_sanitize_state - sanitize power domains state
   1945 * @i915: i915 device instance
   1946 *
   1947 * Sanitize the power domains state during driver loading and system resume.
   1948 * The function will disable all display power wells that BIOS has enabled
    1949	 * without a user for them (any user of a power well has taken a reference
   1950 * on it by the time this function is called, after the state of all the
   1951 * pipe, encoder, etc. HW resources have been sanitized).
   1952 */
   1953void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
   1954{
   1955	struct i915_power_domains *power_domains = &i915->power_domains;
   1956	struct i915_power_well *power_well;
   1957
   1958	mutex_lock(&power_domains->lock);
   1959
   1960	for_each_power_well_reverse(i915, power_well) {
   1961		if (power_well->desc->always_on || power_well->count ||
   1962		    !intel_power_well_is_enabled(i915, power_well))
   1963			continue;
   1964
   1965		drm_dbg_kms(&i915->drm,
   1966			    "BIOS left unused %s power well enabled, disabling it\n",
   1967			    intel_power_well_name(power_well));
   1968		intel_power_well_disable(i915, power_well);
   1969	}
   1970
   1971	mutex_unlock(&power_domains->lock);
   1972}
   1973
   1974/**
   1975 * intel_power_domains_enable - enable toggling of display power wells
   1976 * @i915: i915 device instance
   1977 *
    1978	 * Enable the on-demand enabling/disabling of the display power wells. Note
    1979	 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
    1980	 * toggled only at specific points of the display modeset sequence, so they are
    1981	 * not affected by the intel_power_domains_enable()/disable() calls. The purpose
    1982	 * of these functions is to keep the rest of the power wells enabled until the end
   1983 * of display HW readout (which will acquire the power references reflecting
   1984 * the current HW state).
   1985 */
   1986void intel_power_domains_enable(struct drm_i915_private *i915)
   1987{
   1988	intel_wakeref_t wakeref __maybe_unused =
   1989		fetch_and_zero(&i915->power_domains.init_wakeref);
   1990
   1991	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
   1992	intel_power_domains_verify_state(i915);
   1993}
   1994
   1995/**
   1996 * intel_power_domains_disable - disable toggling of display power wells
   1997 * @i915: i915 device instance
   1998 *
    1999	 * Disable the on-demand enabling/disabling of the display power wells. See
   2000 * intel_power_domains_enable() for which power wells this call controls.
   2001 */
   2002void intel_power_domains_disable(struct drm_i915_private *i915)
   2003{
   2004	struct i915_power_domains *power_domains = &i915->power_domains;
   2005
   2006	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
   2007	power_domains->init_wakeref =
   2008		intel_display_power_get(i915, POWER_DOMAIN_INIT);
   2009
   2010	intel_power_domains_verify_state(i915);
   2011}
   2012
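/*
 * Putting the kerneldoc of the surrounding entry points together, the
 * intended suspend/resume pairing reads like the sketch below (illustrative
 * function; the real callers live elsewhere in the driver, and
 * I915_DRM_SUSPEND_MEM is assumed from the enum taken by
 * intel_power_domains_suspend()):
 */
static void power_domains_sleep_sketch(struct drm_i915_private *i915)
{
	intel_power_domains_disable(i915);
	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
	/* ... system sleep ... */
	intel_power_domains_resume(i915);
	intel_power_domains_enable(i915);
}
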
   2013/**
   2014 * intel_power_domains_suspend - suspend power domain state
   2015 * @i915: i915 device instance
   2016 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
   2017 *
   2018 * This function prepares the hardware power domain state before entering
   2019 * system suspend.
   2020 *
   2021 * It must be called with power domains already disabled (after a call to
   2022 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
   2023 */
   2024void intel_power_domains_suspend(struct drm_i915_private *i915,
   2025				 enum i915_drm_suspend_mode suspend_mode)
   2026{
   2027	struct i915_power_domains *power_domains = &i915->power_domains;
   2028	intel_wakeref_t wakeref __maybe_unused =
   2029		fetch_and_zero(&power_domains->init_wakeref);
   2030
   2031	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
   2032
   2033	/*
   2034	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
    2035	 * support, don't manually deinit the power domains. This also means the
    2036	 * DMC firmware will stay active; it will power down any HW
    2037	 * resources as required and also enable deeper system power states
    2038	 * that would be blocked if the firmware were inactive.
   2039	 */
   2040	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
   2041	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
   2042	    intel_dmc_has_payload(i915)) {
   2043		intel_display_power_flush_work(i915);
   2044		intel_power_domains_verify_state(i915);
   2045		return;
   2046	}
   2047
   2048	/*
   2049	 * Even if power well support was disabled we still want to disable
   2050	 * power wells if power domains must be deinitialized for suspend.
   2051	 */
   2052	if (!i915->params.disable_power_well)
   2053		intel_display_power_put(i915, POWER_DOMAIN_INIT,
   2054					fetch_and_zero(&i915->power_domains.disable_wakeref));
   2055
   2056	intel_display_power_flush_work(i915);
   2057	intel_power_domains_verify_state(i915);
   2058
   2059	if (DISPLAY_VER(i915) >= 11)
   2060		icl_display_core_uninit(i915);
   2061	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
   2062		bxt_display_core_uninit(i915);
   2063	else if (DISPLAY_VER(i915) == 9)
   2064		skl_display_core_uninit(i915);
   2065
   2066	power_domains->display_core_suspended = true;
   2067}
   2068
   2069/**
   2070 * intel_power_domains_resume - resume power domain state
   2071 * @i915: i915 device instance
   2072 *
    2073	 * This function resumes the hardware power domain state during system resume.
   2074 *
   2075 * It will return with power domain support disabled (to be enabled later by
   2076 * intel_power_domains_enable()) and must be paired with
   2077 * intel_power_domains_suspend().
   2078 */
   2079void intel_power_domains_resume(struct drm_i915_private *i915)
   2080{
   2081	struct i915_power_domains *power_domains = &i915->power_domains;
   2082
   2083	if (power_domains->display_core_suspended) {
   2084		intel_power_domains_init_hw(i915, true);
   2085		power_domains->display_core_suspended = false;
   2086	} else {
   2087		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
   2088		power_domains->init_wakeref =
   2089			intel_display_power_get(i915, POWER_DOMAIN_INIT);
   2090	}
   2091
   2092	intel_power_domains_verify_state(i915);
   2093}
   2094
   2095#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
   2096
   2097static void intel_power_domains_dump_info(struct drm_i915_private *i915)
   2098{
   2099	struct i915_power_domains *power_domains = &i915->power_domains;
   2100	struct i915_power_well *power_well;
   2101
   2102	for_each_power_well(i915, power_well) {
   2103		enum intel_display_power_domain domain;
   2104
   2105		drm_dbg(&i915->drm, "%-25s %d\n",
   2106			intel_power_well_name(power_well), intel_power_well_refcount(power_well));
   2107
   2108		for_each_power_domain(domain, intel_power_well_domains(power_well))
   2109			drm_dbg(&i915->drm, "  %-23s %d\n",
   2110				intel_display_power_domain_str(domain),
   2111				power_domains->domain_use_count[domain]);
   2112	}
   2113}
   2114
   2115/**
   2116 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
   2117 * @i915: i915 device instance
   2118 *
   2119 * Verify if the reference count of each power well matches its HW enabled
   2120 * state and the total refcount of the domains it belongs to. This must be
   2121 * called after modeset HW state sanitization, which is responsible for
   2122 * acquiring reference counts for any power wells in use and disabling the
   2123 * ones left on by BIOS but not required by any active output.
   2124 */
   2125static void intel_power_domains_verify_state(struct drm_i915_private *i915)
   2126{
   2127	struct i915_power_domains *power_domains = &i915->power_domains;
   2128	struct i915_power_well *power_well;
   2129	bool dump_domain_info;
   2130
   2131	mutex_lock(&power_domains->lock);
   2132
   2133	verify_async_put_domains_state(power_domains);
   2134
   2135	dump_domain_info = false;
   2136	for_each_power_well(i915, power_well) {
   2137		enum intel_display_power_domain domain;
   2138		int domains_count;
   2139		bool enabled;
   2140
   2141		enabled = intel_power_well_is_enabled(i915, power_well);
   2142		if ((intel_power_well_refcount(power_well) ||
   2143		     intel_power_well_is_always_on(power_well)) !=
   2144		    enabled)
   2145			drm_err(&i915->drm,
    2146				"power well %s state mismatch (refcount %d/enabled %d)\n",
   2147				intel_power_well_name(power_well),
   2148				intel_power_well_refcount(power_well), enabled);
   2149
   2150		domains_count = 0;
   2151		for_each_power_domain(domain, intel_power_well_domains(power_well))
   2152			domains_count += power_domains->domain_use_count[domain];
   2153
   2154		if (intel_power_well_refcount(power_well) != domains_count) {
   2155			drm_err(&i915->drm,
   2156				"power well %s refcount/domain refcount mismatch "
   2157				"(refcount %d/domains refcount %d)\n",
   2158				intel_power_well_name(power_well),
   2159				intel_power_well_refcount(power_well),
   2160				domains_count);
   2161			dump_domain_info = true;
   2162		}
   2163	}
   2164
   2165	if (dump_domain_info) {
   2166		static bool dumped;
   2167
   2168		if (!dumped) {
   2169			intel_power_domains_dump_info(i915);
   2170			dumped = true;
   2171		}
   2172	}
   2173
   2174	mutex_unlock(&power_domains->lock);
   2175}
   2176
   2177#else
   2178
   2179static void intel_power_domains_verify_state(struct drm_i915_private *i915)
   2180{
   2181}
   2182
   2183#endif
   2184
   2185void intel_display_power_suspend_late(struct drm_i915_private *i915)
   2186{
   2187	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
   2188	    IS_BROXTON(i915)) {
   2189		bxt_enable_dc9(i915);
   2190	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
   2191		hsw_enable_pc8(i915);
   2192	}
   2193
   2194	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
   2195	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
   2196		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
   2197}
   2198
   2199void intel_display_power_resume_early(struct drm_i915_private *i915)
   2200{
   2201	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
   2202	    IS_BROXTON(i915)) {
   2203		gen9_sanitize_dc_state(i915);
   2204		bxt_disable_dc9(i915);
   2205	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
   2206		hsw_disable_pc8(i915);
   2207	}
   2208
   2209	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
   2210	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
   2211		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
   2212}
   2213
   2214void intel_display_power_suspend(struct drm_i915_private *i915)
   2215{
   2216	if (DISPLAY_VER(i915) >= 11) {
   2217		icl_display_core_uninit(i915);
   2218		bxt_enable_dc9(i915);
   2219	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
   2220		bxt_display_core_uninit(i915);
   2221		bxt_enable_dc9(i915);
   2222	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
   2223		hsw_enable_pc8(i915);
   2224	}
   2225}
   2226
   2227void intel_display_power_resume(struct drm_i915_private *i915)
   2228{
   2229	if (DISPLAY_VER(i915) >= 11) {
   2230		bxt_disable_dc9(i915);
   2231		icl_display_core_init(i915, true);
   2232		if (intel_dmc_has_payload(i915)) {
   2233			if (i915->dmc.allowed_dc_mask &
   2234			    DC_STATE_EN_UPTO_DC6)
   2235				skl_enable_dc6(i915);
   2236			else if (i915->dmc.allowed_dc_mask &
   2237				 DC_STATE_EN_UPTO_DC5)
   2238				gen9_enable_dc5(i915);
   2239		}
   2240	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
   2241		bxt_disable_dc9(i915);
   2242		bxt_display_core_init(i915, true);
   2243		if (intel_dmc_has_payload(i915) &&
   2244		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
   2245			gen9_enable_dc5(i915);
   2246	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
   2247		hsw_disable_pc8(i915);
   2248	}
   2249}
   2250
   2251void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
   2252{
   2253	struct i915_power_domains *power_domains = &i915->power_domains;
   2254	int i;
   2255
   2256	mutex_lock(&power_domains->lock);
   2257
   2258	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
   2259	for (i = 0; i < power_domains->power_well_count; i++) {
   2260		struct i915_power_well *power_well;
   2261		enum intel_display_power_domain power_domain;
   2262
   2263		power_well = &power_domains->power_wells[i];
   2264		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
   2265			   intel_power_well_refcount(power_well));
   2266
   2267		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
   2268			seq_printf(m, "  %-23s %d\n",
   2269				   intel_display_power_domain_str(power_domain),
   2270				   power_domains->domain_use_count[power_domain]);
   2271	}
   2272
   2273	mutex_unlock(&power_domains->lock);
   2274}
   2275
   2276struct intel_ddi_port_domains {
   2277	enum port port_start;
   2278	enum port port_end;
   2279	enum aux_ch aux_ch_start;
   2280	enum aux_ch aux_ch_end;
   2281
   2282	enum intel_display_power_domain ddi_lanes;
   2283	enum intel_display_power_domain ddi_io;
   2284	enum intel_display_power_domain aux_legacy_usbc;
   2285	enum intel_display_power_domain aux_tbt;
   2286};
   2287
   2288static const struct intel_ddi_port_domains
   2289i9xx_port_domains[] = {
   2290	{
   2291		.port_start = PORT_A,
   2292		.port_end = PORT_F,
   2293		.aux_ch_start = AUX_CH_A,
   2294		.aux_ch_end = AUX_CH_F,
   2295
   2296		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
   2297		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
   2298		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
   2299		.aux_tbt = POWER_DOMAIN_INVALID,
   2300	},
   2301};
   2302
   2303static const struct intel_ddi_port_domains
   2304d11_port_domains[] = {
   2305	{
   2306		.port_start = PORT_A,
   2307		.port_end = PORT_B,
   2308		.aux_ch_start = AUX_CH_A,
   2309		.aux_ch_end = AUX_CH_B,
   2310
   2311		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
   2312		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
   2313		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
   2314		.aux_tbt = POWER_DOMAIN_INVALID,
   2315	}, {
   2316		.port_start = PORT_C,
   2317		.port_end = PORT_F,
   2318		.aux_ch_start = AUX_CH_C,
   2319		.aux_ch_end = AUX_CH_F,
   2320
   2321		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
   2322		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
   2323		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
   2324		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
   2325	},
   2326};
   2327
   2328static const struct intel_ddi_port_domains
   2329d12_port_domains[] = {
   2330	{
   2331		.port_start = PORT_A,
   2332		.port_end = PORT_C,
   2333		.aux_ch_start = AUX_CH_A,
   2334		.aux_ch_end = AUX_CH_C,
   2335
   2336		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
   2337		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
   2338		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
   2339		.aux_tbt = POWER_DOMAIN_INVALID,
   2340	}, {
   2341		.port_start = PORT_TC1,
   2342		.port_end = PORT_TC6,
   2343		.aux_ch_start = AUX_CH_USBC1,
   2344		.aux_ch_end = AUX_CH_USBC6,
   2345
   2346		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
   2347		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
   2348		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
   2349		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
   2350	},
   2351};
   2352
   2353static const struct intel_ddi_port_domains
   2354d13_port_domains[] = {
   2355	{
   2356		.port_start = PORT_A,
   2357		.port_end = PORT_C,
   2358		.aux_ch_start = AUX_CH_A,
   2359		.aux_ch_end = AUX_CH_C,
   2360
   2361		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
   2362		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
   2363		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
   2364		.aux_tbt = POWER_DOMAIN_INVALID,
   2365	}, {
   2366		.port_start = PORT_TC1,
   2367		.port_end = PORT_TC4,
   2368		.aux_ch_start = AUX_CH_USBC1,
   2369		.aux_ch_end = AUX_CH_USBC4,
   2370
   2371		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
   2372		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
   2373		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
   2374		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
   2375	}, {
   2376		.port_start = PORT_D_XELPD,
   2377		.port_end = PORT_E_XELPD,
   2378		.aux_ch_start = AUX_CH_D_XELPD,
   2379		.aux_ch_end = AUX_CH_E_XELPD,
   2380
   2381		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
   2382		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
   2383		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
   2384		.aux_tbt = POWER_DOMAIN_INVALID,
   2385	},
   2386};
   2387
   2388static void
   2389intel_port_domains_for_platform(struct drm_i915_private *i915,
   2390				const struct intel_ddi_port_domains **domains,
   2391				int *domains_size)
   2392{
   2393	if (DISPLAY_VER(i915) >= 13) {
   2394		*domains = d13_port_domains;
   2395		*domains_size = ARRAY_SIZE(d13_port_domains);
   2396	} else if (DISPLAY_VER(i915) >= 12) {
   2397		*domains = d12_port_domains;
   2398		*domains_size = ARRAY_SIZE(d12_port_domains);
   2399	} else if (DISPLAY_VER(i915) >= 11) {
   2400		*domains = d11_port_domains;
   2401		*domains_size = ARRAY_SIZE(d11_port_domains);
   2402	} else {
   2403		*domains = i9xx_port_domains;
   2404		*domains_size = ARRAY_SIZE(i9xx_port_domains);
   2405	}
   2406}
   2407
   2408static const struct intel_ddi_port_domains *
   2409intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
   2410{
   2411	const struct intel_ddi_port_domains *domains;
   2412	int domains_size;
   2413	int i;
   2414
   2415	intel_port_domains_for_platform(i915, &domains, &domains_size);
   2416	for (i = 0; i < domains_size; i++)
   2417		if (port >= domains[i].port_start && port <= domains[i].port_end)
   2418			return &domains[i];
   2419
   2420	return NULL;
   2421}
   2422
   2423enum intel_display_power_domain
   2424intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
   2425{
   2426	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
   2427
   2428	if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
   2429		return POWER_DOMAIN_PORT_DDI_IO_A;
   2430
   2431	return domains->ddi_io + (int)(port - domains->port_start);
   2432}
   2433
   2434enum intel_display_power_domain
   2435intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
   2436{
   2437	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
   2438
   2439	if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
   2440		return POWER_DOMAIN_PORT_DDI_LANES_A;
   2441
   2442	return domains->ddi_lanes + (int)(port - domains->port_start);
   2443}
   2444
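/*
 * The two DDI helpers above (and the AUX helpers below) map a port or AUX
 * channel to a power domain by adding an offset to the first domain of the
 * matching range. That only works while the relevant POWER_DOMAIN_*
 * enumerators are contiguous and ordered like the ports they correspond to.
 * A reduced sketch of the same arithmetic with hypothetical enums:
 */
enum sketch_port { SK_PORT_A, SK_PORT_B, SK_PORT_C };
enum sketch_domain { SK_LANES_A, SK_LANES_B, SK_LANES_C };

static enum sketch_domain sketch_lanes_domain(enum sketch_port port)
{
	/* valid only while both enums stay contiguous and parallel */
	return SK_LANES_A + (int)(port - SK_PORT_A);
}
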
   2445static const struct intel_ddi_port_domains *
   2446intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
   2447{
   2448	const struct intel_ddi_port_domains *domains;
   2449	int domains_size;
   2450	int i;
   2451
   2452	intel_port_domains_for_platform(i915, &domains, &domains_size);
   2453	for (i = 0; i < domains_size; i++)
   2454		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
   2455			return &domains[i];
   2456
   2457	return NULL;
   2458}
   2459
   2460enum intel_display_power_domain
   2461intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
   2462{
   2463	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
   2464
   2465	if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
   2466		return POWER_DOMAIN_AUX_A;
   2467
   2468	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
   2469}
   2470
   2471enum intel_display_power_domain
   2472intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
   2473{
   2474	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
   2475
   2476	if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
   2477		return POWER_DOMAIN_AUX_TBT1;
   2478
   2479	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
   2480}