cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_psr.c (76529B)


      1/*
      2 * Copyright © 2014 Intel Corporation
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     21 * DEALINGS IN THE SOFTWARE.
     22 */
     23
     24#include <drm/drm_atomic_helper.h>
     25#include <drm/drm_damage_helper.h>
     26
     27#include "display/intel_dp.h"
     28
     29#include "i915_drv.h"
     30#include "intel_atomic.h"
     31#include "intel_crtc.h"
     32#include "intel_de.h"
     33#include "intel_display_types.h"
     34#include "intel_dp_aux.h"
     35#include "intel_hdmi.h"
     36#include "intel_psr.h"
     37#include "intel_snps_phy.h"
     38#include "skl_universal_plane.h"
     39
     40/**
     41 * DOC: Panel Self Refresh (PSR/SRD)
     42 *
      43 * Since Haswell the Display controller supports Panel Self-Refresh on display
      44 * panels which have a remote frame buffer (RFB) implemented according to the PSR
      45 * spec in eDP 1.3. The PSR feature allows the display to go to lower standby states
      46 * when the system is idle but the display is on, as it completely eliminates display
      47 * refresh requests to DDR memory as long as the frame buffer for that
      48 * display is unchanged.
     49 *
     50 * Panel Self Refresh must be supported by both Hardware (source) and
     51 * Panel (sink).
     52 *
     53 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
     54 * to power down the link and memory controller. For DSI panels the same idea
     55 * is called "manual mode".
     56 *
     57 * The implementation uses the hardware-based PSR support which automatically
     58 * enters/exits self-refresh mode. The hardware takes care of sending the
     59 * required DP aux message and could even retrain the link (that part isn't
     60 * enabled yet though). The hardware also keeps track of any frontbuffer
     61 * changes to know when to exit self-refresh mode again. Unfortunately that
      62 * part doesn't work too well, which is why the i915 PSR support uses the
     63 * software frontbuffer tracking to make sure it doesn't miss a screen
     64 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
     65 * get called by the frontbuffer tracking code. Note that because of locking
     66 * issues the self-refresh re-enable code is done from a work queue, which
      67 * must be correctly synchronized/cancelled when shutting down the pipe.
     68 *
     69 * DC3CO (DC3 clock off)
     70 *
      71 * On top of PSR2, GEN12 adds an intermediate power saving state that turns
      72 * the clock off automatically during the PSR2 idle state.
      73 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
      74 * entry/exit allows the HW to enter a low-power state even when page flipping
      75 * periodically (for instance a 30fps video playback scenario).
      76 *
      77 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
      78 * it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after
      79 * 6 frames; if no other flip occurs and that work function executes, DC3CO is
      80 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
      81 * of another flip.
      82 * Front buffer modifications do not trigger DC3CO activation on purpose as it
      83 * would bring a lot of complexity and most modern systems will only
      84 * use page flips.
     85 */
     86
     87static bool psr_global_enabled(struct intel_dp *intel_dp)
     88{
     89	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
     90
     91	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
     92	case I915_PSR_DEBUG_DEFAULT:
     93		return i915->params.enable_psr;
     94	case I915_PSR_DEBUG_DISABLE:
     95		return false;
     96	default:
     97		return true;
     98	}
     99}
    100
    101static bool psr2_global_enabled(struct intel_dp *intel_dp)
    102{
    103	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    104
    105	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
    106	case I915_PSR_DEBUG_DISABLE:
    107	case I915_PSR_DEBUG_FORCE_PSR1:
    108		return false;
    109	default:
    110		if (i915->params.enable_psr == 1)
    111			return false;
    112		return true;
    113	}
    114}
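
/*
 * A rough summary of the two helpers above (derived from this file, not from
 * BSpec): enable_psr == 0 disables PSR entirely, enable_psr == 1 allows PSR1
 * only, and other values leave both PSR1 and PSR2 eligible unless overridden
 * through the I915_PSR_DEBUG_* interface.
 */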
    115
    116static void psr_irq_control(struct intel_dp *intel_dp)
    117{
    118	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    119	enum transcoder trans_shift;
    120	i915_reg_t imr_reg;
    121	u32 mask, val;
    122
    123	/*
    124	 * gen12+ has registers relative to transcoder and one per transcoder
    125	 * using the same bit definition: handle it as TRANSCODER_EDP to force
    126	 * 0 shift in bit definition
    127	 */
    128	if (DISPLAY_VER(dev_priv) >= 12) {
    129		trans_shift = 0;
    130		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
    131	} else {
    132		trans_shift = intel_dp->psr.transcoder;
    133		imr_reg = EDP_PSR_IMR;
    134	}
    135
    136	mask = EDP_PSR_ERROR(trans_shift);
    137	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
    138		mask |= EDP_PSR_POST_EXIT(trans_shift) |
    139			EDP_PSR_PRE_ENTRY(trans_shift);
    140
    141	/* Warning: it is masking/setting reserved bits too */
    142	val = intel_de_read(dev_priv, imr_reg);
    143	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
    144	val |= ~mask;
    145	intel_de_write(dev_priv, imr_reg, val);
    146}
    147
    148static void psr_event_print(struct drm_i915_private *i915,
    149			    u32 val, bool psr2_enabled)
    150{
    151	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
    152	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
    153		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
    154	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
    155		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
    156	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
    157		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
    158	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
    159		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
    160	if (val & PSR_EVENT_GRAPHICS_RESET)
    161		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
    162	if (val & PSR_EVENT_PCH_INTERRUPT)
    163		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
    164	if (val & PSR_EVENT_MEMORY_UP)
    165		drm_dbg_kms(&i915->drm, "\tMemory up\n");
    166	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
    167		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
    168	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
    169		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
    170	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
    171		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
    172	if (val & PSR_EVENT_REGISTER_UPDATE)
    173		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
    174	if (val & PSR_EVENT_HDCP_ENABLE)
    175		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
    176	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
    177		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
    178	if (val & PSR_EVENT_VBI_ENABLE)
    179		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
    180	if (val & PSR_EVENT_LPSP_MODE_EXIT)
    181		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
    182	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
    183		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
    184}
    185
    186void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
    187{
    188	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
    189	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    190	ktime_t time_ns =  ktime_get();
    191	enum transcoder trans_shift;
    192	i915_reg_t imr_reg;
    193
    194	if (DISPLAY_VER(dev_priv) >= 12) {
    195		trans_shift = 0;
    196		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
    197	} else {
    198		trans_shift = intel_dp->psr.transcoder;
    199		imr_reg = EDP_PSR_IMR;
    200	}
    201
    202	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
    203		intel_dp->psr.last_entry_attempt = time_ns;
    204		drm_dbg_kms(&dev_priv->drm,
    205			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
    206			    transcoder_name(cpu_transcoder));
    207	}
    208
    209	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
    210		intel_dp->psr.last_exit = time_ns;
    211		drm_dbg_kms(&dev_priv->drm,
    212			    "[transcoder %s] PSR exit completed\n",
    213			    transcoder_name(cpu_transcoder));
    214
    215		if (DISPLAY_VER(dev_priv) >= 9) {
    216			u32 val = intel_de_read(dev_priv,
    217						PSR_EVENT(cpu_transcoder));
    218			bool psr2_enabled = intel_dp->psr.psr2_enabled;
    219
    220			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
    221				       val);
    222			psr_event_print(dev_priv, val, psr2_enabled);
    223		}
    224	}
    225
    226	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
    227		u32 val;
    228
    229		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
    230			 transcoder_name(cpu_transcoder));
    231
    232		intel_dp->psr.irq_aux_error = true;
    233
     234		/*
     235		 * If this interrupt is not masked it will keep
     236		 * firing so fast that it prevents the scheduled
     237		 * work from running.
     238		 * Also, after a PSR error we don't want to arm PSR
     239		 * again, so we don't care about unmasking the interrupt
     240		 * or clearing irq_aux_error.
     241		 */
    242		val = intel_de_read(dev_priv, imr_reg);
    243		val |= EDP_PSR_ERROR(trans_shift);
    244		intel_de_write(dev_priv, imr_reg, val);
    245
    246		schedule_work(&intel_dp->psr.work);
    247	}
    248}
    249
    250static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
    251{
    252	u8 alpm_caps = 0;
    253
    254	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
    255			      &alpm_caps) != 1)
    256		return false;
    257	return alpm_caps & DP_ALPM_CAP;
    258}
    259
    260static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
    261{
    262	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    263	u8 val = 8; /* assume the worst if we can't read the value */
    264
    265	if (drm_dp_dpcd_readb(&intel_dp->aux,
    266			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
    267		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
    268	else
    269		drm_dbg_kms(&i915->drm,
    270			    "Unable to get sink synchronization latency, assuming 8 frames\n");
    271	return val;
    272}
    273
    274static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
    275{
    276	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    277	ssize_t r;
    278	u16 w;
    279	u8 y;
    280
     281	/* If the sink doesn't have specific granularity requirements, set legacy ones */
    282	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
    283		/* As PSR2 HW sends full lines, we do not care about x granularity */
    284		w = 4;
    285		y = 4;
    286		goto exit;
    287	}
    288
    289	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
    290	if (r != 2)
    291		drm_dbg_kms(&i915->drm,
    292			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
    293	/*
    294	 * Spec says that if the value read is 0 the default granularity should
    295	 * be used instead.
    296	 */
    297	if (r != 2 || w == 0)
    298		w = 4;
    299
    300	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
    301	if (r != 1) {
    302		drm_dbg_kms(&i915->drm,
    303			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
    304		y = 4;
    305	}
    306	if (y == 0)
    307		y = 1;
    308
    309exit:
    310	intel_dp->psr.su_w_granularity = w;
    311	intel_dp->psr.su_y_granularity = y;
    312}
    313
    314void intel_psr_init_dpcd(struct intel_dp *intel_dp)
    315{
    316	struct drm_i915_private *dev_priv =
    317		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
    318
    319	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
    320			 sizeof(intel_dp->psr_dpcd));
    321
    322	if (!intel_dp->psr_dpcd[0])
    323		return;
    324	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
    325		    intel_dp->psr_dpcd[0]);
    326
    327	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
    328		drm_dbg_kms(&dev_priv->drm,
    329			    "PSR support not currently available for this panel\n");
    330		return;
    331	}
    332
    333	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
    334		drm_dbg_kms(&dev_priv->drm,
    335			    "Panel lacks power state control, PSR cannot be enabled\n");
    336		return;
    337	}
    338
    339	intel_dp->psr.sink_support = true;
    340	intel_dp->psr.sink_sync_latency =
    341		intel_dp_get_sink_sync_latency(intel_dp);
    342
    343	if (DISPLAY_VER(dev_priv) >= 9 &&
    344	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
    345		bool y_req = intel_dp->psr_dpcd[1] &
    346			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
    347		bool alpm = intel_dp_get_alpm_status(intel_dp);
    348
     349		/*
     350		 * All panels that support PSR version 03h (PSR2 +
     351		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
     352		 * only sure that it is going to be used when required by the
     353		 * panel. This way the panel is capable of doing selective updates
     354		 * without an aux frame sync.
     355		 *
     356		 * To support PSR version 02h and PSR version 03h panels without
     357		 * the Y-coordinate requirement we would need to enable
     358		 * GTC first.
     359		 */
    360		intel_dp->psr.sink_psr2_support = y_req && alpm;
    361		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
    362			    intel_dp->psr.sink_psr2_support ? "" : "not ");
    363
    364		if (intel_dp->psr.sink_psr2_support) {
    365			intel_dp->psr.colorimetry_support =
    366				intel_dp_get_colorimetry_status(intel_dp);
    367			intel_dp_get_su_granularity(intel_dp);
    368		}
    369	}
    370}
    371
    372static void intel_psr_enable_sink(struct intel_dp *intel_dp)
    373{
    374	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    375	u8 dpcd_val = DP_PSR_ENABLE;
    376
    377	/* Enable ALPM at sink for psr2 */
    378	if (intel_dp->psr.psr2_enabled) {
    379		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
    380				   DP_ALPM_ENABLE |
    381				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
    382
    383		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
    384	} else {
    385		if (intel_dp->psr.link_standby)
    386			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
    387
    388		if (DISPLAY_VER(dev_priv) >= 8)
    389			dpcd_val |= DP_PSR_CRC_VERIFICATION;
    390	}
    391
    392	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
    393		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
    394
    395	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
    396
    397	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
    398}
    399
    400static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
    401{
    402	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    403	u32 val = 0;
    404
    405	if (DISPLAY_VER(dev_priv) >= 11)
    406		val |= EDP_PSR_TP4_TIME_0US;
    407
    408	if (dev_priv->params.psr_safest_params) {
    409		val |= EDP_PSR_TP1_TIME_2500us;
    410		val |= EDP_PSR_TP2_TP3_TIME_2500us;
    411		goto check_tp3_sel;
    412	}
    413
    414	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
    415		val |= EDP_PSR_TP1_TIME_0us;
    416	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
    417		val |= EDP_PSR_TP1_TIME_100us;
    418	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
    419		val |= EDP_PSR_TP1_TIME_500us;
    420	else
    421		val |= EDP_PSR_TP1_TIME_2500us;
    422
    423	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
    424		val |= EDP_PSR_TP2_TP3_TIME_0us;
    425	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
    426		val |= EDP_PSR_TP2_TP3_TIME_100us;
    427	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
    428		val |= EDP_PSR_TP2_TP3_TIME_500us;
    429	else
    430		val |= EDP_PSR_TP2_TP3_TIME_2500us;
    431
    432check_tp3_sel:
    433	if (intel_dp_source_supports_tps3(dev_priv) &&
    434	    drm_dp_tps3_supported(intel_dp->dpcd))
    435		val |= EDP_PSR_TP1_TP3_SEL;
    436	else
    437		val |= EDP_PSR_TP1_TP2_SEL;
    438
    439	return val;
    440}
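
/*
 * Illustrative example of the bucketing above (hypothetical VBT values): a
 * tp1_wakeup_time_us of 200 falls into the <= 500 bucket and selects
 * EDP_PSR_TP1_TIME_500us, while a tp2_tp3_wakeup_time_us of 0 selects
 * EDP_PSR_TP2_TP3_TIME_0us; anything above 500 us gets the 2500 us encoding.
 */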
    441
    442static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
    443{
    444	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    445	int idle_frames;
    446
    447	/* Let's use 6 as the minimum to cover all known cases including the
    448	 * off-by-one issue that HW has in some cases.
    449	 */
    450	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
    451	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
    452
    453	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
    454		idle_frames = 0xf;
    455
    456	return idle_frames;
    457}
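
/*
 * Worked example for the helper above (illustrative numbers): with a VBT
 * idle_frames of 2 and a sink_sync_latency of 8, the result is
 * max(max(6, 2), 8 + 1) = 9 idle frames; anything above 0xf is clamped
 * (with a WARN) to the 0xf limit checked above.
 */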
    458
    459static void hsw_activate_psr1(struct intel_dp *intel_dp)
    460{
    461	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    462	u32 max_sleep_time = 0x1f;
    463	u32 val = EDP_PSR_ENABLE;
    464
    465	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
    466
    467	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
    468	if (IS_HASWELL(dev_priv))
    469		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
    470
    471	if (intel_dp->psr.link_standby)
    472		val |= EDP_PSR_LINK_STANDBY;
    473
    474	val |= intel_psr1_get_tp_time(intel_dp);
    475
    476	if (DISPLAY_VER(dev_priv) >= 8)
    477		val |= EDP_PSR_CRC_ENABLE;
    478
    479	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
    480		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
    481	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
    482}
    483
    484static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
    485{
    486	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    487	u32 val = 0;
    488
    489	if (dev_priv->params.psr_safest_params)
    490		return EDP_PSR2_TP2_TIME_2500us;
    491
    492	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
    493	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
    494		val |= EDP_PSR2_TP2_TIME_50us;
    495	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
    496		val |= EDP_PSR2_TP2_TIME_100us;
    497	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
    498		val |= EDP_PSR2_TP2_TIME_500us;
    499	else
    500		val |= EDP_PSR2_TP2_TIME_2500us;
    501
    502	return val;
    503}
    504
    505static void hsw_activate_psr2(struct intel_dp *intel_dp)
    506{
    507	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    508	u32 val = EDP_PSR2_ENABLE;
    509
    510	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
    511
    512	if (!IS_ALDERLAKE_P(dev_priv))
    513		val |= EDP_SU_TRACK_ENABLE;
    514
    515	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
    516		val |= EDP_Y_COORDINATE_ENABLE;
    517
    518	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
    519	val |= intel_psr2_get_tp_time(intel_dp);
    520
    521	/* Wa_22012278275:adl-p */
    522	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
    523		static const u8 map[] = {
    524			2, /* 5 lines */
    525			1, /* 6 lines */
    526			0, /* 7 lines */
    527			3, /* 8 lines */
    528			6, /* 9 lines */
    529			5, /* 10 lines */
    530			4, /* 11 lines */
    531			7, /* 12 lines */
    532		};
    533		/*
    534		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
     535		 * comments below for more information
    536		 */
    537		u32 tmp, lines = 7;
    538
    539		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
    540
    541		tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
    542		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
    543		val |= tmp;
    544
    545		tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
    546		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
    547		val |= tmp;
    548	} else if (DISPLAY_VER(dev_priv) >= 12) {
     549		/*
     550		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the default
     551		 * values from BSpec. To get optimal power
     552		 * consumption, modes below 4k resolution need to decrease
     553		 * IO_BUFFER_WAKE and FAST_WAKE, and modes above 4K resolution
     554		 * need to increase IO_BUFFER_WAKE and FAST_WAKE.
     555		 */
    556		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
    557		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
    558		val |= TGL_EDP_PSR2_FAST_WAKE(7);
    559	} else if (DISPLAY_VER(dev_priv) >= 9) {
    560		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
    561		val |= EDP_PSR2_FAST_WAKE(7);
    562	}
    563
    564	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
    565		val |= EDP_PSR2_SU_SDP_SCANLINE;
    566
    567	if (intel_dp->psr.psr2_sel_fetch_enabled) {
    568		u32 tmp;
    569
    570		/* Wa_1408330847 */
    571		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
    572			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
    573				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
    574				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);
    575
    576		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
    577		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
    578	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
    579		intel_de_write(dev_priv,
    580			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
    581	}
    582
    583	/*
     584	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
     585	 * recommends keeping this bit unset while PSR2 is enabled.
    586	 */
    587	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
    588
    589	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
    590}
    591
    592static bool
    593transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
    594{
    595	if (IS_ALDERLAKE_P(dev_priv))
    596		return trans == TRANSCODER_A || trans == TRANSCODER_B;
    597	else if (DISPLAY_VER(dev_priv) >= 12)
    598		return trans == TRANSCODER_A;
    599	else
    600		return trans == TRANSCODER_EDP;
    601}
    602
    603static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
    604{
    605	if (!cstate || !cstate->hw.active)
    606		return 0;
    607
    608	return DIV_ROUND_UP(1000 * 1000,
    609			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
    610}
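
/*
 * For example (illustrative mode): a 60 Hz refresh rate gives
 * DIV_ROUND_UP(1000 * 1000, 60) = 16667 us per frame, while an inactive CRTC
 * returns 0 so callers can treat it as "no frame time".
 */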
    611
    612static void psr2_program_idle_frames(struct intel_dp *intel_dp,
    613				     u32 idle_frames)
    614{
    615	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    616	u32 val;
    617
    618	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
    619	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
    620	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
    621	val |= idle_frames;
    622	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
    623}
    624
    625static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
    626{
    627	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    628
    629	psr2_program_idle_frames(intel_dp, 0);
    630	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
    631}
    632
    633static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
    634{
    635	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    636
    637	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
    638	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
    639}
    640
    641static void tgl_dc3co_disable_work(struct work_struct *work)
    642{
    643	struct intel_dp *intel_dp =
    644		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
    645
    646	mutex_lock(&intel_dp->psr.lock);
    647	/* If delayed work is pending, it is not idle */
    648	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
    649		goto unlock;
    650
    651	tgl_psr2_disable_dc3co(intel_dp);
    652unlock:
    653	mutex_unlock(&intel_dp->psr.lock);
    654}
    655
    656static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
    657{
    658	if (!intel_dp->psr.dc3co_exitline)
    659		return;
    660
    661	cancel_delayed_work(&intel_dp->psr.dc3co_work);
     662	/* Before PSR2 exit, disallow DC3CO */
    663	tgl_psr2_disable_dc3co(intel_dp);
    664}
    665
    666static bool
    667dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
    668			      struct intel_crtc_state *crtc_state)
    669{
    670	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
    671	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
    672	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    673	enum port port = dig_port->base.port;
    674
    675	if (IS_ALDERLAKE_P(dev_priv))
    676		return pipe <= PIPE_B && port <= PORT_B;
    677	else
    678		return pipe == PIPE_A && port == PORT_A;
    679}
    680
    681static void
    682tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
    683				  struct intel_crtc_state *crtc_state)
    684{
    685	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
    686	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    687	u32 exit_scanlines;
    688
    689	/*
    690	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
    691	 * disable DC3CO until the changed dc3co activating/deactivating sequence
    692	 * is applied. B.Specs:49196
    693	 */
    694	return;
    695
    696	/*
     697	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
    698	 * TODO: when the issue is addressed, this restriction should be removed.
    699	 */
    700	if (crtc_state->enable_psr2_sel_fetch)
    701		return;
    702
    703	if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
    704		return;
    705
    706	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
    707		return;
    708
    709	/* Wa_16011303918:adl-p */
    710	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
    711		return;
    712
    713	/*
    714	 * DC3CO Exit time 200us B.Spec 49196
    715	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
    716	 */
    717	exit_scanlines =
    718		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
    719
    720	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
    721		return;
    722
    723	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
    724}
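
/*
 * Worked example for the exitline math above (illustrative mode, not from
 * BSpec): with a line time of 10 us, 200 us of DC3CO exit time yields
 * intel_usecs_to_scanlines() == 20, so exit_scanlines == 21 and a 1080-line
 * vdisplay would give dc3co_exitline = 1080 - 21 = 1059. Note that the
 * unconditional return near the top currently keeps this path disabled.
 */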
    725
    726static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
    727					      struct intel_crtc_state *crtc_state)
    728{
    729	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    730
    731	if (!dev_priv->params.enable_psr2_sel_fetch &&
    732	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
    733		drm_dbg_kms(&dev_priv->drm,
    734			    "PSR2 sel fetch not enabled, disabled by parameter\n");
    735		return false;
    736	}
    737
    738	if (crtc_state->uapi.async_flip) {
    739		drm_dbg_kms(&dev_priv->drm,
    740			    "PSR2 sel fetch not enabled, async flip enabled\n");
    741		return false;
    742	}
    743
    744	/* Wa_14010254185 Wa_14010103792 */
    745	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
    746		drm_dbg_kms(&dev_priv->drm,
    747			    "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
    748		return false;
    749	}
    750
    751	return crtc_state->enable_psr2_sel_fetch = true;
    752}
    753
    754static bool psr2_granularity_check(struct intel_dp *intel_dp,
    755				   struct intel_crtc_state *crtc_state)
    756{
    757	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    758	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
    759	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
    760	u16 y_granularity = 0;
    761
     762	/* PSR2 HW only sends full lines, so we only need to validate the width */
    763	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
    764		return false;
    765
    766	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
    767		return false;
    768
    769	/* HW tracking is only aligned to 4 lines */
    770	if (!crtc_state->enable_psr2_sel_fetch)
    771		return intel_dp->psr.su_y_granularity == 4;
    772
     773	/*
     774	 * adl_p has 1 line granularity. For other platforms with SW tracking we
     775	 * can adjust the y coordinates to match the sink requirement if it is a
     776	 * multiple of 4.
     777	 */
    778	if (IS_ALDERLAKE_P(dev_priv))
    779		y_granularity = intel_dp->psr.su_y_granularity;
    780	else if (intel_dp->psr.su_y_granularity <= 2)
    781		y_granularity = 4;
    782	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
    783		y_granularity = intel_dp->psr.su_y_granularity;
    784
    785	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
    786		return false;
    787
    788	crtc_state->su_y_granularity = y_granularity;
    789	return true;
    790}
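
/*
 * Example of the granularity selection above (hypothetical sink values): with
 * selective fetch enabled on a non-ADL-P platform and su_y_granularity == 8
 * (a multiple of 4), y_granularity becomes 8 and the check passes only if
 * crtc_vdisplay is divisible by 8; a sink reporting su_y_granularity == 2
 * would be bumped to 4 instead.
 */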
    791
    792static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
    793							struct intel_crtc_state *crtc_state)
    794{
    795	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
    796	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    797	u32 hblank_total, hblank_ns, req_ns;
    798
    799	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
    800	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
    801
    802	/* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
    803	req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
    804
    805	if ((hblank_ns - req_ns) > 100)
    806		return true;
    807
    808	if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
    809		return false;
    810
    811	crtc_state->req_psr2_sdp_prior_scanline = true;
    812	return true;
    813}
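
/*
 * Worked example of the timing check above (illustrative link/mode values):
 * with 4 lanes and port_clock == 540000 (HBR2), req_ns = (72 / 4) * 1000 /
 * 540 = 33 ns; a 160-pixel hblank at crtc_clock == 148500 gives hblank_ns =
 * 1000000 * 160 / 148500 ~= 1077 ns, so hblank_ns - req_ns > 100 and the SDP
 * fits without requiring the "SDP prior scanline" indication.
 */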
    814
    815static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
    816				    struct intel_crtc_state *crtc_state)
    817{
    818	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    819	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
    820	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
    821	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
    822
    823	if (!intel_dp->psr.sink_psr2_support)
    824		return false;
    825
    826	/* JSL and EHL only supports eDP 1.3 */
    827	if (IS_JSL_EHL(dev_priv)) {
    828		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
    829		return false;
    830	}
    831
    832	/* Wa_16011181250 */
    833	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
    834	    IS_DG2(dev_priv)) {
    835		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
    836		return false;
    837	}
    838
    839	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
    840		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
    841		return false;
    842	}
    843
    844	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
    845		drm_dbg_kms(&dev_priv->drm,
    846			    "PSR2 not supported in transcoder %s\n",
    847			    transcoder_name(crtc_state->cpu_transcoder));
    848		return false;
    849	}
    850
    851	if (!psr2_global_enabled(intel_dp)) {
    852		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
    853		return false;
    854	}
    855
    856	/*
    857	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
    858	 * resolution requires DSC to be enabled, priority is given to DSC
    859	 * over PSR2.
    860	 */
    861	if (crtc_state->dsc.compression_enable) {
    862		drm_dbg_kms(&dev_priv->drm,
    863			    "PSR2 cannot be enabled since DSC is enabled\n");
    864		return false;
    865	}
    866
    867	if (crtc_state->crc_enabled) {
    868		drm_dbg_kms(&dev_priv->drm,
    869			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
    870		return false;
    871	}
    872
    873	if (DISPLAY_VER(dev_priv) >= 12) {
    874		psr_max_h = 5120;
    875		psr_max_v = 3200;
    876		max_bpp = 30;
    877	} else if (DISPLAY_VER(dev_priv) >= 10) {
    878		psr_max_h = 4096;
    879		psr_max_v = 2304;
    880		max_bpp = 24;
    881	} else if (DISPLAY_VER(dev_priv) == 9) {
    882		psr_max_h = 3640;
    883		psr_max_v = 2304;
    884		max_bpp = 24;
    885	}
    886
    887	if (crtc_state->pipe_bpp > max_bpp) {
    888		drm_dbg_kms(&dev_priv->drm,
    889			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
    890			    crtc_state->pipe_bpp, max_bpp);
    891		return false;
    892	}
    893
    894	/* Wa_16011303918:adl-p */
    895	if (crtc_state->vrr.enable &&
    896	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
    897		drm_dbg_kms(&dev_priv->drm,
    898			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
    899		return false;
    900	}
    901
    902	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
    903		drm_dbg_kms(&dev_priv->drm,
     904			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
    905		return false;
    906	}
    907
    908	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
    909		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
    910		    !HAS_PSR_HW_TRACKING(dev_priv)) {
    911			drm_dbg_kms(&dev_priv->drm,
    912				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
    913			return false;
    914		}
    915	}
    916
    917	/* Wa_2209313811 */
    918	if (!crtc_state->enable_psr2_sel_fetch &&
    919	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
     920		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported in this Display stepping\n");
    921		goto unsupported;
    922	}
    923
    924	if (!psr2_granularity_check(intel_dp, crtc_state)) {
    925		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
    926		goto unsupported;
    927	}
    928
    929	if (!crtc_state->enable_psr2_sel_fetch &&
    930	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
    931		drm_dbg_kms(&dev_priv->drm,
    932			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
    933			    crtc_hdisplay, crtc_vdisplay,
    934			    psr_max_h, psr_max_v);
    935		goto unsupported;
    936	}
    937
    938	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
    939	return true;
    940
    941unsupported:
    942	crtc_state->enable_psr2_sel_fetch = false;
    943	return false;
    944}
    945
    946void intel_psr_compute_config(struct intel_dp *intel_dp,
    947			      struct intel_crtc_state *crtc_state,
    948			      struct drm_connector_state *conn_state)
    949{
    950	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
    951	const struct drm_display_mode *adjusted_mode =
    952		&crtc_state->hw.adjusted_mode;
    953	int psr_setup_time;
    954
     955	/*
     956	 * Current PSR panels don't work reliably with VRR enabled,
     957	 * so if VRR is enabled, do not enable PSR.
     958	 */
    959	if (crtc_state->vrr.enable)
    960		return;
    961
    962	if (!CAN_PSR(intel_dp))
    963		return;
    964
    965	if (!psr_global_enabled(intel_dp)) {
    966		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
    967		return;
    968	}
    969
    970	if (intel_dp->psr.sink_not_reliable) {
    971		drm_dbg_kms(&dev_priv->drm,
    972			    "PSR sink implementation is not reliable\n");
    973		return;
    974	}
    975
    976	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
    977		drm_dbg_kms(&dev_priv->drm,
    978			    "PSR condition failed: Interlaced mode enabled\n");
    979		return;
    980	}
    981
    982	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
    983	if (psr_setup_time < 0) {
    984		drm_dbg_kms(&dev_priv->drm,
    985			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
    986			    intel_dp->psr_dpcd[1]);
    987		return;
    988	}
    989
    990	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
    991	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
    992		drm_dbg_kms(&dev_priv->drm,
    993			    "PSR condition failed: PSR setup time (%d us) too long\n",
    994			    psr_setup_time);
    995		return;
    996	}
    997
    998	crtc_state->has_psr = true;
    999	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
   1000
   1001	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
   1002	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
   1003				     &crtc_state->psr_vsc);
   1004}
   1005
   1006void intel_psr_get_config(struct intel_encoder *encoder,
   1007			  struct intel_crtc_state *pipe_config)
   1008{
   1009	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
   1010	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
   1011	struct intel_dp *intel_dp;
   1012	u32 val;
   1013
   1014	if (!dig_port)
   1015		return;
   1016
   1017	intel_dp = &dig_port->dp;
   1018	if (!CAN_PSR(intel_dp))
   1019		return;
   1020
   1021	mutex_lock(&intel_dp->psr.lock);
   1022	if (!intel_dp->psr.enabled)
   1023		goto unlock;
   1024
    1025	/*
    1026	 * Not possible to read EDP_PSR/PSR2_CTL registers as they get
    1027	 * enabled/disabled by frontbuffer tracking and other mechanisms.
    1028	 */
   1029	pipe_config->has_psr = true;
   1030	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
   1031	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
   1032
   1033	if (!intel_dp->psr.psr2_enabled)
   1034		goto unlock;
   1035
   1036	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
   1037		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
   1038		if (val & PSR2_MAN_TRK_CTL_ENABLE)
   1039			pipe_config->enable_psr2_sel_fetch = true;
   1040	}
   1041
   1042	if (DISPLAY_VER(dev_priv) >= 12) {
   1043		val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
   1044		val &= EXITLINE_MASK;
   1045		pipe_config->dc3co_exitline = val;
   1046	}
   1047unlock:
   1048	mutex_unlock(&intel_dp->psr.lock);
   1049}
   1050
   1051static void intel_psr_activate(struct intel_dp *intel_dp)
   1052{
   1053	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1054	enum transcoder transcoder = intel_dp->psr.transcoder;
   1055
   1056	if (transcoder_has_psr2(dev_priv, transcoder))
   1057		drm_WARN_ON(&dev_priv->drm,
   1058			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
   1059
   1060	drm_WARN_ON(&dev_priv->drm,
   1061		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
   1062	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
   1063	lockdep_assert_held(&intel_dp->psr.lock);
   1064
    1065	/* psr1 and psr2 are mutually exclusive. */
   1066	if (intel_dp->psr.psr2_enabled)
   1067		hsw_activate_psr2(intel_dp);
   1068	else
   1069		hsw_activate_psr1(intel_dp);
   1070
   1071	intel_dp->psr.active = true;
   1072}
   1073
   1074static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
   1075{
   1076	switch (intel_dp->psr.pipe) {
   1077	case PIPE_A:
   1078		return LATENCY_REPORTING_REMOVED_PIPE_A;
   1079	case PIPE_B:
   1080		return LATENCY_REPORTING_REMOVED_PIPE_B;
   1081	case PIPE_C:
   1082		return LATENCY_REPORTING_REMOVED_PIPE_C;
   1083	default:
   1084		MISSING_CASE(intel_dp->psr.pipe);
   1085		return 0;
   1086	}
   1087}
   1088
   1089static void intel_psr_enable_source(struct intel_dp *intel_dp,
   1090				    const struct intel_crtc_state *crtc_state)
   1091{
   1092	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1093	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
   1094	u32 mask;
   1095
    1096	/*
    1097	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
    1098	 * mask LPSP to avoid a dependency on other drivers that might block
    1099	 * runtime_pm, besides preventing other hw tracking issues, now that we
    1100	 * can rely on frontbuffer tracking.
    1101	 */
   1102	mask = EDP_PSR_DEBUG_MASK_MEMUP |
   1103	       EDP_PSR_DEBUG_MASK_HPD |
   1104	       EDP_PSR_DEBUG_MASK_LPSP |
   1105	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
   1106
   1107	if (DISPLAY_VER(dev_priv) < 11)
   1108		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
   1109
   1110	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
   1111		       mask);
   1112
   1113	psr_irq_control(intel_dp);
   1114
   1115	if (intel_dp->psr.dc3co_exitline) {
   1116		u32 val;
   1117
   1118		/*
    1119		 * TODO: if future platforms support DC3CO in more than one
   1120		 * transcoder, EXITLINE will need to be unset when disabling PSR
   1121		 */
   1122		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
   1123		val &= ~EXITLINE_MASK;
   1124		val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
   1125		val |= EXITLINE_ENABLE;
   1126		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
   1127	}
   1128
   1129	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
   1130		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
   1131			     intel_dp->psr.psr2_sel_fetch_enabled ?
   1132			     IGNORE_PSR2_HW_TRACKING : 0);
   1133
   1134	if (intel_dp->psr.psr2_enabled) {
   1135		if (DISPLAY_VER(dev_priv) == 9)
   1136			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
   1137				     PSR2_VSC_ENABLE_PROG_HEADER |
   1138				     PSR2_ADD_VERTICAL_LINE_COUNT);
   1139
   1140		/*
   1141		 * Wa_16014451276:adlp
    1142		 * All supported adlp panels have 1-based X granularity; this may
    1143		 * cause issues if non-supported panels are used.
   1144		 */
   1145		if (IS_ALDERLAKE_P(dev_priv))
   1146			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
   1147				     ADLP_1_BASED_X_GRANULARITY);
   1148
   1149		/* Wa_16011168373:adl-p */
   1150		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
   1151			intel_de_rmw(dev_priv,
   1152				     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
   1153				     TRANS_SET_CONTEXT_LATENCY_MASK,
   1154				     TRANS_SET_CONTEXT_LATENCY_VALUE(1));
   1155
   1156		/* Wa_16012604467:adlp */
   1157		if (IS_ALDERLAKE_P(dev_priv))
   1158			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
   1159				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
   1160
   1161		/* Wa_16013835468:tgl[b0+], dg1 */
   1162		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
   1163		    IS_DG1(dev_priv)) {
   1164			u16 vtotal, vblank;
   1165
   1166			vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
   1167				 crtc_state->uapi.adjusted_mode.crtc_vdisplay;
   1168			vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
   1169				 crtc_state->uapi.adjusted_mode.crtc_vblank_start;
   1170			if (vblank > vtotal)
   1171				intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
   1172					     wa_16013835468_bit_get(intel_dp));
   1173		}
   1174	}
   1175}
   1176
   1177static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
   1178{
   1179	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1180	u32 val;
   1181
   1182	/*
   1183	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
   1184	 * will still keep the error set even after the reset done in the
   1185	 * irq_preinstall and irq_uninstall hooks.
    1186	 * Enabling PSR in this situation causes the screen to freeze the
    1187	 * first time that the PSR HW tries to activate, so let's keep PSR disabled
    1188	 * to avoid any rendering problems.
   1189	 */
   1190	if (DISPLAY_VER(dev_priv) >= 12) {
   1191		val = intel_de_read(dev_priv,
   1192				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
   1193		val &= EDP_PSR_ERROR(0);
   1194	} else {
   1195		val = intel_de_read(dev_priv, EDP_PSR_IIR);
   1196		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
   1197	}
   1198	if (val) {
   1199		intel_dp->psr.sink_not_reliable = true;
   1200		drm_dbg_kms(&dev_priv->drm,
   1201			    "PSR interruption error set, not enabling PSR\n");
   1202		return false;
   1203	}
   1204
   1205	return true;
   1206}
   1207
   1208static void intel_psr_enable_locked(struct intel_dp *intel_dp,
   1209				    const struct intel_crtc_state *crtc_state)
   1210{
   1211	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
   1212	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1213	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
   1214	struct intel_encoder *encoder = &dig_port->base;
   1215	u32 val;
   1216
   1217	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
   1218
   1219	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
   1220	intel_dp->psr.busy_frontbuffer_bits = 0;
   1221	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
   1222	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
   1223	/* DC5/DC6 requires at least 6 idle frames */
   1224	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
   1225	intel_dp->psr.dc3co_exit_delay = val;
   1226	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
   1227	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
   1228	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
   1229	intel_dp->psr.req_psr2_sdp_prior_scanline =
   1230		crtc_state->req_psr2_sdp_prior_scanline;
   1231
   1232	if (!psr_interrupt_error_check(intel_dp))
   1233		return;
   1234
   1235	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
   1236		    intel_dp->psr.psr2_enabled ? "2" : "1");
   1237	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
   1238	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
   1239	intel_psr_enable_sink(intel_dp);
   1240	intel_psr_enable_source(intel_dp, crtc_state);
   1241	intel_dp->psr.enabled = true;
   1242	intel_dp->psr.paused = false;
   1243
   1244	intel_psr_activate(intel_dp);
   1245}
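
/*
 * Illustrative numbers for the dc3co_exit_delay computed above: at 30 fps the
 * frame time is ~33333 us, so 6 idle frames correspond to roughly 200 ms
 * before tgl_dc3co_disable_work switches back from DC3CO to deep sleep.
 */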
   1246
   1247static void intel_psr_exit(struct intel_dp *intel_dp)
   1248{
   1249	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1250	u32 val;
   1251
   1252	if (!intel_dp->psr.active) {
   1253		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
   1254			val = intel_de_read(dev_priv,
   1255					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
   1256			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
   1257		}
   1258
   1259		val = intel_de_read(dev_priv,
   1260				    EDP_PSR_CTL(intel_dp->psr.transcoder));
   1261		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
   1262
   1263		return;
   1264	}
   1265
   1266	if (intel_dp->psr.psr2_enabled) {
   1267		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
   1268		val = intel_de_read(dev_priv,
   1269				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
   1270		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
   1271		val &= ~EDP_PSR2_ENABLE;
   1272		intel_de_write(dev_priv,
   1273			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
   1274	} else {
   1275		val = intel_de_read(dev_priv,
   1276				    EDP_PSR_CTL(intel_dp->psr.transcoder));
   1277		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
   1278		val &= ~EDP_PSR_ENABLE;
   1279		intel_de_write(dev_priv,
   1280			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
   1281	}
   1282	intel_dp->psr.active = false;
   1283}
   1284
   1285static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
   1286{
   1287	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1288	i915_reg_t psr_status;
   1289	u32 psr_status_mask;
   1290
   1291	if (intel_dp->psr.psr2_enabled) {
   1292		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
   1293		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
   1294	} else {
   1295		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
   1296		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
   1297	}
   1298
   1299	/* Wait till PSR is idle */
   1300	if (intel_de_wait_for_clear(dev_priv, psr_status,
   1301				    psr_status_mask, 2000))
   1302		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
   1303}
   1304
   1305static void intel_psr_disable_locked(struct intel_dp *intel_dp)
   1306{
   1307	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1308	enum phy phy = intel_port_to_phy(dev_priv,
   1309					 dp_to_dig_port(intel_dp)->base.port);
   1310
   1311	lockdep_assert_held(&intel_dp->psr.lock);
   1312
   1313	if (!intel_dp->psr.enabled)
   1314		return;
   1315
   1316	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
   1317		    intel_dp->psr.psr2_enabled ? "2" : "1");
   1318
   1319	intel_psr_exit(intel_dp);
   1320	intel_psr_wait_exit_locked(intel_dp);
   1321
   1322	/* Wa_1408330847 */
   1323	if (intel_dp->psr.psr2_sel_fetch_enabled &&
   1324	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
   1325		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
   1326			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
   1327
   1328	if (intel_dp->psr.psr2_enabled) {
   1329		/* Wa_16011168373:adl-p */
   1330		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
   1331			intel_de_rmw(dev_priv,
   1332				     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
   1333				     TRANS_SET_CONTEXT_LATENCY_MASK, 0);
   1334
   1335		/* Wa_16012604467:adlp */
   1336		if (IS_ALDERLAKE_P(dev_priv))
   1337			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
   1338				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
   1339
   1340		/* Wa_16013835468:tgl[b0+], dg1 */
   1341		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
   1342		    IS_DG1(dev_priv))
   1343			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
   1344				     wa_16013835468_bit_get(intel_dp), 0);
   1345	}
   1346
   1347	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
   1348
   1349	/* Disable PSR on Sink */
   1350	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
   1351
   1352	if (intel_dp->psr.psr2_enabled)
   1353		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
   1354
   1355	intel_dp->psr.enabled = false;
   1356	intel_dp->psr.psr2_enabled = false;
   1357	intel_dp->psr.psr2_sel_fetch_enabled = false;
   1358	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
   1359}
   1360
   1361/**
   1362 * intel_psr_disable - Disable PSR
   1363 * @intel_dp: Intel DP
   1364 * @old_crtc_state: old CRTC state
   1365 *
   1366 * This function needs to be called before disabling pipe.
   1367 */
   1368void intel_psr_disable(struct intel_dp *intel_dp,
   1369		       const struct intel_crtc_state *old_crtc_state)
   1370{
   1371	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1372
   1373	if (!old_crtc_state->has_psr)
   1374		return;
   1375
   1376	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
   1377		return;
   1378
   1379	mutex_lock(&intel_dp->psr.lock);
   1380
   1381	intel_psr_disable_locked(intel_dp);
   1382
   1383	mutex_unlock(&intel_dp->psr.lock);
   1384	cancel_work_sync(&intel_dp->psr.work);
   1385	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
   1386}
   1387
   1388/**
   1389 * intel_psr_pause - Pause PSR
   1390 * @intel_dp: Intel DP
   1391 *
    1392 * This function needs to be called after enabling PSR.
   1393 */
   1394void intel_psr_pause(struct intel_dp *intel_dp)
   1395{
   1396	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1397	struct intel_psr *psr = &intel_dp->psr;
   1398
   1399	if (!CAN_PSR(intel_dp))
   1400		return;
   1401
   1402	mutex_lock(&psr->lock);
   1403
   1404	if (!psr->enabled) {
   1405		mutex_unlock(&psr->lock);
   1406		return;
   1407	}
   1408
   1409	/* If we ever hit this, we will need to add refcount to pause/resume */
   1410	drm_WARN_ON(&dev_priv->drm, psr->paused);
   1411
   1412	intel_psr_exit(intel_dp);
   1413	intel_psr_wait_exit_locked(intel_dp);
   1414	psr->paused = true;
   1415
   1416	mutex_unlock(&psr->lock);
   1417
   1418	cancel_work_sync(&psr->work);
   1419	cancel_delayed_work_sync(&psr->dc3co_work);
   1420}
   1421
   1422/**
   1423 * intel_psr_resume - Resume PSR
   1424 * @intel_dp: Intel DP
   1425 *
    1426 * This function needs to be called after pausing PSR.
   1427 */
   1428void intel_psr_resume(struct intel_dp *intel_dp)
   1429{
   1430	struct intel_psr *psr = &intel_dp->psr;
   1431
   1432	if (!CAN_PSR(intel_dp))
   1433		return;
   1434
   1435	mutex_lock(&psr->lock);
   1436
   1437	if (!psr->paused)
   1438		goto unlock;
   1439
   1440	psr->paused = false;
   1441	intel_psr_activate(intel_dp);
   1442
   1443unlock:
   1444	mutex_unlock(&psr->lock);
   1445}
   1446
   1447static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
   1448{
   1449	return IS_ALDERLAKE_P(dev_priv) ? 0 : PSR2_MAN_TRK_CTL_ENABLE;
   1450}
   1451
   1452static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
   1453{
   1454	return IS_ALDERLAKE_P(dev_priv) ?
   1455	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
   1456	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
   1457}
   1458
   1459static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
   1460{
   1461	return IS_ALDERLAKE_P(dev_priv) ?
   1462	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
   1463	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
   1464}
   1465
   1466static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
   1467{
   1468	return IS_ALDERLAKE_P(dev_priv) ?
   1469	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
   1470	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
   1471}
   1472
   1473static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
   1474{
   1475	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1476
   1477	if (intel_dp->psr.psr2_sel_fetch_enabled)
   1478		intel_de_write(dev_priv,
   1479			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
   1480			       man_trk_ctl_enable_bit_get(dev_priv) |
   1481			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
   1482			       man_trk_ctl_single_full_frame_bit_get(dev_priv));
   1483
    1484	/*
    1485	 * Display WA #0884: skl+
    1486	 * This documented WA for bxt can be safely applied
    1487	 * broadly so we can force HW tracking to exit PSR
    1488	 * instead of disabling and re-enabling.
    1489	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
    1490	 * but it makes more sense to write to the currently active
    1491	 * pipe.
    1492	 *
    1493	 * This workaround is not documented for platforms with display version 10
    1494	 * or newer, but testing proved that it works up to display version 13;
    1495	 * for anything newer, additional testing will be needed.
    1496	 */
   1497	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
   1498}
   1499
   1500void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
   1501					const struct intel_crtc_state *crtc_state)
   1502{
   1503	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   1504	enum pipe pipe = plane->pipe;
   1505
   1506	if (!crtc_state->enable_psr2_sel_fetch)
   1507		return;
   1508
   1509	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
   1510}
   1511
   1512void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
   1513					const struct intel_crtc_state *crtc_state,
   1514					const struct intel_plane_state *plane_state,
   1515					int color_plane)
   1516{
   1517	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
   1518	enum pipe pipe = plane->pipe;
   1519	const struct drm_rect *clip;
   1520	u32 val;
   1521	int x, y;
   1522
   1523	if (!crtc_state->enable_psr2_sel_fetch)
   1524		return;
   1525
   1526	if (plane->id == PLANE_CURSOR) {
   1527		intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
   1528				  plane_state->ctl);
   1529		return;
   1530	}
   1531
   1532	clip = &plane_state->psr2_sel_fetch_area;
   1533
   1534	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
   1535	val |= plane_state->uapi.dst.x1;
   1536	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
   1537
   1538	x = plane_state->view.color_plane[color_plane].x;
   1539
   1540	/*
   1541	 * From Bspec: UV surface Start Y Position = half of Y plane Y
   1542	 * start position.
   1543	 */
   1544	if (!color_plane)
   1545		y = plane_state->view.color_plane[color_plane].y + clip->y1;
   1546	else
   1547		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
   1548
   1549	val = y << 16 | x;
   1550
   1551	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
   1552			  val);
   1553
   1554	/* Sizes are 0 based */
   1555	val = (drm_rect_height(clip) - 1) << 16;
   1556	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
   1557	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
   1558
   1559	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
   1560			  PLANE_SEL_FETCH_CTL_ENABLE);
   1561}
   1562
   1563void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
   1564{
   1565	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1566	struct intel_encoder *encoder;
   1567
   1568	if (!crtc_state->enable_psr2_sel_fetch)
   1569		return;
   1570
   1571	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
   1572					     crtc_state->uapi.encoder_mask) {
   1573		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   1574
   1575		lockdep_assert_held(&intel_dp->psr.lock);
   1576		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
   1577			return;
   1578		break;
   1579	}
   1580
   1581	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
   1582		       crtc_state->psr2_man_track_ctl);
   1583}
   1584
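        /*
         * Note: the SU region start/end addresses are programmed in scanlines
         * on ADL-P and in 4-line blocks (1-based) on earlier platforms, as
         * reflected in the calculation below.
         */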
   1585static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
   1586				  struct drm_rect *clip, bool full_update)
   1587{
   1588	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1589	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1590	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
   1591
   1592	/* SF partial frame enable has to be set even on full update */
   1593	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
   1594
   1595	if (full_update) {
   1596		/*
   1597		 * Not applying Wa_14014971508:adlp as we do not support the
   1598		 * feature that requires this workaround.
   1599		 */
   1600		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
   1601		goto exit;
   1602	}
   1603
   1604	if (clip->y1 == -1)
   1605		goto exit;
   1606
   1607	if (IS_ALDERLAKE_P(dev_priv)) {
   1608		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
   1609		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
   1610	} else {
   1611		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
   1612
   1613		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
   1614		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
   1615	}
   1616exit:
   1617	crtc_state->psr2_man_track_ctl = val;
   1618}
   1619
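        /*
         * If overlap_damage_area is still unset (y1 == -1), initialize it from
         * damage_area; otherwise grow it so that the Y extents of both areas
         * are covered.
         */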
   1620static void clip_area_update(struct drm_rect *overlap_damage_area,
   1621			     struct drm_rect *damage_area)
   1622{
   1623	if (overlap_damage_area->y1 == -1) {
   1624		overlap_damage_area->y1 = damage_area->y1;
   1625		overlap_damage_area->y2 = damage_area->y2;
   1626		return;
   1627	}
   1628
   1629	if (damage_area->y1 < overlap_damage_area->y1)
   1630		overlap_damage_area->y1 = damage_area->y1;
   1631
   1632	if (damage_area->y2 > overlap_damage_area->y2)
   1633		overlap_damage_area->y2 = damage_area->y2;
   1634}
   1635
   1636static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
   1637						struct drm_rect *pipe_clip)
   1638{
   1639	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1640	const u16 y_alignment = crtc_state->su_y_granularity;
   1641
   1642	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
   1643	if (pipe_clip->y2 % y_alignment)
   1644		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
   1645
   1646	if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
   1647		drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
   1648}
   1649
   1650/*
    1651 * TODO: It is not clear how to handle planes with a negative position;
    1652 * planes are also not updated if they have a negative X position, so
    1653 * for now do a full update in those cases.
    1654 *
    1655 * Plane scaling and rotation are not supported by selective fetch and both
    1656 * properties can change without a modeset, so they need to be checked at
    1657 * every atomic commit.
   1658 */
   1659static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
   1660{
   1661	if (plane_state->uapi.dst.y1 < 0 ||
   1662	    plane_state->uapi.dst.x1 < 0 ||
   1663	    plane_state->scaler_id >= 0 ||
   1664	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
   1665		return false;
   1666
   1667	return true;
   1668}
   1669
   1670/*
    1671 * Check for pipe properties that are not supported by selective fetch.
    1672 *
    1673 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
    1674 * after intel_psr_compute_config(), so for now keep PSR2 selective fetch
    1675 * enabled and go to the full update path.
   1676 */
   1677static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
   1678{
   1679	if (crtc_state->scaler_state.scaler_id >= 0)
   1680		return false;
   1681
   1682	return true;
   1683}
   1684
   1685int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
   1686				struct intel_crtc *crtc)
   1687{
   1688	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
   1689	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
   1690	struct intel_plane_state *new_plane_state, *old_plane_state;
   1691	struct intel_plane *plane;
   1692	bool full_update = false;
   1693	int i, ret;
   1694
   1695	if (!crtc_state->enable_psr2_sel_fetch)
   1696		return 0;
   1697
   1698	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
   1699		full_update = true;
   1700		goto skip_sel_fetch_set_loop;
   1701	}
   1702
   1703	/*
    1704	 * Calculate the minimal selective fetch area of each plane and the
    1705	 * pipe damaged area.
    1706	 * In the next loop the plane selective fetch area will actually be set
    1707	 * using the whole pipe damaged area.
   1708	 */
   1709	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
   1710					     new_plane_state, i) {
   1711		struct drm_rect src, damaged_area = { .y1 = -1 };
   1712		struct drm_atomic_helper_damage_iter iter;
   1713		struct drm_rect clip;
   1714
   1715		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
   1716			continue;
   1717
   1718		if (!new_plane_state->uapi.visible &&
   1719		    !old_plane_state->uapi.visible)
   1720			continue;
   1721
   1722		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
   1723			full_update = true;
   1724			break;
   1725		}
   1726
   1727		/*
    1728		 * If the visibility changed or the plane moved, mark the whole
    1729		 * plane area as damaged as it needs to be completely redrawn in
    1730		 * the new and old positions.
   1731		 */
   1732		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
   1733		    !drm_rect_equals(&new_plane_state->uapi.dst,
   1734				     &old_plane_state->uapi.dst)) {
   1735			if (old_plane_state->uapi.visible) {
   1736				damaged_area.y1 = old_plane_state->uapi.dst.y1;
   1737				damaged_area.y2 = old_plane_state->uapi.dst.y2;
   1738				clip_area_update(&pipe_clip, &damaged_area);
   1739			}
   1740
   1741			if (new_plane_state->uapi.visible) {
   1742				damaged_area.y1 = new_plane_state->uapi.dst.y1;
   1743				damaged_area.y2 = new_plane_state->uapi.dst.y2;
   1744				clip_area_update(&pipe_clip, &damaged_area);
   1745			}
   1746			continue;
   1747		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
   1748			/* If alpha changed mark the whole plane area as damaged */
   1749			damaged_area.y1 = new_plane_state->uapi.dst.y1;
   1750			damaged_area.y2 = new_plane_state->uapi.dst.y2;
   1751			clip_area_update(&pipe_clip, &damaged_area);
   1752			continue;
   1753		}
   1754
   1755		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
   1756
   1757		drm_atomic_helper_damage_iter_init(&iter,
   1758						   &old_plane_state->uapi,
   1759						   &new_plane_state->uapi);
   1760		drm_atomic_for_each_plane_damage(&iter, &clip) {
   1761			if (drm_rect_intersect(&clip, &src))
   1762				clip_area_update(&damaged_area, &clip);
   1763		}
   1764
   1765		if (damaged_area.y1 == -1)
   1766			continue;
   1767
   1768		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
   1769		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
   1770		clip_area_update(&pipe_clip, &damaged_area);
   1771	}
   1772
   1773	if (full_update)
   1774		goto skip_sel_fetch_set_loop;
   1775
   1776	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
   1777	if (ret)
   1778		return ret;
   1779
   1780	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
   1781
   1782	/*
    1783	 * Now that we have the pipe damaged area, check if it intersects with
    1784	 * every plane; if it does, set the plane selective fetch area.
   1785	 */
   1786	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
   1787					     new_plane_state, i) {
   1788		struct drm_rect *sel_fetch_area, inter;
   1789		struct intel_plane *linked = new_plane_state->planar_linked_plane;
   1790
   1791		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
   1792		    !new_plane_state->uapi.visible)
   1793			continue;
   1794
   1795		inter = pipe_clip;
   1796		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
   1797			continue;
   1798
   1799		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
   1800			full_update = true;
   1801			break;
   1802		}
   1803
   1804		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
   1805		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
   1806		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
   1807		crtc_state->update_planes |= BIT(plane->id);
   1808
   1809		/*
    1810		 * The sel_fetch_area is calculated for the UV plane. Use the
    1811		 * same area for the Y plane as well.
   1812		 */
   1813		if (linked) {
   1814			struct intel_plane_state *linked_new_plane_state;
   1815			struct drm_rect *linked_sel_fetch_area;
   1816
   1817			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
   1818			if (IS_ERR(linked_new_plane_state))
   1819				return PTR_ERR(linked_new_plane_state);
   1820
   1821			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
   1822			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
   1823			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
   1824			crtc_state->update_planes |= BIT(linked->id);
   1825		}
   1826	}
   1827
   1828skip_sel_fetch_set_loop:
   1829	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
   1830	return 0;
   1831}
   1832
   1833void intel_psr_pre_plane_update(struct intel_atomic_state *state,
   1834				struct intel_crtc *crtc)
   1835{
   1836	struct drm_i915_private *i915 = to_i915(state->base.dev);
   1837	const struct intel_crtc_state *crtc_state =
   1838		intel_atomic_get_new_crtc_state(state, crtc);
   1839	struct intel_encoder *encoder;
   1840
   1841	if (!HAS_PSR(i915))
   1842		return;
   1843
   1844	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
   1845					     crtc_state->uapi.encoder_mask) {
   1846		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   1847		struct intel_psr *psr = &intel_dp->psr;
   1848		bool needs_to_disable = false;
   1849
   1850		mutex_lock(&psr->lock);
   1851
   1852		/*
   1853		 * Reasons to disable:
   1854		 * - PSR disabled in new state
   1855		 * - All planes will go inactive
   1856		 * - Changing between PSR versions
   1857		 */
   1858		needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
   1859		needs_to_disable |= !crtc_state->has_psr;
   1860		needs_to_disable |= !crtc_state->active_planes;
   1861		needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
   1862
   1863		if (psr->enabled && needs_to_disable)
   1864			intel_psr_disable_locked(intel_dp);
   1865
   1866		mutex_unlock(&psr->lock);
   1867	}
   1868}
   1869
   1870static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
   1871					 const struct intel_crtc_state *crtc_state)
   1872{
   1873	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   1874	struct intel_encoder *encoder;
   1875
   1876	if (!crtc_state->has_psr)
   1877		return;
   1878
   1879	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
   1880					     crtc_state->uapi.encoder_mask) {
   1881		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   1882		struct intel_psr *psr = &intel_dp->psr;
   1883
   1884		mutex_lock(&psr->lock);
   1885
   1886		if (psr->sink_not_reliable)
   1887			goto exit;
   1888
   1889		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
   1890
    1891		/* Only enable if there are active planes */
   1892		if (!psr->enabled && crtc_state->active_planes)
   1893			intel_psr_enable_locked(intel_dp, crtc_state);
   1894
   1895		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
   1896		if (crtc_state->crc_enabled && psr->enabled)
   1897			psr_force_hw_tracking_exit(intel_dp);
   1898
   1899exit:
   1900		mutex_unlock(&psr->lock);
   1901	}
   1902}
   1903
   1904void intel_psr_post_plane_update(const struct intel_atomic_state *state)
   1905{
   1906	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   1907	struct intel_crtc_state *crtc_state;
   1908	struct intel_crtc *crtc;
   1909	int i;
   1910
   1911	if (!HAS_PSR(dev_priv))
   1912		return;
   1913
   1914	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
   1915		_intel_psr_post_plane_update(state, crtc_state);
   1916}
   1917
   1918static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
   1919{
   1920	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1921
   1922	/*
   1923	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
    1924	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
   1925	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
   1926	 */
   1927	return intel_de_wait_for_clear(dev_priv,
   1928				       EDP_PSR2_STATUS(intel_dp->psr.transcoder),
   1929				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
   1930}
   1931
   1932static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
   1933{
   1934	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1935
   1936	/*
   1937	 * From bspec: Panel Self Refresh (BDW+)
   1938	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
   1939	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
   1940	 * defensive enough to cover everything.
   1941	 */
   1942	return intel_de_wait_for_clear(dev_priv,
   1943				       EDP_PSR_STATUS(intel_dp->psr.transcoder),
   1944				       EDP_PSR_STATUS_STATE_MASK, 50);
   1945}
   1946
   1947/**
    1948 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
   1949 * @new_crtc_state: new CRTC state
   1950 *
   1951 * This function is expected to be called from pipe_update_start() where it is
   1952 * not expected to race with PSR enable or disable.
   1953 */
   1954void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
   1955{
   1956	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
   1957	struct intel_encoder *encoder;
   1958
   1959	if (!new_crtc_state->has_psr)
   1960		return;
   1961
   1962	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
   1963					     new_crtc_state->uapi.encoder_mask) {
   1964		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   1965		int ret;
   1966
   1967		lockdep_assert_held(&intel_dp->psr.lock);
   1968
   1969		if (!intel_dp->psr.enabled)
   1970			continue;
   1971
   1972		if (intel_dp->psr.psr2_enabled)
   1973			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
   1974		else
   1975			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
   1976
   1977		if (ret)
   1978			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
   1979	}
   1980}
   1981
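        /*
         * Wait for the PSR status to idle before re-activating PSR. The PSR
         * lock is dropped around the register wait and re-acquired afterwards,
         * so the enable state has to be re-checked before acting on the result.
         */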
   1982static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
   1983{
   1984	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   1985	i915_reg_t reg;
   1986	u32 mask;
   1987	int err;
   1988
   1989	if (!intel_dp->psr.enabled)
   1990		return false;
   1991
   1992	if (intel_dp->psr.psr2_enabled) {
   1993		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
   1994		mask = EDP_PSR2_STATUS_STATE_MASK;
   1995	} else {
   1996		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
   1997		mask = EDP_PSR_STATUS_STATE_MASK;
   1998	}
   1999
   2000	mutex_unlock(&intel_dp->psr.lock);
   2001
   2002	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
   2003	if (err)
   2004		drm_err(&dev_priv->drm,
   2005			"Timed out waiting for PSR Idle for re-enable\n");
   2006
   2007	/* After the unlocked wait, verify that PSR is still wanted! */
   2008	mutex_lock(&intel_dp->psr.lock);
   2009	return err == 0 && intel_dp->psr.enabled;
   2010}
   2011
   2012static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
   2013{
   2014	struct drm_connector_list_iter conn_iter;
   2015	struct drm_device *dev = &dev_priv->drm;
   2016	struct drm_modeset_acquire_ctx ctx;
   2017	struct drm_atomic_state *state;
   2018	struct drm_connector *conn;
   2019	int err = 0;
   2020
   2021	state = drm_atomic_state_alloc(dev);
   2022	if (!state)
   2023		return -ENOMEM;
   2024
   2025	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
   2026	state->acquire_ctx = &ctx;
   2027
   2028retry:
   2029
   2030	drm_connector_list_iter_begin(dev, &conn_iter);
   2031	drm_for_each_connector_iter(conn, &conn_iter) {
   2032		struct drm_connector_state *conn_state;
   2033		struct drm_crtc_state *crtc_state;
   2034
   2035		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
   2036			continue;
   2037
   2038		conn_state = drm_atomic_get_connector_state(state, conn);
   2039		if (IS_ERR(conn_state)) {
   2040			err = PTR_ERR(conn_state);
   2041			break;
   2042		}
   2043
   2044		if (!conn_state->crtc)
   2045			continue;
   2046
   2047		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
   2048		if (IS_ERR(crtc_state)) {
   2049			err = PTR_ERR(crtc_state);
   2050			break;
   2051		}
   2052
   2053		/* Mark mode as changed to trigger a pipe->update() */
   2054		crtc_state->mode_changed = true;
   2055	}
   2056	drm_connector_list_iter_end(&conn_iter);
   2057
   2058	if (err == 0)
   2059		err = drm_atomic_commit(state);
   2060
   2061	if (err == -EDEADLK) {
   2062		drm_atomic_state_clear(state);
   2063		err = drm_modeset_backoff(&ctx);
   2064		if (!err)
   2065			goto retry;
   2066	}
   2067
   2068	drm_modeset_drop_locks(&ctx);
   2069	drm_modeset_acquire_fini(&ctx);
   2070	drm_atomic_state_put(state);
   2071
   2072	return err;
   2073}
   2074
   2075int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
   2076{
   2077	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2078	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
   2079	u32 old_mode;
   2080	int ret;
   2081
   2082	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
   2083	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
   2084		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
   2085		return -EINVAL;
   2086	}
   2087
   2088	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
   2089	if (ret)
   2090		return ret;
   2091
   2092	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
   2093	intel_dp->psr.debug = val;
   2094
   2095	/*
   2096	 * Do it right away if it's already enabled, otherwise it will be done
   2097	 * when enabling the source.
   2098	 */
   2099	if (intel_dp->psr.enabled)
   2100		psr_irq_control(intel_dp);
   2101
   2102	mutex_unlock(&intel_dp->psr.lock);
   2103
   2104	if (old_mode != mode)
   2105		ret = intel_psr_fastset_force(dev_priv);
   2106
   2107	return ret;
   2108}
   2109
   2110static void intel_psr_handle_irq(struct intel_dp *intel_dp)
   2111{
   2112	struct intel_psr *psr = &intel_dp->psr;
   2113
   2114	intel_psr_disable_locked(intel_dp);
   2115	psr->sink_not_reliable = true;
    2116	/* let's make sure that the sink is awake */
   2117	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
   2118}
   2119
   2120static void intel_psr_work(struct work_struct *work)
   2121{
   2122	struct intel_dp *intel_dp =
   2123		container_of(work, typeof(*intel_dp), psr.work);
   2124
   2125	mutex_lock(&intel_dp->psr.lock);
   2126
   2127	if (!intel_dp->psr.enabled)
   2128		goto unlock;
   2129
   2130	if (READ_ONCE(intel_dp->psr.irq_aux_error))
   2131		intel_psr_handle_irq(intel_dp);
   2132
   2133	/*
    2134	 * We have to make sure PSR is ready for re-enable,
    2135	 * otherwise it stays disabled until the next full enable/disable cycle.
    2136	 * PSR might take some time to get fully disabled
    2137	 * and become ready for re-enable.
   2138	 */
   2139	if (!__psr_wait_for_idle_locked(intel_dp))
   2140		goto unlock;
   2141
   2142	/*
    2143	 * The delayed work can race with an invalidate, hence we need to
    2144	 * recheck. Since psr_flush first clears this and then reschedules, we
    2145	 * won't ever miss a flush when bailing out here.
   2146	 */
   2147	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
   2148		goto unlock;
   2149
   2150	intel_psr_activate(intel_dp);
   2151unlock:
   2152	mutex_unlock(&intel_dp->psr.lock);
   2153}
   2154
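        /*
         * With PSR2 selective fetch an invalidate switches the hardware to
         * continuous full frame (CFF) fetching instead of exiting PSR; without
         * selective fetch PSR is simply exited.
         */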
   2155static void _psr_invalidate_handle(struct intel_dp *intel_dp)
   2156{
   2157	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2158
   2159	if (intel_dp->psr.psr2_sel_fetch_enabled) {
   2160		u32 val;
   2161
   2162		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
   2163			return;
   2164
   2165		val = man_trk_ctl_enable_bit_get(dev_priv) |
   2166		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
   2167		      man_trk_ctl_continuos_full_frame(dev_priv);
   2168		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
   2169		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
   2170		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
   2171	} else {
   2172		intel_psr_exit(intel_dp);
   2173	}
   2174}
   2175
   2176/**
    2177 * intel_psr_invalidate - Invalidate PSR
   2178 * @dev_priv: i915 device
   2179 * @frontbuffer_bits: frontbuffer plane tracking bits
   2180 * @origin: which operation caused the invalidate
   2181 *
   2182 * Since the hardware frontbuffer tracking has gaps we need to integrate
   2183 * with the software frontbuffer tracking. This function gets called every
   2184 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
   2185 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
   2186 *
    2187 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
   2188 */
   2189void intel_psr_invalidate(struct drm_i915_private *dev_priv,
   2190			  unsigned frontbuffer_bits, enum fb_op_origin origin)
   2191{
   2192	struct intel_encoder *encoder;
   2193
   2194	if (origin == ORIGIN_FLIP)
   2195		return;
   2196
   2197	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
   2198		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
   2199		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   2200
   2201		mutex_lock(&intel_dp->psr.lock);
   2202		if (!intel_dp->psr.enabled) {
   2203			mutex_unlock(&intel_dp->psr.lock);
   2204			continue;
   2205		}
   2206
   2207		pipe_frontbuffer_bits &=
   2208			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
   2209		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
   2210
   2211		if (pipe_frontbuffer_bits)
   2212			_psr_invalidate_handle(intel_dp);
   2213
   2214		mutex_unlock(&intel_dp->psr.lock);
   2215	}
   2216}
   2217/*
    2218 * When we completely rely on PSR2 S/W tracking in the future,
    2219 * intel_psr_flush() will also invalidate and flush the PSR for ORIGIN_FLIP
    2220 * events, therefore tgl_dc3co_flush_locked() will need to be changed
    2221 * accordingly.
   2222 */
   2223static void
   2224tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
   2225		       enum fb_op_origin origin)
   2226{
   2227	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
   2228	    !intel_dp->psr.active)
   2229		return;
   2230
   2231	/*
    2232	 * Push back the delayed work's delay at every frontbuffer flush/flip
    2233	 * event; when the delayed work finally runs, the display has been idle.
   2234	 */
   2235	if (!(frontbuffer_bits &
   2236	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
   2237		return;
   2238
   2239	tgl_psr2_enable_dc3co(intel_dp);
   2240	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
   2241			 intel_dp->psr.dc3co_exit_delay);
   2242}
   2243
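        /*
         * On flush with selective fetch: if continuous full frame fetching was
         * enabled by an earlier invalidate, switch back to a single full frame
         * once no frontbuffer bits are still busy; if CFF is not enabled, a
         * single full frame via a hardware tracking exit is enough. Without
         * selective fetch, force a hardware tracking exit and kick the PSR
         * work when nothing is pending.
         */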
   2244static void _psr_flush_handle(struct intel_dp *intel_dp)
   2245{
   2246	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2247
   2248	if (intel_dp->psr.psr2_sel_fetch_enabled) {
   2249		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
   2250			/* can we turn CFF off? */
   2251			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
   2252				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
   2253					  man_trk_ctl_partial_frame_bit_get(dev_priv) |
   2254					  man_trk_ctl_single_full_frame_bit_get(dev_priv);
   2255
   2256				/*
   2257				 * turn continuous full frame off and do a single
   2258				 * full frame
   2259				 */
   2260				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
   2261					       val);
   2262				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
   2263				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
   2264			}
   2265		} else {
   2266			/*
   2267			 * continuous full frame is disabled, only a single full
   2268			 * frame is required
   2269			 */
   2270			psr_force_hw_tracking_exit(intel_dp);
   2271		}
   2272	} else {
   2273		psr_force_hw_tracking_exit(intel_dp);
   2274
   2275		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
   2276			schedule_work(&intel_dp->psr.work);
   2277	}
   2278}
   2279
   2280/**
   2281 * intel_psr_flush - Flush PSR
   2282 * @dev_priv: i915 device
   2283 * @frontbuffer_bits: frontbuffer plane tracking bits
   2284 * @origin: which operation caused the flush
   2285 *
   2286 * Since the hardware frontbuffer tracking has gaps we need to integrate
   2287 * with the software frontbuffer tracking. This function gets called every
   2288 * time frontbuffer rendering has completed and flushed out to memory. PSR
   2289 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
   2290 *
   2291 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
   2292 */
   2293void intel_psr_flush(struct drm_i915_private *dev_priv,
   2294		     unsigned frontbuffer_bits, enum fb_op_origin origin)
   2295{
   2296	struct intel_encoder *encoder;
   2297
   2298	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
   2299		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
   2300		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   2301
   2302		mutex_lock(&intel_dp->psr.lock);
   2303		if (!intel_dp->psr.enabled) {
   2304			mutex_unlock(&intel_dp->psr.lock);
   2305			continue;
   2306		}
   2307
   2308		pipe_frontbuffer_bits &=
   2309			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
   2310		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
   2311
   2312		/*
   2313		 * If the PSR is paused by an explicit intel_psr_paused() call,
   2314		 * we have to ensure that the PSR is not activated until
   2315		 * intel_psr_resume() is called.
   2316		 */
   2317		if (intel_dp->psr.paused)
   2318			goto unlock;
   2319
   2320		if (origin == ORIGIN_FLIP ||
   2321		    (origin == ORIGIN_CURSOR_UPDATE &&
   2322		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
   2323			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
   2324			goto unlock;
   2325		}
   2326
   2327		if (pipe_frontbuffer_bits == 0)
   2328			goto unlock;
   2329
   2330		/* By definition flush = invalidate + flush */
   2331		_psr_flush_handle(intel_dp);
   2332unlock:
   2333		mutex_unlock(&intel_dp->psr.lock);
   2334	}
   2335}
   2336
   2337/**
   2338 * intel_psr_init - Init basic PSR work and mutex.
   2339 * @intel_dp: Intel DP
   2340 *
    2341 * This function is called after initializing the connector
    2342 * (connector initialization handles the connector capabilities)
    2343 * and it initializes basic PSR state for each DP encoder.
   2344 */
   2345void intel_psr_init(struct intel_dp *intel_dp)
   2346{
   2347	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
   2348	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2349
   2350	if (!HAS_PSR(dev_priv))
   2351		return;
   2352
   2353	/*
   2354	 * HSW spec explicitly says PSR is tied to port A.
    2355 * BDW+ platforms have an instance of the PSR registers per transcoder,
    2356 * but BDW, GEN9 and GEN11 are not validated by the HW team on
    2357 * transcoders other than the eDP one.
    2358 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
    2359 * so let's keep it hardcoded to PORT_A for those platforms.
    2360 * GEN12, however, supports an instance of the PSR registers per transcoder.
   2361	 */
   2362	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
   2363		drm_dbg_kms(&dev_priv->drm,
   2364			    "PSR condition failed: Port not supported\n");
   2365		return;
   2366	}
   2367
   2368	intel_dp->psr.source_support = true;
   2369
   2370	if (dev_priv->params.enable_psr == -1)
   2371		if (!dev_priv->vbt.psr.enable)
   2372			dev_priv->params.enable_psr = 0;
   2373
    2374	/* Set link_standby vs. link_off defaults */
    2375	if (DISPLAY_VER(dev_priv) < 12)
    2376		/* For platforms up to TGL let's respect the VBT again */
   2377		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
   2378
   2379	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
   2380	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
   2381	mutex_init(&intel_dp->psr.lock);
   2382}
   2383
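        /*
         * Read the sink PSR status and error status over DPCD. Returns 0 on
         * success, otherwise the failing drm_dp_dpcd_readb() return value.
         */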
   2384static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
   2385					   u8 *status, u8 *error_status)
   2386{
   2387	struct drm_dp_aux *aux = &intel_dp->aux;
   2388	int ret;
   2389
   2390	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
   2391	if (ret != 1)
   2392		return ret;
   2393
   2394	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
   2395	if (ret != 1)
   2396		return ret;
   2397
   2398	*status = *status & DP_PSR_SINK_STATE_MASK;
   2399
   2400	return 0;
   2401}
   2402
   2403static void psr_alpm_check(struct intel_dp *intel_dp)
   2404{
   2405	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2406	struct drm_dp_aux *aux = &intel_dp->aux;
   2407	struct intel_psr *psr = &intel_dp->psr;
   2408	u8 val;
   2409	int r;
   2410
   2411	if (!psr->psr2_enabled)
   2412		return;
   2413
   2414	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
   2415	if (r != 1) {
   2416		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
   2417		return;
   2418	}
   2419
   2420	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
   2421		intel_psr_disable_locked(intel_dp);
   2422		psr->sink_not_reliable = true;
   2423		drm_dbg_kms(&dev_priv->drm,
   2424			    "ALPM lock timeout error, disabling PSR\n");
   2425
   2426		/* Clearing error */
   2427		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
   2428	}
   2429}
   2430
   2431static void psr_capability_changed_check(struct intel_dp *intel_dp)
   2432{
   2433	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2434	struct intel_psr *psr = &intel_dp->psr;
   2435	u8 val;
   2436	int r;
   2437
   2438	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
   2439	if (r != 1) {
   2440		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
   2441		return;
   2442	}
   2443
   2444	if (val & DP_PSR_CAPS_CHANGE) {
   2445		intel_psr_disable_locked(intel_dp);
   2446		psr->sink_not_reliable = true;
   2447		drm_dbg_kms(&dev_priv->drm,
   2448			    "Sink PSR capability changed, disabling PSR\n");
   2449
   2450		/* Clearing it */
   2451		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
   2452	}
   2453}
   2454
   2455void intel_psr_short_pulse(struct intel_dp *intel_dp)
   2456{
   2457	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
   2458	struct intel_psr *psr = &intel_dp->psr;
   2459	u8 status, error_status;
   2460	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
   2461			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
   2462			  DP_PSR_LINK_CRC_ERROR;
   2463
   2464	if (!CAN_PSR(intel_dp))
   2465		return;
   2466
   2467	mutex_lock(&psr->lock);
   2468
   2469	if (!psr->enabled)
   2470		goto exit;
   2471
   2472	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
   2473		drm_err(&dev_priv->drm,
   2474			"Error reading PSR status or error status\n");
   2475		goto exit;
   2476	}
   2477
   2478	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
   2479		intel_psr_disable_locked(intel_dp);
   2480		psr->sink_not_reliable = true;
   2481	}
   2482
   2483	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
   2484		drm_dbg_kms(&dev_priv->drm,
   2485			    "PSR sink internal error, disabling PSR\n");
   2486	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
   2487		drm_dbg_kms(&dev_priv->drm,
   2488			    "PSR RFB storage error, disabling PSR\n");
   2489	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
   2490		drm_dbg_kms(&dev_priv->drm,
   2491			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
   2492	if (error_status & DP_PSR_LINK_CRC_ERROR)
   2493		drm_dbg_kms(&dev_priv->drm,
   2494			    "PSR Link CRC error, disabling PSR\n");
   2495
   2496	if (error_status & ~errors)
   2497		drm_err(&dev_priv->drm,
   2498			"PSR_ERROR_STATUS unhandled errors %x\n",
   2499			error_status & ~errors);
   2500	/* clear status register */
   2501	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
   2502
   2503	psr_alpm_check(intel_dp);
   2504	psr_capability_changed_check(intel_dp);
   2505
   2506exit:
   2507	mutex_unlock(&psr->lock);
   2508}
   2509
   2510bool intel_psr_enabled(struct intel_dp *intel_dp)
   2511{
   2512	bool ret;
   2513
   2514	if (!CAN_PSR(intel_dp))
   2515		return false;
   2516
   2517	mutex_lock(&intel_dp->psr.lock);
   2518	ret = intel_dp->psr.enabled;
   2519	mutex_unlock(&intel_dp->psr.lock);
   2520
   2521	return ret;
   2522}
   2523
   2524/**
   2525 * intel_psr_lock - grab PSR lock
   2526 * @crtc_state: the crtc state
   2527 *
    2528 * This is initially meant to be used around a CRTC update, when
    2529 * vblank-sensitive registers are updated and we need to grab the lock
    2530 * before that to avoid vblank evasion.
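         *
         * A typical (hypothetical) caller sequence is:
         *   intel_psr_lock(crtc_state);
         *   ... update vblank-sensitive registers ...
         *   intel_psr_unlock(crtc_state);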
   2531 */
   2532void intel_psr_lock(const struct intel_crtc_state *crtc_state)
   2533{
   2534	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
   2535	struct intel_encoder *encoder;
   2536
   2537	if (!crtc_state->has_psr)
   2538		return;
   2539
   2540	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
   2541					     crtc_state->uapi.encoder_mask) {
   2542		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   2543
   2544		mutex_lock(&intel_dp->psr.lock);
   2545		break;
   2546	}
   2547}
   2548
   2549/**
   2550 * intel_psr_unlock - release PSR lock
   2551 * @crtc_state: the crtc state
   2552 *
   2553 * Release the PSR lock that was held during pipe update.
   2554 */
   2555void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
   2556{
   2557	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
   2558	struct intel_encoder *encoder;
   2559
   2560	if (!crtc_state->has_psr)
   2561		return;
   2562
   2563	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
   2564					     crtc_state->uapi.encoder_mask) {
   2565		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
   2566
   2567		mutex_unlock(&intel_dp->psr.lock);
   2568		break;
   2569	}
   2570}